# RADIUS tests
# Copyright (c) 2013-2016, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import binascii
import hashlib
import hmac
import logging
logger = logging.getLogger()
import os
import select
import struct
import subprocess
import threading
import time
import hostapd
from utils import HwsimSkip, require_under_vm, skip_with_fips, fail_test
from test_ap_hs20 import build_dhcp_ack
from test_ap_ft import ft_params1
def connect(dev, ssid, wait_connect=True):
    """Associate to ssid using the shared EAP-PSK test credentials.

    dev is a wpa_supplicant control object; wait_connect=False returns
    as soon as the network profile has been added and selected.
    """
    dev.connect(ssid, key_mgmt="WPA-EAP", eap="PSK",
                identity="psk.user@example.com",
                password_hex="0123456789abcdef0123456789abcdef",
                scan_freq="2412", wait_connect=wait_connect)
def test_radius_auth_unreachable(dev, apdev):
    """RADIUS Authentication server unreachable"""
    params = hostapd.wpa2_eap_params(ssid="radius-auth")
    # Point the AP at a local port where no RADIUS server is listening
    params['auth_server_port'] = "18139"
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    connect(dev[0], "radius-auth", wait_connect=False)
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
    if ev is None:
        raise Exception("Timeout on EAP start")
    logger.info("Checking for RADIUS retries")
    # Give hostapd enough time to attempt at least one retransmission
    time.sleep(4)
    mib = hapd.get_mib()
    if "radiusAuthClientAccessRequests" not in mib:
        raise Exception("Missing MIB fields")
    if int(mib["radiusAuthClientAccessRetransmissions"]) < 1:
        raise Exception("Missing RADIUS Authentication retransmission")
    if int(mib["radiusAuthClientPendingRequests"]) < 1:
        raise Exception("Missing pending RADIUS Authentication request")
def test_radius_auth_unreachable2(dev, apdev):
    """RADIUS Authentication server unreachable (2)"""
    # Add a loopback route so hostapd can resolve the server address at
    # startup, then delete it so packets have nowhere to go.
    subprocess.call(['ip', 'ro', 'replace', '192.168.213.17', 'dev', 'lo'])
    params = hostapd.wpa2_eap_params(ssid="radius-auth")
    params['auth_server_addr'] = "192.168.213.17"
    params['auth_server_port'] = "18139"
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    subprocess.call(['ip', 'ro', 'del', '192.168.213.17', 'dev', 'lo'])
    connect(dev[0], "radius-auth", wait_connect=False)
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
    if ev is None:
        raise Exception("Timeout on EAP start")
    logger.info("Checking for RADIUS retries")
    # Give hostapd enough time to attempt at least one retransmission
    time.sleep(4)
    mib = hapd.get_mib()
    if "radiusAuthClientAccessRequests" not in mib:
        raise Exception("Missing MIB fields")
    if int(mib["radiusAuthClientAccessRetransmissions"]) < 1:
        raise Exception("Missing RADIUS Authentication retransmission")
def test_radius_auth_unreachable3(dev, apdev):
    """RADIUS Authentication server initially unreachable, but then available"""
    # Blackhole the server address so initial requests are dropped
    subprocess.call(['ip', 'ro', 'replace', 'blackhole', '192.168.213.18'])
    params = hostapd.wpa2_eap_params(ssid="radius-auth")
    params['auth_server_addr'] = "192.168.213.18"
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    connect(dev[0], "radius-auth", wait_connect=False)
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
    if ev is None:
        raise Exception("Timeout on EAP start")
    subprocess.call(['ip', 'ro', 'del', 'blackhole', '192.168.213.18'])
    time.sleep(0.1)
    dev[0].request("DISCONNECT")
    # Switch hostapd to the working local server and retry the connection
    hapd.set('auth_server_addr_replace', '127.0.0.1')
    dev[0].request("RECONNECT")
    dev[0].wait_connected()
def test_radius_acct_unreachable(dev, apdev):
    """RADIUS Accounting server unreachable"""
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    params['acct_server_addr'] = "127.0.0.1"
    # No accounting server is listening on this port
    params['acct_server_port'] = "18139"
    params['acct_server_shared_secret'] = "radius"
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    connect(dev[0], "radius-acct")
    logger.info("Checking for RADIUS retries")
    # Give hostapd enough time to retransmit the accounting requests
    time.sleep(4)
    mib = hapd.get_mib()
    if "radiusAccClientRetransmissions" not in mib:
        raise Exception("Missing MIB fields")
    if int(mib["radiusAccClientRetransmissions"]) < 2:
        raise Exception("Missing RADIUS Accounting retransmissions")
    if int(mib["radiusAccClientPendingRequests"]) < 2:
        raise Exception("Missing pending RADIUS Accounting requests")
def test_radius_acct_unreachable2(dev, apdev):
    """RADIUS Accounting server unreachable(2)"""
    # Add a loopback route so hostapd can start, then delete it so the
    # accounting server becomes unreachable.
    subprocess.call(['ip', 'ro', 'replace', '192.168.213.17', 'dev', 'lo'])
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    params['acct_server_addr'] = "192.168.213.17"
    params['acct_server_port'] = "18139"
    params['acct_server_shared_secret'] = "radius"
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    subprocess.call(['ip', 'ro', 'del', '192.168.213.17', 'dev', 'lo'])
    connect(dev[0], "radius-acct")
    logger.info("Checking for RADIUS retries")
    time.sleep(4)
    mib = hapd.get_mib()
    if "radiusAccClientRetransmissions" not in mib:
        raise Exception("Missing MIB fields")
    if int(mib["radiusAccClientRetransmissions"]) < 1 and int(mib["radiusAccClientPendingRequests"]) < 1:
        raise Exception("Missing pending or retransmitted RADIUS Accounting requests")
def test_radius_acct_unreachable3(dev, apdev):
    """RADIUS Accounting server initially unreachable, but then available"""
    require_under_vm()
    # Blackhole the server address so initial accounting requests are dropped
    subprocess.call(['ip', 'ro', 'replace', 'blackhole', '192.168.213.18'])
    as_hapd = hostapd.Hostapd("as")
    as_mib_start = as_hapd.get_mib(param="radius_server")
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    params['acct_server_addr'] = "192.168.213.18"
    params['acct_server_port'] = "1813"
    params['acct_server_shared_secret'] = "radius"
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    connect(dev[0], "radius-acct")
    subprocess.call(['ip', 'ro', 'del', 'blackhole', '192.168.213.18'])
    time.sleep(0.1)
    dev[0].request("DISCONNECT")
    # Switch hostapd to the working local accounting server and reconnect
    hapd.set('acct_server_addr_replace', '127.0.0.1')
    dev[0].request("RECONNECT")
    dev[0].wait_connected()
    time.sleep(1)
    as_mib_end = as_hapd.get_mib(param="radius_server")
    # The server must now have produced accounting responses
    req_s = int(as_mib_start['radiusAccServTotalResponses'])
    req_e = int(as_mib_end['radiusAccServTotalResponses'])
    if req_e <= req_s:
        raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_unreachable4(dev, apdev):
    """RADIUS Accounting server unreachable and multiple STAs"""
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    # No accounting server is listening on this port
    params.update({'acct_server_addr': "127.0.0.1",
                   'acct_server_port': "18139",
                   'acct_server_shared_secret': "radius"})
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    # Cycle through a number of association rounds while the accounting
    # server stays unreachable
    for _ in range(20):
        connect(dev[0], "radius-acct")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()
def test_radius_acct(dev, apdev):
    """RADIUS Accounting"""
    as_hapd = hostapd.Hostapd("as")
    as_mib_start = as_hapd.get_mib(param="radius_server")
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    params['acct_server_addr'] = "127.0.0.1"
    params['acct_server_port'] = "1813"
    params['acct_server_shared_secret'] = "radius"
    # Extra attributes (attr_id:syntax:value) added to auth/acct requests
    params['radius_auth_req_attr'] = [ "126:s:Operator", "77:s:testing" ]
    params['radius_acct_req_attr'] = [ "126:s:Operator", "77:s:testing" ]
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    # Three stations using different EAP methods
    connect(dev[0], "radius-acct")
    dev[1].connect("radius-acct", key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="PAX", identity="test-class",
                   password_hex="0123456789abcdef0123456789abcdef")
    dev[2].connect("radius-acct", key_mgmt="WPA-EAP",
                   eap="GPSK", identity="gpsk-cui",
                   password="abcdefghijklmnop0123456789abcdef",
                   scan_freq="2412")
    logger.info("Checking for RADIUS counters")
    # Poll (up to ~1 s) until one Accounting-Response per station is seen
    count = 0
    while True:
        mib = hapd.get_mib()
        if int(mib['radiusAccClientResponses']) >= 3:
            break
        time.sleep(0.1)
        count += 1
        if count > 10:
            raise Exception("Did not receive Accounting-Response packets")
    if int(mib['radiusAccClientRetransmissions']) > 0:
        raise Exception("Unexpected Accounting-Request retransmission")
    as_mib_end = as_hapd.get_mib(param="radius_server")
    req_s = int(as_mib_start['radiusAccServTotalRequests'])
    req_e = int(as_mib_end['radiusAccServTotalRequests'])
    if req_e < req_s + 2:
        raise Exception("Unexpected RADIUS server acct MIB value")
    acc_s = int(as_mib_start['radiusAuthServAccessAccepts'])
    acc_e = int(as_mib_end['radiusAuthServAccessAccepts'])
    if acc_e < acc_s + 1:
        raise Exception("Unexpected RADIUS server auth MIB value")
def test_radius_acct_non_ascii_ssid(dev, apdev):
    """RADIUS Accounting and non-ASCII SSID"""
    params = hostapd.wpa2_eap_params()
    params.update({'acct_server_addr': "127.0.0.1",
                   'acct_server_port': "1813",
                   'acct_server_shared_secret': "radius"})
    # SSID given as a raw hex string (ssid2) since it is not printable ASCII
    ssid2 = "740665007374"
    params['ssid2'] = ssid2
    hostapd.add_ap(apdev[0], params)
    dev[0].connect(ssid2=ssid2, key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="PSK", identity="psk.user@example.com",
                   password_hex="0123456789abcdef0123456789abcdef")
def test_radius_acct_pmksa_caching(dev, apdev):
    """RADIUS Accounting with PMKSA caching"""
    as_hapd = hostapd.Hostapd("as")
    as_mib_start = as_hapd.get_mib(param="radius_server")
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    params['acct_server_addr'] = "127.0.0.1"
    params['acct_server_port'] = "1813"
    params['acct_server_shared_secret'] = "radius"
    hapd = hostapd.add_ap(apdev[0], params)
    connect(dev[0], "radius-acct")
    dev[1].connect("radius-acct", key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="PAX", identity="test-class",
                   password_hex="0123456789abcdef0123456789abcdef")
    # Reassociate both stations; PMKSA caching skips the full EAP exchange
    for d in [ dev[0], dev[1] ]:
        d.request("REASSOCIATE")
        d.wait_connected(timeout=15, error="Reassociation timed out")
    # Poll (up to ~1 s) until accounting responses for all sessions are seen
    count = 0
    while True:
        mib = hapd.get_mib()
        if int(mib['radiusAccClientResponses']) >= 4:
            break
        time.sleep(0.1)
        count += 1
        if count > 10:
            raise Exception("Did not receive Accounting-Response packets")
    if int(mib['radiusAccClientRetransmissions']) > 0:
        raise Exception("Unexpected Accounting-Request retransmission")
    as_mib_end = as_hapd.get_mib(param="radius_server")
    req_s = int(as_mib_start['radiusAccServTotalRequests'])
    req_e = int(as_mib_end['radiusAccServTotalRequests'])
    if req_e < req_s + 2:
        raise Exception("Unexpected RADIUS server acct MIB value")
    acc_s = int(as_mib_start['radiusAuthServAccessAccepts'])
    acc_e = int(as_mib_end['radiusAuthServAccessAccepts'])
    if acc_e < acc_s + 1:
        raise Exception("Unexpected RADIUS server auth MIB value")
def test_radius_acct_interim(dev, apdev):
    """RADIUS Accounting interim update"""
    as_hapd = hostapd.Hostapd("as")
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    params['acct_server_addr'] = "127.0.0.1"
    params['acct_server_port'] = "1813"
    params['acct_server_shared_secret'] = "radius"
    # Send Interim-Update accounting messages every second
    params['radius_acct_interim_interval'] = "1"
    hostapd.add_ap(apdev[0], params)
    hapd = hostapd.Hostapd(apdev[0]['ifname'])
    connect(dev[0], "radius-acct")
    logger.info("Checking for RADIUS counters")
    as_mib_start = as_hapd.get_mib(param="radius_server")
    # Wait a bit over three interim intervals
    time.sleep(3.1)
    as_mib_end = as_hapd.get_mib(param="radius_server")
    req_s = int(as_mib_start['radiusAccServTotalRequests'])
    req_e = int(as_mib_end['radiusAccServTotalRequests'])
    if req_e < req_s + 3:
        raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_interim_unreachable(dev, apdev):
    """RADIUS Accounting interim update with unreachable server"""
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    params['acct_server_addr'] = "127.0.0.1"
    # No accounting server is listening on this port
    params['acct_server_port'] = "18139"
    params['acct_server_shared_secret'] = "radius"
    # Send Interim-Update accounting messages every second
    params['radius_acct_interim_interval'] = "1"
    hapd = hostapd.add_ap(apdev[0], params)
    start = hapd.get_mib()
    connect(dev[0], "radius-acct")
    # Fixed typo in log message ("interium" -> "interim")
    logger.info("Waiting for interim accounting updates")
    # Wait a bit over three interim intervals; each update should time out
    time.sleep(3.1)
    end = hapd.get_mib()
    req_s = int(start['radiusAccClientTimeouts'])
    req_e = int(end['radiusAccClientTimeouts'])
    if req_e < req_s + 2:
        raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_interim_unreachable2(dev, apdev):
    """RADIUS Accounting interim update with unreachable server (retry)"""
    params = hostapd.wpa2_eap_params(ssid="radius-acct")
    params['acct_server_addr'] = "127.0.0.1"
    # No accounting server is listening on this port
    params['acct_server_port'] = "18139"
    params['acct_server_shared_secret'] = "radius"
    # Use long enough interim update interval to allow RADIUS retransmission
    # case (3 seconds) to trigger first.
    params['radius_acct_interim_interval'] = "4"
    hapd = hostapd.add_ap(apdev[0], params)
    start = hapd.get_mib()
    connect(dev[0], "radius-acct")
    # Fixed typo in log message ("interium" -> "interim")
    logger.info("Waiting for interim accounting updates")
    time.sleep(7.5)
    end = hapd.get_mib()
    req_s = int(start['radiusAccClientTimeouts'])
    req_e = int(end['radiusAccClientTimeouts'])
    if req_e < req_s + 2:
        raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_ipaddr(dev, apdev):
    """RADIUS Accounting and Framed-IP-Address"""
    try:
        _test_radius_acct_ipaddr(dev, apdev)
    finally:
        # Clean up the bridge interface regardless of test outcome; errors
        # are discarded since the bridge may never have been created.
        # Use a context manager so the /dev/null handle is not leaked.
        with open(os.devnull, 'w') as devnull:
            subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'down'],
                            stderr=devnull)
            subprocess.call(['brctl', 'delbr', 'ap-br0'],
                            stderr=devnull)
def _test_radius_acct_ipaddr(dev, apdev):
    """Helper for test_radius_acct_ipaddr: run the actual test steps.

    Sets up an open AP on a bridge with proxy ARP enabled and injects a
    DHCP ACK frame so hostapd learns the station's IP address (reported
    via Framed-IP-Address in accounting).
    """
    params = { "ssid": "radius-acct-open",
               'acct_server_addr': "127.0.0.1",
               'acct_server_port': "1813",
               'acct_server_shared_secret': "radius",
               'proxy_arp': '1',
               'ap_isolate': '1',
               'bridge': 'ap-br0' }
    hapd = hostapd.add_ap(apdev[0], params, no_enable=True)
    try:
        hapd.enable()
    except Exception:
        # For now, do not report failures due to missing kernel support
        raise HwsimSkip("Could not start hostapd - assume proxyarp not supported in kernel version")
    bssid = apdev[0]['bssid']
    subprocess.call(['brctl', 'setfd', 'ap-br0', '0'])
    subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'up'])
    dev[0].connect("radius-acct-open", key_mgmt="NONE", scan_freq="2412")
    addr0 = dev[0].own_addr()
    pkt = build_dhcp_ack(dst_ll="ff:ff:ff:ff:ff:ff", src_ll=bssid,
                         ip_src="192.168.1.1", ip_dst="255.255.255.255",
                         yiaddr="192.168.1.123", chaddr=addr0)
    # hexlify() returns bytes on Python 3; decode so the str concatenation
    # below works on both Python 2 and 3
    if "OK" not in hapd.request("DATA_TEST_FRAME ifname=ap-br0 " +
                                binascii.hexlify(pkt).decode()):
        raise Exception("DATA_TEST_FRAME failed")
    dev[0].request("DISCONNECT")
    dev[0].wait_disconnected()
    hapd.disable()
def send_and_check_reply(srv, req, code, error_cause=0):
    """Send a RADIUS DAS request and validate hostapd's reply.

    Raises an exception if the reply code differs from 'code' or, when
    error_cause is non-zero, if the Error-Cause attribute is missing or
    carries an unexpected value.
    """
    reply = srv.SendPacket(req)
    logger.debug("RADIUS response from hostapd")
    for attr in reply.keys():
        logger.debug("%s: %s" % (attr, reply[attr]))
    if reply.code != code:
        raise Exception("Unexpected response code")
    if not error_cause:
        return
    if 'Error-Cause' not in reply:
        raise Exception("Missing Error-Cause")
    if reply['Error-Cause'][0] != error_cause:
        raise Exception("Unexpected Error-Cause: {}".format(reply['Error-Cause']))
def test_radius_acct_psk(dev, apdev):
    """RADIUS Accounting - PSK"""
    as_hapd = hostapd.Hostapd("as")
    params = hostapd.wpa2_params(ssid="radius-acct", passphrase="12345678")
    params.update({'acct_server_addr': "127.0.0.1",
                   'acct_server_port': "1813",
                   'acct_server_shared_secret': "radius"})
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].connect("radius-acct", psk="12345678", scan_freq="2412")
def test_radius_acct_psk_sha256(dev, apdev):
    """RADIUS Accounting - PSK SHA256"""
    as_hapd = hostapd.Hostapd("as")
    params = hostapd.wpa2_params(ssid="radius-acct", passphrase="12345678")
    params.update({"wpa_key_mgmt": "WPA-PSK-SHA256",
                   'acct_server_addr': "127.0.0.1",
                   'acct_server_port': "1813",
                   'acct_server_shared_secret': "radius"})
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].connect("radius-acct", key_mgmt="WPA-PSK-SHA256",
                   psk="12345678", scan_freq="2412")
def test_radius_acct_ft_psk(dev, apdev):
    """RADIUS Accounting - FT-PSK"""
    as_hapd = hostapd.Hostapd("as")
    params = ft_params1(ssid="radius-acct", passphrase="12345678")
    params.update({'acct_server_addr': "127.0.0.1",
                   'acct_server_port': "1813",
                   'acct_server_shared_secret': "radius"})
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].connect("radius-acct", key_mgmt="FT-PSK",
                   psk="12345678", scan_freq="2412")
def test_radius_acct_ieee8021x(dev, apdev):
    """RADIUS Accounting - IEEE 802.1X"""
    skip_with_fips(dev[0])
    as_hapd = hostapd.Hostapd("as")
    params = hostapd.radius_params()
    # Dynamic WEP with IEEE 802.1X (no WPA)
    params.update({"ssid": "radius-acct-1x",
                   "ieee8021x": "1",
                   "wep_key_len_broadcast": "13",
                   "wep_key_len_unicast": "13",
                   'acct_server_addr': "127.0.0.1",
                   'acct_server_port': "1813",
                   'acct_server_shared_secret': "radius"})
    hapd = hostapd.add_ap(apdev[0], params)
    dev[0].connect("radius-acct-1x", key_mgmt="IEEE8021X", eap="PSK",
                   identity="psk.user@example.com",
                   password_hex="0123456789abcdef0123456789abcdef",
                   scan_freq="2412")
def test_radius_das_disconnect(dev, apdev):
    """RADIUS Dynamic Authorization Extensions - Disconnect"""
    try:
        import pyrad.client
        import pyrad.packet
        import pyrad.dictionary
        import radius_das
    except ImportError:
        raise HwsimSkip("No pyrad modules available")
    params = hostapd.wpa2_eap_params(ssid="radius-das")
    params['radius_das_port'] = "3799"
    params['radius_das_client'] = "127.0.0.1 secret"
    params['radius_das_require_event_timestamp'] = "1"
    params['own_ip_addr'] = "127.0.0.1"
    params['nas_identifier'] = "nas.example.com"
    hapd = hostapd.add_ap(apdev[0], params)
    connect(dev[0], "radius-das")
    addr = dev[0].p2p_interface_addr()
    sta = hapd.get_sta(addr)
    # NOTE: id and dict shadow builtins; kept as-is in this legacy test code
    id = sta['dot1xAuthSessionId']
    dict = pyrad.dictionary.Dictionary("dictionary.radius")
    srv = pyrad.client.Client(server="127.0.0.1", acctport=3799,
                              secret="secret", dict=dict)
    srv.retries = 1
    srv.timeout = 1
    # --- Negative cases: invalid requests must be silently ignored ---
    logger.info("Disconnect-Request with incorrect secret")
    req = radius_das.DisconnectPacket(dict=dict, secret="incorrect",
                                      User_Name="foo",
                                      NAS_Identifier="localhost",
                                      Event_Timestamp=int(time.time()))
    logger.debug(req)
    try:
        reply = srv.SendPacket(req)
        raise Exception("Unexpected response to Disconnect-Request")
    except pyrad.client.Timeout:
        logger.info("Disconnect-Request with incorrect secret properly ignored")
    logger.info("Disconnect-Request without Event-Timestamp")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      User_Name="psk.user@example.com")
    logger.debug(req)
    try:
        reply = srv.SendPacket(req)
        raise Exception("Unexpected response to Disconnect-Request")
    except pyrad.client.Timeout:
        logger.info("Disconnect-Request without Event-Timestamp properly ignored")
    logger.info("Disconnect-Request with non-matching Event-Timestamp")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      User_Name="psk.user@example.com",
                                      Event_Timestamp=123456789)
    logger.debug(req)
    try:
        reply = srv.SendPacket(req)
        raise Exception("Unexpected response to Disconnect-Request")
    except pyrad.client.Timeout:
        logger.info("Disconnect-Request with non-matching Event-Timestamp properly ignored")
    # --- Negative cases: requests that must be NAKed with an Error-Cause ---
    logger.info("Disconnect-Request with unsupported attribute")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      User_Name="foo",
                                      User_Password="foo",
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 401)
    logger.info("Disconnect-Request with invalid Calling-Station-Id")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      User_Name="foo",
                                      Calling_Station_Id="foo",
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 407)
    logger.info("Disconnect-Request with mismatching User-Name")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      User_Name="foo",
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
    logger.info("Disconnect-Request with mismatching Calling-Station-Id")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      Calling_Station_Id="12:34:56:78:90:aa",
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
    logger.info("Disconnect-Request with mismatching Acct-Session-Id")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      Acct_Session_Id="12345678-87654321",
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
    logger.info("Disconnect-Request with mismatching Acct-Session-Id (len)")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      Acct_Session_Id="12345678",
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
    logger.info("Disconnect-Request with mismatching Acct-Multi-Session-Id")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      Acct_Multi_Session_Id="12345678+87654321",
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
    logger.info("Disconnect-Request with mismatching Acct-Multi-Session-Id (len)")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      Acct_Multi_Session_Id="12345678",
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
    logger.info("Disconnect-Request with no session identification attributes")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
    # None of the above must have disconnected the station
    ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
    if ev is not None:
        raise Exception("Unexpected disconnection")
    logger.info("Disconnect-Request with mismatching NAS-IP-Address")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      NAS_IP_Address="192.168.3.4",
                                      Acct_Session_Id=id,
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 403)
    logger.info("Disconnect-Request with mismatching NAS-Identifier")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      NAS_Identifier="unknown.example.com",
                                      Acct_Session_Id=id,
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 403)
    ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
    if ev is not None:
        raise Exception("Unexpected disconnection")
    # --- Positive cases: valid session identifiers must disconnect the STA ---
    logger.info("Disconnect-Request with matching Acct-Session-Id")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      NAS_IP_Address="127.0.0.1",
                                      NAS_Identifier="nas.example.com",
                                      Acct_Session_Id=id,
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
    dev[0].wait_disconnected(timeout=10)
    dev[0].wait_connected(timeout=10, error="Re-connection timed out")
    logger.info("Disconnect-Request with matching Acct-Multi-Session-Id")
    sta = hapd.get_sta(addr)
    multi_sess_id = sta['authMultiSessionId']
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      NAS_IP_Address="127.0.0.1",
                                      NAS_Identifier="nas.example.com",
                                      Acct_Multi_Session_Id=multi_sess_id,
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
    dev[0].wait_disconnected(timeout=10)
    dev[0].wait_connected(timeout=10, error="Re-connection timed out")
    logger.info("Disconnect-Request with matching User-Name")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      NAS_Identifier="nas.example.com",
                                      User_Name="psk.user@example.com",
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
    dev[0].wait_disconnected(timeout=10)
    dev[0].wait_connected(timeout=10, error="Re-connection timed out")
    logger.info("Disconnect-Request with matching Calling-Station-Id")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      NAS_IP_Address="127.0.0.1",
                                      Calling_Station_Id=addr,
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
    dev[0].wait_disconnected(timeout=10)
    # Full EAP authentication (not PMKSA caching) must be used after this
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED", "CTRL-EVENT-CONNECTED"])
    if ev is None:
        raise Exception("Timeout while waiting for re-connection")
    if "CTRL-EVENT-EAP-STARTED" not in ev:
        raise Exception("Unexpected skipping of EAP authentication in reconnection")
    dev[0].wait_connected(timeout=10, error="Re-connection timed out")
    logger.info("Disconnect-Request with matching Calling-Station-Id and non-matching CUI")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      Calling_Station_Id=addr,
                                      Chargeable_User_Identity="foo@example.com",
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, error_cause=503)
    logger.info("Disconnect-Request with matching CUI")
    dev[1].connect("radius-das", key_mgmt="WPA-EAP",
                   eap="GPSK", identity="gpsk-cui",
                   password="abcdefghijklmnop0123456789abcdef",
                   scan_freq="2412")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      Chargeable_User_Identity="gpsk-chargeable-user-identity",
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
    dev[1].wait_disconnected(timeout=10)
    dev[1].wait_connected(timeout=10, error="Re-connection timed out")
    ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
    if ev is not None:
        raise Exception("Unexpected disconnection")
    connect(dev[2], "radius-das")
    logger.info("Disconnect-Request with matching User-Name - multiple sessions matching")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      NAS_Identifier="nas.example.com",
                                      User_Name="psk.user@example.com",
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, error_cause=508)
    logger.info("Disconnect-Request with User-Name matching multiple sessions, Calling-Station-Id only one")
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      NAS_Identifier="nas.example.com",
                                      Calling_Station_Id=addr,
                                      User_Name="psk.user@example.com",
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
    dev[0].wait_disconnected(timeout=10)
    dev[0].wait_connected(timeout=10, error="Re-connection timed out")
    ev = dev[2].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
    if ev is not None:
        raise Exception("Unexpected disconnection")
    # --- Disconnect-Request against sessions that have already disassociated ---
    logger.info("Disconnect-Request with matching Acct-Multi-Session-Id after disassociation")
    sta = hapd.get_sta(addr)
    multi_sess_id = sta['authMultiSessionId']
    dev[0].request("DISCONNECT")
    dev[0].wait_disconnected(timeout=10)
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      NAS_IP_Address="127.0.0.1",
                                      NAS_Identifier="nas.example.com",
                                      Acct_Multi_Session_Id=multi_sess_id,
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
    dev[0].request("RECONNECT")
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15)
    if ev is None:
        raise Exception("Timeout on EAP start")
    dev[0].wait_connected(timeout=15)
    logger.info("Disconnect-Request with matching User-Name after disassociation")
    dev[0].request("DISCONNECT")
    dev[0].wait_disconnected(timeout=10)
    dev[2].request("DISCONNECT")
    dev[2].wait_disconnected(timeout=10)
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      NAS_IP_Address="127.0.0.1",
                                      NAS_Identifier="nas.example.com",
                                      User_Name="psk.user@example.com",
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
    logger.info("Disconnect-Request with matching CUI after disassociation")
    dev[1].request("DISCONNECT")
    dev[1].wait_disconnected(timeout=10)
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      NAS_IP_Address="127.0.0.1",
                                      NAS_Identifier="nas.example.com",
                                      Chargeable_User_Identity="gpsk-chargeable-user-identity",
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
    logger.info("Disconnect-Request with matching Calling-Station-Id after disassociation")
    dev[0].request("RECONNECT")
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15)
    if ev is None:
        raise Exception("Timeout on EAP start")
    dev[0].wait_connected(timeout=15)
    dev[0].request("DISCONNECT")
    dev[0].wait_disconnected(timeout=10)
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      NAS_IP_Address="127.0.0.1",
                                      NAS_Identifier="nas.example.com",
                                      Calling_Station_Id=addr,
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
    logger.info("Disconnect-Request with mismatching Calling-Station-Id after disassociation")
    # The session was removed above, so the same identifiers no longer match
    req = radius_das.DisconnectPacket(dict=dict, secret="secret",
                                      NAS_IP_Address="127.0.0.1",
                                      NAS_Identifier="nas.example.com",
                                      Calling_Station_Id=addr,
                                      Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, error_cause=503)
def test_radius_das_coa(dev, apdev):
    """RADIUS Dynamic Authorization Extensions - CoA"""
    try:
        import pyrad.client
        import pyrad.packet
        import pyrad.dictionary
        import radius_das
    except ImportError:
        raise HwsimSkip("No pyrad modules available")
    params = hostapd.wpa2_eap_params(ssid="radius-das")
    params['radius_das_port'] = "3799"
    params['radius_das_client'] = "127.0.0.1 secret"
    params['radius_das_require_event_timestamp'] = "1"
    hapd = hostapd.add_ap(apdev[0], params)
    connect(dev[0], "radius-das")
    addr = dev[0].p2p_interface_addr()
    sta = hapd.get_sta(addr)
    # NOTE: id and dict shadow builtins; kept as-is in this legacy test code
    id = sta['dot1xAuthSessionId']
    dict = pyrad.dictionary.Dictionary("dictionary.radius")
    srv = pyrad.client.Client(server="127.0.0.1", acctport=3799,
                              secret="secret", dict=dict)
    srv.retries = 1
    srv.timeout = 1
    # hostapd does not currently support CoA-Request, so NAK is expected
    logger.info("CoA-Request with matching Acct-Session-Id")
    req = radius_das.CoAPacket(dict=dict, secret="secret",
                               Acct_Session_Id=id,
                               Event_Timestamp=int(time.time()))
    send_and_check_reply(srv, req, pyrad.packet.CoANAK, error_cause=405)
def test_radius_ipv6(dev, apdev):
    """RADIUS connection over IPv6"""
    # Start a RADIUS server instance on the second AP, listening on IPv6
    params = {'ssid': 'as',
              'beacon_int': '2000',
              'radius_server_clients': 'auth_serv/radius_clients_ipv6.conf',
              'radius_server_ipv6': '1',
              'radius_server_auth_port': '18129',
              'radius_server_acct_port': '18139',
              'eap_server': '1',
              'eap_user_file': 'auth_serv/eap_user.conf',
              'ca_cert': 'auth_serv/ca.pem',
              'server_cert': 'auth_serv/server.pem',
              'private_key': 'auth_serv/server.key'}
    hostapd.add_ap(apdev[1], params)
    # AP under test uses the IPv6 any-address for the auth/acct servers
    params = hostapd.wpa2_eap_params(ssid="radius-ipv6")
    params.update({'auth_server_addr': "::0",
                   'auth_server_port': "18129",
                   'acct_server_addr': "::0",
                   'acct_server_port': "18139",
                   'acct_server_shared_secret': "radius",
                   'own_ip_addr': "::0"})
    hostapd.add_ap(apdev[0], params)
    connect(dev[0], "radius-ipv6")
def test_radius_macacl(dev, apdev):
    """RADIUS MAC ACL"""
    params = hostapd.radius_params()
    # macaddr_acl=2: use the external RADIUS server for MAC-based access control
    params.update({"ssid": "radius", "macaddr_acl": "2"})
    hostapd.add_ap(apdev[0], params)
    dev[0].connect("radius", key_mgmt="NONE", scan_freq="2412")
def test_radius_macacl_acct(dev, apdev):
    """RADIUS MAC ACL and accounting enabled"""
    params = hostapd.radius_params()
    # macaddr_acl=2: use the external RADIUS server for MAC-based access control
    params.update({"ssid": "radius",
                   "macaddr_acl": "2",
                   'acct_server_addr': "127.0.0.1",
                   'acct_server_port': "1813",
                   'acct_server_shared_secret': "radius"})
    hostapd.add_ap(apdev[0], params)
    dev[0].connect("radius", key_mgmt="NONE", scan_freq="2412")
    dev[1].connect("radius", key_mgmt="NONE", scan_freq="2412")
    dev[1].request("DISCONNECT")
    dev[1].wait_disconnected()
    dev[1].request("RECONNECT")
def test_radius_failover(dev, apdev):
    """RADIUS Authentication and Accounting server failover"""
    subprocess.call(['ip', 'ro', 'replace', '192.168.213.17', 'dev', 'lo'])
    as_hapd = hostapd.Hostapd("as")
    as_mib_start = as_hapd.get_mib(param="radius_server")
    params = hostapd.wpa2_eap_params(ssid="radius-failover")
    # Primary servers (will be made unreachable below)
    params["auth_server_addr"] = "192.168.213.17"
    params["auth_server_port"] = "1812"
    params["auth_server_shared_secret"] = "testing"
    params['acct_server_addr'] = "192.168.213.17"
    params['acct_server_port'] = "1813"
    params['acct_server_shared_secret'] = "testing"
    params['radius_retry_primary_interval'] = "20"
    hapd = hostapd.add_ap(apdev[0], params, no_enable=True)
    # Secondary (working) servers added via SET before enabling the AP
    hapd.set("auth_server_addr", "127.0.0.1")
    hapd.set("auth_server_port", "1812")
    hapd.set("auth_server_shared_secret", "radius")
    hapd.set('acct_server_addr', "127.0.0.1")
    hapd.set('acct_server_port', "1813")
    hapd.set('acct_server_shared_secret', "radius")
    hapd.enable()
    ev = hapd.wait_event(["AP-ENABLED", "AP-DISABLED"], timeout=30)
    if ev is None:
        raise Exception("AP startup timed out")
    if "AP-ENABLED" not in ev:
        raise Exception("AP startup failed")
    start = os.times()[4]
    try:
        # Make the primary server unreachable; hostapd should fail over
        subprocess.call(['ip', 'ro', 'replace', 'prohibit', '192.168.213.17'])
        dev[0].request("SET EAPOL::authPeriod 5")
        connect(dev[0], "radius-failover", wait_connect=False)
        dev[0].wait_connected(timeout=20)
    finally:
        dev[0].request("SET EAPOL::authPeriod 30")
        subprocess.call(['ip', 'ro', 'del', '192.168.213.17'])
    as_mib_end = as_hapd.get_mib(param="radius_server")
    req_s = int(as_mib_start['radiusAccServTotalRequests'])
    req_e = int(as_mib_end['radiusAccServTotalRequests'])
    if req_e <= req_s:
        raise Exception("Unexpected RADIUS server acct MIB value")
    end = os.times()[4]
    try:
        subprocess.call(['ip', 'ro', 'replace', 'prohibit', '192.168.213.17'])
        dev[1].request("SET EAPOL::authPeriod 5")
        # Wait out radius_retry_primary_interval so hostapd has moved back
        # to trying the (still unreachable) primary server
        if end - start < 21:
            time.sleep(21 - (end - start))
        connect(dev[1], "radius-failover", wait_connect=False)
        dev[1].wait_connected(timeout=20)
    finally:
        dev[1].request("SET EAPOL::authPeriod 30")
        subprocess.call(['ip', 'ro', 'del', '192.168.213.17'])
def run_pyrad_server(srv, t_events):
    """Thread entry point: run the pyrad test server until t_events['stop'] is set."""
    srv.RunWithStop(t_events)
def test_radius_protocol(dev, apdev):
    """RADIUS Authentication protocol tests with a fake server"""
    try:
        import pyrad.server
        import pyrad.packet
        import pyrad.dictionary
    except ImportError:
        raise HwsimSkip("No pyrad modules available")

    class TestServer(pyrad.server.Server):
        # Fake RADIUS server whose reply behavior is steered by the
        # threading.Event flags passed in via t_events.
        def _HandleAuthPacket(self, pkt):
            pyrad.server.Server._HandleAuthPacket(self, pkt)
            logger.info("Received authentication request")
            reply = self.CreateReplyPacket(pkt)
            reply.code = pyrad.packet.AccessAccept
            if self.t_events['msg_auth'].is_set():
                logger.info("Add Message-Authenticator")
                if self.t_events['wrong_secret'].is_set():
                    logger.info("Use incorrect RADIUS shared secret")
                    pw = "incorrect"
                else:
                    pw = reply.secret
                # HMAC-MD5 over code | id | length | request authenticator |
                # attributes (RFC 2869 Message-Authenticator).
                # NOTE(review): hmac.new() without digestmod defaults to MD5
                # only on Python 2; Python 3 requires an explicit digestmod.
                hmac_obj = hmac.new(pw)
                hmac_obj.update(struct.pack("B", reply.code))
                hmac_obj.update(struct.pack("B", reply.id))
                # reply attributes
                reply.AddAttribute("Message-Authenticator",
                                   "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
                attrs = reply._PktEncodeAttributes()
                # Length
                flen = 4 + 16 + len(attrs)
                hmac_obj.update(struct.pack(">H", flen))
                hmac_obj.update(pkt.authenticator)
                hmac_obj.update(attrs)
                if self.t_events['double_msg_auth'].is_set():
                    logger.info("Include two Message-Authenticator attributes")
                else:
                    # Drop the zero-filled placeholder (attribute type 80)
                    # before adding the real digest.
                    del reply[80]
                reply.AddAttribute("Message-Authenticator", hmac_obj.digest())
            self.SendReplyPacket(pkt.fd, reply)

        def RunWithStop(self, t_events):
            # Poll-based main loop that can be terminated through the
            # 'stop' event (pyrad's own Run() blocks forever).
            self._poll = select.poll()
            self._fdmap = {}
            self._PrepareSockets()
            self.t_events = t_events
            while not t_events['stop'].is_set():
                for (fd, event) in self._poll.poll(1000):
                    if event == select.POLLIN:
                        try:
                            fdo = self._fdmap[fd]
                            self._ProcessInput(fdo)
                        except pyrad.server.ServerPacketError as err:
                            logger.info("pyrad server dropping packet: " + str(err))
                        except pyrad.packet.PacketError as err:
                            logger.info("pyrad server received invalid packet: " + str(err))
                    else:
                        logger.error("Unexpected event in pyrad server main loop")

    srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
                     authport=18138, acctport=18139)
    srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
                                                     "radius",
                                                     "localhost")
    srv.BindToAddress("")
    # Event flags that drive the fake server from the test body below.
    t_events = {}
    t_events['stop'] = threading.Event()
    t_events['msg_auth'] = threading.Event()
    t_events['wrong_secret'] = threading.Event()
    t_events['double_msg_auth'] = threading.Event()
    t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
    t.start()

    try:
        params = hostapd.wpa2_eap_params(ssid="radius-test")
        params['auth_server_port'] = "18138"
        hapd = hostapd.add_ap(apdev[0], params)
        # Case 1: reply without Message-Authenticator
        connect(dev[0], "radius-test", wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        time.sleep(1)
        dev[0].request("REMOVE_NETWORK all")
        time.sleep(0.1)
        dev[0].dump_monitor()
        # Case 2: Message-Authenticator computed with the wrong shared secret
        t_events['msg_auth'].set()
        t_events['wrong_secret'].set()
        connect(dev[0], "radius-test", wait_connect=False)
        time.sleep(1)
        dev[0].request("REMOVE_NETWORK all")
        time.sleep(0.1)
        dev[0].dump_monitor()
        # Case 3: valid Message-Authenticator
        t_events['wrong_secret'].clear()
        connect(dev[0], "radius-test", wait_connect=False)
        time.sleep(1)
        dev[0].request("REMOVE_NETWORK all")
        time.sleep(0.1)
        dev[0].dump_monitor()
        # Case 4: duplicated Message-Authenticator attributes
        t_events['double_msg_auth'].set()
        connect(dev[0], "radius-test", wait_connect=False)
        time.sleep(1)
    finally:
        # Always stop the fake server thread, even on test failure.
        t_events['stop'].set()
        t.join()
def test_radius_psk(dev, apdev):
    """WPA2 with PSK from RADIUS"""
    try:
        import pyrad.server
        import pyrad.packet
        import pyrad.dictionary
    except ImportError:
        raise HwsimSkip("No pyrad modules available")

    class TestServer(pyrad.server.Server):
        # Fake RADIUS server that delivers the WPA2 PSK in a Tunnel-Password
        # attribute, encrypted with the shared secret (RFC 2868 style:
        # MD5(secret + authenticator + salt) keystream XORed per 16-byte block).
        def _HandleAuthPacket(self, pkt):
            pyrad.server.Server._HandleAuthPacket(self, pkt)
            logger.info("Received authentication request")
            reply = self.CreateReplyPacket(pkt)
            reply.code = pyrad.packet.AccessAccept
            a = "\xab\xcd"  # two-octet salt
            secret = reply.secret
            # NOTE(review): mixing b'...' and "..." literals works only on
            # Python 2 (where str is bytes); this would fail on Python 3.
            if self.t_events['long'].is_set():
                # 16-octet passphrase -> length byte + padding = two blocks
                p = b'\x10' + "0123456789abcdef" + 15 * b'\x00'
                b = hashlib.md5(secret + pkt.authenticator + a).digest()
                pp = bytearray(p[0:16])
                bb = bytearray(b)
                cc = bytearray(pp[i] ^ bb[i] for i in range(len(bb)))
                # Second block is chained on the first ciphertext block.
                b = hashlib.md5(reply.secret + bytes(cc)).digest()
                pp = bytearray(p[16:32])
                bb = bytearray(b)
                cc += bytearray(pp[i] ^ bb[i] for i in range(len(bb)))
                data = '\x00' + a + bytes(cc)
            else:
                # 8-octet passphrase -> single block
                p = b'\x08' + "12345678" + 7 * b'\x00'
                b = hashlib.md5(secret + pkt.authenticator + a).digest()
                pp = bytearray(p)
                bb = bytearray(b)
                cc = bytearray(pp[i] ^ bb[i] for i in range(len(bb)))
                data = '\x00' + a + bytes(cc)
            reply.AddAttribute("Tunnel-Password", data)
            self.SendReplyPacket(pkt.fd, reply)

        def RunWithStop(self, t_events):
            # Poll-based main loop terminated through the 'stop' event.
            self._poll = select.poll()
            self._fdmap = {}
            self._PrepareSockets()
            self.t_events = t_events
            while not t_events['stop'].is_set():
                for (fd, event) in self._poll.poll(1000):
                    if event == select.POLLIN:
                        try:
                            fdo = self._fdmap[fd]
                            self._ProcessInput(fdo)
                        except pyrad.server.ServerPacketError as err:
                            logger.info("pyrad server dropping packet: " + str(err))
                        except pyrad.packet.PacketError as err:
                            logger.info("pyrad server received invalid packet: " + str(err))
                    else:
                        logger.error("Unexpected event in pyrad server main loop")

    srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
                     authport=18138, acctport=18139)
    srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
                                                     "radius",
                                                     "localhost")
    srv.BindToAddress("")
    t_events = {}
    t_events['stop'] = threading.Event()
    t_events['long'] = threading.Event()
    t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
    t.start()

    try:
        ssid = "test-wpa2-psk"
        params = hostapd.radius_params()
        params['ssid'] = ssid
        params["wpa"] = "2"
        params["wpa_key_mgmt"] = "WPA-PSK"
        params["rsn_pairwise"] = "CCMP"
        params['macaddr_acl'] = '2'
        params['wpa_psk_radius'] = '2'
        params['auth_server_port'] = "18138"
        hapd = hostapd.add_ap(apdev[0], params)
        # Short (8-octet) PSK from the fake server
        dev[0].connect(ssid, psk="12345678", scan_freq="2412")
        # Long (16-octet) PSK from the fake server
        t_events['long'].set()
        dev[1].connect(ssid, psk="0123456789abcdef", scan_freq="2412")
    finally:
        t_events['stop'].set()
        t.join()
def test_radius_psk_invalid(dev, apdev):
    """WPA2 with invalid PSK from RADIUS"""
    try:
        import pyrad.server
        import pyrad.packet
        import pyrad.dictionary
    except ImportError:
        raise HwsimSkip("No pyrad modules available")

    class TestServer(pyrad.server.Server):
        # Fake RADIUS server that returns a too-short (7-octet) passphrase
        # in Tunnel-Password; the connection is expected to fail.
        def _HandleAuthPacket(self, pkt):
            pyrad.server.Server._HandleAuthPacket(self, pkt)
            logger.info("Received authentication request")
            reply = self.CreateReplyPacket(pkt)
            reply.code = pyrad.packet.AccessAccept
            a = "\xab\xcd"  # two-octet salt
            secret = reply.secret
            # NOTE(review): mixing b'...' and "..." literals is Python 2 only.
            p = b'\x07' + "1234567" + 8 * b'\x00'
            b = hashlib.md5(secret + pkt.authenticator + a).digest()
            pp = bytearray(p)
            bb = bytearray(b)
            cc = bytearray(pp[i] ^ bb[i] for i in range(len(bb)))
            data = '\x00' + a + bytes(cc)
            reply.AddAttribute("Tunnel-Password", data)
            self.SendReplyPacket(pkt.fd, reply)

        def RunWithStop(self, t_events):
            # Poll-based main loop terminated through the 'stop' event.
            self._poll = select.poll()
            self._fdmap = {}
            self._PrepareSockets()
            self.t_events = t_events
            while not t_events['stop'].is_set():
                for (fd, event) in self._poll.poll(1000):
                    if event == select.POLLIN:
                        try:
                            fdo = self._fdmap[fd]
                            self._ProcessInput(fdo)
                        except pyrad.server.ServerPacketError as err:
                            logger.info("pyrad server dropping packet: " + str(err))
                        except pyrad.packet.PacketError as err:
                            logger.info("pyrad server received invalid packet: " + str(err))
                    else:
                        logger.error("Unexpected event in pyrad server main loop")

    srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
                     authport=18138, acctport=18139)
    srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
                                                     "radius",
                                                     "localhost")
    srv.BindToAddress("")
    t_events = {}
    t_events['stop'] = threading.Event()
    t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
    t.start()

    try:
        ssid = "test-wpa2-psk"
        params = hostapd.radius_params()
        params['ssid'] = ssid
        params["wpa"] = "2"
        params["wpa_key_mgmt"] = "WPA-PSK"
        params["rsn_pairwise"] = "CCMP"
        params['macaddr_acl'] = '2'
        params['wpa_psk_radius'] = '2'
        params['auth_server_port'] = "18138"
        hapd = hostapd.add_ap(apdev[0], params)
        # No wait_connect: the invalid PSK must not lead to a connection.
        dev[0].connect(ssid, psk="12345678", scan_freq="2412",
                       wait_connect=False)
        time.sleep(1)
    finally:
        t_events['stop'].set()
        t.join()
def test_radius_auth_force_client_addr(dev, apdev):
    """RADIUS client address specified"""
    # Pin the local address hostapd uses for RADIUS traffic and verify
    # that a normal EAP connection still succeeds.
    ap_params = hostapd.wpa2_eap_params(ssid="radius-auth")
    ap_params['radius_client_addr'] = "127.0.0.1"
    hostapd.add_ap(apdev[0], ap_params)
    connect(dev[0], "radius-auth")
def test_radius_auth_force_invalid_client_addr(dev, apdev):
    """RADIUS client address specified and invalid address"""
    # Force an address hostapd cannot bind for RADIUS; EAP must start but
    # the connection must never complete.
    ap_params = hostapd.wpa2_eap_params(ssid="radius-auth")
    #params['radius_client_addr'] = "10.11.12.14"
    ap_params['radius_client_addr'] = "1::2"
    hostapd.add_ap(apdev[0], ap_params)
    connect(dev[0], "radius-auth", wait_connect=False)
    if dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"]) is None:
        raise Exception("Timeout on EAP start")
    if dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=1) is not None:
        raise Exception("Unexpected connection")
def add_message_auth(req):
    """Add a valid Message-Authenticator attribute (RFC 2869) to *req*.

    The HMAC-MD5 covers code | id | length | authenticator | attributes,
    with the Message-Authenticator itself zero-filled during computation
    and replaced by the real digest afterwards.
    """
    req.authenticator = req.CreateAuthenticator()
    # NOTE(review): hmac.new() without digestmod defaults to MD5 only on
    # Python 2; Python 3 requires an explicit digestmod.
    hmac_obj = hmac.new(req.secret)
    hmac_obj.update(struct.pack("B", req.code))
    hmac_obj.update(struct.pack("B", req.id))
    # request attributes
    req.AddAttribute("Message-Authenticator",
                     "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
    attrs = req._PktEncodeAttributes()
    # Length
    flen = 4 + 16 + len(attrs)
    hmac_obj.update(struct.pack(">H", flen))
    hmac_obj.update(req.authenticator)
    hmac_obj.update(attrs)
    # Swap the zero placeholder (attribute type 80) for the real digest.
    del req[80]
    req.AddAttribute("Message-Authenticator", hmac_obj.digest())
def test_radius_server_failures(dev, apdev):
    """RADIUS server failure cases"""
    try:
        import pyrad.client
        import pyrad.packet
        import pyrad.dictionary
    except ImportError:
        raise HwsimSkip("No pyrad modules available")

    dict = pyrad.dictionary.Dictionary("dictionary.radius")
    # Talk directly to the hostapd RADIUS server running on localhost.
    client = pyrad.client.Client(server="127.0.0.1", authport=1812,
                                 secret="radius", dict=dict)
    client.retries = 1
    client.timeout = 1

    # unexpected State attribute -> server must reject
    req = client.CreateAuthPacket(code=pyrad.packet.AccessRequest,
                                  User_Name="foo")
    req['State'] = 'foo-state'
    add_message_auth(req)
    reply = client.SendPacket(req)
    if reply.code != pyrad.packet.AccessReject:
        raise Exception("Unexpected RADIUS response code " + str(reply.code))

    # no EAP-Message attribute -> server is expected to drop the request
    # silently, so the client times out
    req = client.CreateAuthPacket(code=pyrad.packet.AccessRequest,
                                  User_Name="foo")
    add_message_auth(req)
    try:
        reply = client.SendPacket(req)
        raise Exception("Unexpected response")
    except pyrad.client.Timeout:
        pass
def test_ap_vlan_wpa2_psk_radius_required(dev, apdev):
    """AP VLAN with WPA2-PSK and RADIUS attributes required"""
    try:
        import pyrad.server
        import pyrad.packet
        import pyrad.dictionary
    except ImportError:
        raise HwsimSkip("No pyrad modules available")

    class TestServer(pyrad.server.Server):
        # Fake RADIUS server: 'long' controls whether the VLAN tunnel
        # attributes are included; 'extra' adds CUI/User-Name attributes.
        def _HandleAuthPacket(self, pkt):
            pyrad.server.Server._HandleAuthPacket(self, pkt)
            logger.info("Received authentication request")
            reply = self.CreateReplyPacket(pkt)
            reply.code = pyrad.packet.AccessAccept
            secret = reply.secret
            if self.t_events['extra'].is_set():
                reply.AddAttribute("Chargeable-User-Identity", "test-cui")
                reply.AddAttribute("User-Name", "test-user")
            if self.t_events['long'].is_set():
                reply.AddAttribute("Tunnel-Type", 13)
                reply.AddAttribute("Tunnel-Medium-Type", 6)
                reply.AddAttribute("Tunnel-Private-Group-ID", "1")
            self.SendReplyPacket(pkt.fd, reply)

        def RunWithStop(self, t_events):
            # Poll-based main loop terminated through the 'stop' event.
            self._poll = select.poll()
            self._fdmap = {}
            self._PrepareSockets()
            self.t_events = t_events
            while not t_events['stop'].is_set():
                for (fd, event) in self._poll.poll(1000):
                    if event == select.POLLIN:
                        try:
                            fdo = self._fdmap[fd]
                            self._ProcessInput(fdo)
                        except pyrad.server.ServerPacketError as err:
                            logger.info("pyrad server dropping packet: " + str(err))
                        except pyrad.packet.PacketError as err:
                            logger.info("pyrad server received invalid packet: " + str(err))
                    else:
                        logger.error("Unexpected event in pyrad server main loop")

    srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
                     authport=18138, acctport=18139)
    srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
                                                     "radius",
                                                     "localhost")
    srv.BindToAddress("")
    t_events = {}
    t_events['stop'] = threading.Event()
    t_events['long'] = threading.Event()
    t_events['extra'] = threading.Event()
    t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
    t.start()

    try:
        ssid = "test-wpa2-psk"
        params = hostapd.radius_params()
        params['ssid'] = ssid
        params["wpa"] = "2"
        params["wpa_key_mgmt"] = "WPA-PSK"
        params["rsn_pairwise"] = "CCMP"
        params['macaddr_acl'] = '2'
        params['dynamic_vlan'] = "2"
        params['wpa_passphrase'] = '0123456789abcdefghi'
        params['auth_server_port'] = "18138"
        hapd = hostapd.add_ap(apdev[0], params)

        # Reply without any VLAN attributes -> association must fail
        logger.info("connecting without VLAN")
        dev[0].connect(ssid, psk="0123456789abcdefghi", scan_freq="2412",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED",
                                "CTRL-EVENT-SSID-TEMP-DISABLED"], timeout=20)
        if ev is None:
            raise Exception("Timeout on connection attempt")
        if "CTRL-EVENT-CONNECTED" in ev:
            raise Exception("Unexpected success without vlan parameters")
        logger.info("connecting without VLAN failed as expected")

        # CUI/User-Name alone are not sufficient either
        logger.info("connecting without VLAN (CUI/User-Name)")
        t_events['extra'].set()
        dev[1].connect(ssid, psk="0123456789abcdefghi", scan_freq="2412",
                       wait_connect=False)
        ev = dev[1].wait_event(["CTRL-EVENT-CONNECTED",
                                "CTRL-EVENT-SSID-TEMP-DISABLED"], timeout=20)
        if ev is None:
            raise Exception("Timeout on connection attempt")
        if "CTRL-EVENT-CONNECTED" in ev:
            raise Exception("Unexpected success without vlan parameters(2)")
        logger.info("connecting without VLAN failed as expected(2)")
        t_events['extra'].clear()

        # Full tunnel attribute set -> association must succeed
        t_events['long'].set()
        logger.info("connecting with VLAN")
        dev[2].connect(ssid, psk="0123456789abcdefghi", scan_freq="2412",
                       wait_connect=False)
        ev = dev[2].wait_event(["CTRL-EVENT-CONNECTED",
                                "CTRL-EVENT-SSID-TEMP-DISABLED"], timeout=20)
        if ev is None:
            raise Exception("Timeout on connection attempt")
        if "CTRL-EVENT-SSID-TEMP-DISABLED" in ev:
            raise Exception("Unexpected failure with vlan parameters")
        logger.info("connecting with VLAN succeeded as expected")
    finally:
        t_events['stop'].set()
        t.join()
def test_radius_mppe_failure(dev, apdev):
    """RADIUS failure when adding MPPE keys"""
    # The second AP instance acts as the RADIUS/EAP authentication server.
    params = { "ssid": "as", "beacon_int": "2000",
               "radius_server_clients": "auth_serv/radius_clients.conf",
               "radius_server_auth_port": '18127',
               "eap_server": "1",
               "eap_user_file": "auth_serv/eap_user.conf",
               "ca_cert": "auth_serv/ca.pem",
               "server_cert": "auth_serv/server.pem",
               "private_key": "auth_serv/server.key" }
    authsrv = hostapd.add_ap(apdev[1], params)

    params = hostapd.wpa2_eap_params(ssid="test-wpa2-eap")
    params['auth_server_port'] = "18127"
    hapd = hostapd.add_ap(apdev[0], params)

    # Force os_get_random() to fail while the server is adding the MPPE key
    # attributes; the station must end up disconnected.
    with fail_test(authsrv, 1, "os_get_random;radius_msg_add_mppe_keys"):
        dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", eap="TTLS",
                       identity="user", anonymous_identity="ttls",
                       password="password",
                       ca_cert="auth_serv/ca.pem", phase2="autheap=GTC",
                       wait_connect=False, scan_freq="2412")
        dev[0].wait_disconnected()
        dev[0].request("REMOVE_NETWORK all")
|
active_count.py | import threading
import time
import random
def myThread(i):
    """Demo worker: announce start, idle for 1-5 s, announce finish."""
    print(f'Thread {i} started')
    pause = random.randint(1, 5)
    time.sleep(pause)
    print(f'Thread {i} finished')
def main():
    """Spawn a random number of workers, wait 4 s, report the live count."""
    worker_count = random.randint(2, 50)
    for idx in range(worker_count):
        threading.Thread(target=myThread, args=(idx,)).start()
    time.sleep(4)
    # main thread 1 + myThread 14 = 15
    print(f'Total Number of Active Threads: {threading.active_count()}')
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
'''
...
Total Number of Active Threads: 15
Thread 0 finished
Thread 3 finished
Thread 1 finished
Thread 4 finished
Thread 14 finished
Thread 17 finished
Thread 21 finished
Thread 25 finished
Thread 38 finished
Thread 34 finished
Thread 31 finished
Thread 43 finished
Thread 29 finished
Thread 41 finished
'''
|
app.py | import multiprocessing
from threading import Thread
from flask import Flask, render_template, request, jsonify
from flask_httpauth import HTTPBasicAuth
from werkzeug.security import check_password_hash
from dataclasses import dataclass
from datetime import datetime
import scanner
import json
import config
import scheduler
# flask server app
# Process-safe list the crawler workers append their findings to.
manager = multiprocessing.Manager()
vulnerable_websites = manager.list()
app = Flask(__name__)
auth = HTTPBasicAuth()
# Handles to the crawler processes started by run_workers().
processes = []
@auth.verify_password
def verify_password(username, password):
    """Return the username when the credentials match config.USERS, else None."""
    if username not in config.USERS:
        return None
    if check_password_hash(config.USERS.get(username), password):
        return username
@app.route('/')
@auth.login_required
def index():
    """ Render index.html with basic auth. """
    # get data for table: the 8 most recent findings joined with their
    # vulnerability metadata
    result = scanner.session.query(scanner.Server)\
        .join(scanner.Vulnerabilities, scanner.Server.vulnerability_id == scanner.Vulnerabilities.id)\
        .order_by(scanner.Server.timestamp.desc())\
        .limit(8)
    # Flatten the ORM rows into plain dicts for the template.
    records = []
    for record in result:
        records.append({
            'id': record.id,
            'url': record.url,
            'vulnerability_name': record.vulnerability.name,
            'severity': record.vulnerability.severity_level,
            'timestamp': record.timestamp,
            'confidence': record.confidence,
            'description': record.vulnerability.description})
    return render_template('index.html.j2', records=records)
@app.route('/api/hardware_monitor')
@auth.login_required
def hardware_monitor():
    """ Endpoint for frontend with information about CPU, RAM and disk usage """
    # Fix: @app.route must be the outermost decorator. With
    # @auth.login_required listed above it, Flask registered the *unwrapped*
    # view function, so the endpoint was reachable without authentication.
    # Use a context manager so the file handle is closed even on error.
    with open('./logs/hardware/hardware.json', 'r') as file:
        statistics = json.load(file)
    # json.dumps always emits valid JSON; the previous
    # str(statistics).replace("'", '"') hack broke on apostrophes inside
    # values and on True/False/None.
    return json.dumps({"STATISTICS": statistics})
# @auth.login_required
@app.route('/api/get_servers', methods=['GET'])
def servers_dump():
    """ Page with all results """
    # All filters arrive as query-string values (strings, or None if absent).
    vulnerability_id = request.args.get('vulnerability_id')  # int
    if vulnerability_id is None:
        # No filter requested: compare the column against itself so the
        # clause below matches every row.
        vulnerability_id = scanner.Server.vulnerability_id
    lowest_severity_level = request.args.get('lowest_severity_level')  # int
    highest_severity_level = request.args.get('highest_severity_level')  # int
    if lowest_severity_level is None:
        lowest_severity_level = 0
    if highest_severity_level is None:
        highest_severity_level = 10
    lowest_confidence = request.args.get('lowest_confidence')  # int
    highest_confidence = request.args.get('highest_confidence')  # int
    if lowest_confidence is None:
        lowest_confidence = 0
    if highest_confidence is None:
        highest_confidence = 10
    beginning_time_period = request.args.get('beginning_time_period')  # int
    ending_time_period = request.args.get('ending_time_period')  # int
    if beginning_time_period is None:
        beginning_time_period = '01-01-0001_00:00:00'
    if ending_time_period is None:
        ending_time_period = str(datetime.now().strftime('%d-%m-%Y_%H:%M:%S'))
    limit = request.args.get('limit')
    count = request.args.get('count')  # bool
    result = scanner.session.query(scanner.Server)
    try:
        # NOTE(review): the confidence bounds are compared without int(),
        # unlike the severity bounds — when supplied via the query string
        # they are strings; confirm the intended comparison semantics.
        result = result.join(scanner.Vulnerabilities) \
            .filter((int(lowest_severity_level) <= scanner.Vulnerabilities.severity_level)
                    & (scanner.Vulnerabilities.severity_level <= int(highest_severity_level))
                    & (datetime.strptime(beginning_time_period, '%d-%m-%Y_%H:%M:%S') <= scanner.Server.timestamp)
                    & (scanner.Server.timestamp <= datetime.strptime(ending_time_period, '%d-%m-%Y_%H:%M:%S'))
                    & (lowest_confidence <= scanner.Server.confidence)
                    & (scanner.Server.confidence <= highest_confidence)
                    & (scanner.Server.vulnerability_id == vulnerability_id))
    except ValueError:
        return 'Probably incorrect datetime format, should be %d-%m-%Y_%H:%M:%S', 400
    if count:
        # NOTE(review): any non-empty string (even "false") is truthy here.
        result = result.count()
        return str(result.real)
    serialized_list = []
    if limit is None:
        for record in result.order_by(scanner.Server.timestamp.desc()).all():
            serialized_list.append(record.serialize())
    else:
        try:
            for record in result.order_by(scanner.Server.timestamp.desc()).limit(limit):
                serialized_list.append(record.serialize())
        except ValueError:
            return 'Probably incorrect datetime format, should be %d-%m-%Y_%H:%M:%S', 400
    return json.dumps(serialized_list)
def run_workers():
    """ Start new threads with web_crawlers """
    # Idiom fix: iterate the URL queue directly instead of indexing via
    # range(len(...)). One crawler process per configured start URL; all
    # crawlers share the manager-backed vulnerable_websites list.
    for url in config.URL_QUEUE:
        crawler_obj = scanner.WebsiteCrawler([url],
                                             False,
                                             config.BLACK_LIST,
                                             vulnerable_websites)
        process = multiprocessing.Process(target=crawler_obj.run)
        process.start()
        processes.append(process)
    # Block this thread until every crawler process has finished.
    for process in processes:
        process.join()
# Launch the crawlers in a background thread so the Flask app can start.
Thread(target=run_workers).start()
|
md.py | import asyncio
import threading
import datetime
from functools import partial
import pandas as pd
from asyncio import AbstractEventLoop
from typing import List
from jotdx.consts import MARKET_SH, MARKET_SZ
from jotdx.quotes import Quotes
from joconst.maps import INTERVAL_TDX_MAP
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.database import DATETIME_TZ
from vnpy.trader.object import ContractData, HistoryRequest, BarData, SubscribeRequest, TickData
from vnpy.trader.constant import Product, Exchange, Interval
from vnpy.trader.utility import get_file_path, load_pickle, save_pickle
class MarketDataMD:
    """Market-data adapter: pulls contracts, ticks and history bars from
    the jotdx/TDX quote API and feeds them into a vnpy BaseGateway."""

    def __init__(self, gateway: BaseGateway):
        self.gateway = gateway
        self.api = None
        self.api_subscribe_req_list = []
        self.thread: threading.Thread = None
        self.loop: AbstractEventLoop = None
        # Per-product map of vt_symbol -> ContractData.
        self.contracts_dict = {
            Product.EQUITY: dict(),
            Product.BOND: dict(),
            Product.ETF: dict(),
        }
        self.save_contracts_pkl_file_name = f"{self.gateway.gateway_name}_contracts.pkl"

    def start_loop(self, loop):
        """
        Poll loop: query price/vol changes via jotdx (~3 s cadence) and
        synthesize ticks that carry no bid/ask information.
        """
        asyncio.set_event_loop(loop)
        try:
            self.gateway.write_log("行情线程中启动协程 loop ...")
            loop.run_forever()
        except BaseException as err:
            self.gateway.write_log("行情线程中启动协程 loop 出现问题!")
            self.gateway.write_log(err)

    def connect(self, bestip: bool):
        """Create the quote API, load contracts and start the event-loop thread."""
        self.api = Quotes.factory(market='std', bestip=bestip, heartbeat=True, multithread=True)
        self.query_contract()
        try:
            self.loop = asyncio.new_event_loop()  # create the loop in this thread (not running yet); start_loop() runs it
            self.thread = threading.Thread(target=self.start_loop, args=(self.loop,))  # run the event loop in a dedicated thread
            self.gateway.write_log("启动行情线程...")
            self.thread.start()
        except BaseException as err:
            self.gateway.write_log("行情线程启动出现问题!")
            self.gateway.write_log(err)

    def trans_tick_df_to_tick_data(self, tick_df, req: SubscribeRequest) -> TickData:
        """Convert the newest transaction row to a vnpy TickData.

        The symbol prefix selects the product bucket; bond and ETF prices
        are divided by 10 before use.
        """
        # buyorsell, 0 buy, 1 sell
        # buyorsell = tick_df['buyorsell'][0]
        if any(req.symbol.startswith(stock_code) for stock_code in ["688", "60", "002", "000", "300"]):
            last_price = tick_df['price'][0]
            name = self.contracts_dict[Product.EQUITY][req.vt_symbol].name
        elif any(req.symbol.startswith(bond_code) for bond_code in ["110", "113", "127", "128", "123"]):
            last_price = round(tick_df['price'][0] / 10, 2)
            name = self.contracts_dict[Product.BOND][req.vt_symbol].name
        elif any(req.symbol.startswith(etf_code) for etf_code in ["58", "51", "56", "15"]):
            last_price = round(tick_df['price'][0] / 10, 2)
            name = self.contracts_dict[Product.ETF][req.vt_symbol].name
        else:
            # Unknown symbol prefix: emit a placeholder tick.
            last_price = 0.0
            name = "未知"
        return TickData(
            gateway_name=self.gateway.gateway_name,
            name=name,
            symbol=req.symbol,
            exchange=req.exchange,
            datetime=datetime.datetime.now(DATETIME_TZ),
            volume=tick_df['vol'][0],
            # 'num' goes into turnover because in the bar generator turnover
            # is accumulated, while open_interest is taken as a snapshot
            turnover=tick_df['num'][0],
            last_price=last_price,
        )

    async def query_tick(self, req: SubscribeRequest):
        """Poll transactions for one symbol during trading hours and emit ticks."""
        client = Quotes.factory(market='std')
        loop = asyncio.get_event_loop()
        params = {"symbol": req.symbol, "start": 0, "offset": 1}
        last_tick_df = await loop.run_in_executor(None, partial(client.transaction, **params))
        tz = DATETIME_TZ
        tick_datetime = datetime.datetime.now(tz)
        # Today's trading-session boundaries (09:30-11:30 and 13:00-15:00).
        am_start_datetime = datetime.datetime(
            year=tick_datetime.year, month=tick_datetime.month, day=tick_datetime.day,
            hour=9, minute=30, second=0, microsecond=0, tzinfo=tz)
        am_end_datetime = datetime.datetime(
            year=tick_datetime.year, month=tick_datetime.month, day=tick_datetime.day,
            hour=11, minute=30, second=0, microsecond=0, tzinfo=tz)
        pm_start_datetime = datetime.datetime(
            year=tick_datetime.year, month=tick_datetime.month, day=tick_datetime.day,
            hour=13, minute=0, second=0, microsecond=0, tzinfo=tz)
        pm_end_datetime = datetime.datetime(
            year=tick_datetime.year, month=tick_datetime.month, day=tick_datetime.day,
            hour=15, minute=0, second=0, microsecond=0, tzinfo=tz)
        while True:
            if (am_start_datetime <= tick_datetime <= am_end_datetime) \
                    or (pm_start_datetime <= tick_datetime <= pm_end_datetime):
                # Emit a tick only when the newest row differs from the
                # previous one (append + drop_duplicates keeps one row when
                # nothing changed).
                df1 = await loop.run_in_executor(None, partial(client.transaction, **params))
                last_tick_df = last_tick_df.append(df1).drop_duplicates()
                if len(last_tick_df) > 1:
                    last_tick_df = df1
                    tick = self.trans_tick_df_to_tick_data(last_tick_df, req)
                    self.gateway.on_tick(tick)
                await asyncio.sleep(1.5)
                df2 = await loop.run_in_executor(None, partial(client.transaction, **params))
                last_tick_df = last_tick_df.append(df2).drop_duplicates()
                if len(last_tick_df) > 1:
                    last_tick_df = df2
                    tick = self.trans_tick_df_to_tick_data(last_tick_df, req)
                    self.gateway.on_tick(tick)
                await asyncio.sleep(1.5)
                # note: refresh the timestamp here
                tick_datetime = datetime.datetime.now(tz)
            else:
                # Outside trading hours the query only serves as a heartbeat
                _ = await loop.run_in_executor(None, partial(client.transaction, **params))
                await asyncio.sleep(3)
                tick_datetime = datetime.datetime.now(tz)

    @staticmethod
    def drop_unused_bond_df_row(df, unused_symbol_list):
        """Drop rows whose 'code' is in unused_symbol_list; no-op for an empty list."""
        if unused_symbol_list:
            return df[~df['code'].isin(unused_symbol_list)]
        return df

    def query_contract(self) -> None:
        """Load contracts from the local pickle cache (refreshed at most once
        per day) or query the SH/SZ markets and publish ContractData."""
        contract_pkl_file_path = get_file_path(self.save_contracts_pkl_file_name)
        if contract_pkl_file_path.exists():
            # Check the cache file's modification date; if it was already
            # updated today, load the local cache instead of querying.
            update_date = datetime.datetime.fromtimestamp(
                contract_pkl_file_path.stat().st_mtime).date()
            if update_date == datetime.date.today():
                self.gateway.write_log("行情接口开始加载本地合约信息 ...")
                self.contracts_dict = load_pickle(self.save_contracts_pkl_file_name)
                [[self.gateway.on_contract(contract) for contract in v.values()] for v in self.contracts_dict.values()]
                return
        try:
            self.gateway.write_log("行情接口开始获取合约信息 ...")
            # Shanghai market: split into stocks / bonds / ETFs by code pattern.
            sh_df = self.api.stocks(market=MARKET_SH)
            sh_stock_df = sh_df[sh_df['code'].str.contains("^((688)[\d]{3}|(60[\d]{4}))$")]
            sh_bond_df = sh_df[sh_df['code'].str.contains("^(110|113)[\d]{3}$")]
            sh_etf_df = sh_df[sh_df['code'].str.contains("^(58|51|56)[\d]{4}$")]
            # Shenzhen market, same split.
            sz_df = self.api.stocks(market=MARKET_SZ)
            sz_stock_df = sz_df[sz_df['code'].str.contains("^((002|000|300)[\d]{3})$")]
            sz_bond_df = sz_df[sz_df['code'].str.contains("^((127|128|123)[\d]{3})$")]
            sz_etf_df = sz_df[sz_df['code'].str.contains("^(15)[\d]{4}$")]
            # Remove specific bond codes that should not be published.
            sh_bond_df = self.drop_unused_bond_df_row(
                sh_bond_df,
                ["110801", "110802", "110804", "110807", "110808",
                 "110810", "110811", "110812", "110813",
                 "113633", "113634", "113635", "113636"]
            )
            # sz_bond_df = self.drop_unused_bond_df_row(sz_bond_df, ["110801", "110802"])
            exchange_list = [Exchange.SSE, Exchange.SZSE]
            for stock_df, exchange in zip([sh_stock_df, sz_stock_df], exchange_list):
                for row in stock_df.iterrows():
                    row = row[1]
                    contract: ContractData = ContractData(
                        symbol=row['code'],
                        exchange=exchange,
                        name=row["name"],
                        pricetick=0.01,
                        size=1,
                        min_volume=row['volunit'],
                        product=Product.EQUITY,
                        history_data=True,
                        gateway_name=self.gateway.gateway_name,
                    )
                    self.gateway.on_contract(contract)
                    self.contracts_dict[Product.EQUITY][contract.vt_symbol] = contract
            for bond_df, exchange in zip([sh_bond_df, sz_bond_df], exchange_list):
                for row in bond_df.iterrows():
                    row = row[1]
                    contract: ContractData = ContractData(
                        symbol=row['code'],
                        exchange=exchange,
                        name=row["name"],
                        pricetick=0.01,
                        size=1,
                        min_volume=row['volunit'],
                        product=Product.BOND,
                        history_data=True,
                        gateway_name=self.gateway.gateway_name,
                    )
                    self.gateway.on_contract(contract)
                    self.contracts_dict[Product.BOND][contract.vt_symbol] = contract
            for etf_df, exchange in zip([sh_etf_df, sz_etf_df], exchange_list):
                for row in etf_df.iterrows():
                    row = row[1]
                    contract: ContractData = ContractData(
                        symbol=row['code'],
                        exchange=exchange,
                        name=row["name"],
                        pricetick=0.01,
                        size=1,
                        min_volume=row['volunit'],
                        product=Product.ETF,
                        history_data=True,
                        gateway_name=self.gateway.gateway_name,
                    )
                    self.gateway.on_contract(contract)
                    self.contracts_dict[Product.ETF][contract.vt_symbol] = contract
            try:
                # Persist today's contracts so the next start can use the cache.
                save_pickle(self.save_contracts_pkl_file_name, self.contracts_dict)
                self.gateway.write_log("本地保存合约信息成功!")
            except BaseException as err:
                self.gateway.write_log("本地保存合约信息失败!")
                self.gateway.write_log(err)
        except Exception as e:
            self.gateway.write_log(f"jotdx 行情接口获取合约信息出错: {e}")

    def query_history(self, req: HistoryRequest) -> List[BarData]:
        """Fetch historical bars for *req*, paging around the 800-row API limit."""
        history = []
        # Estimate the number of bars to request from the wall-clock span.
        if req.end is None:
            offset = datetime.datetime.now(tz=DATETIME_TZ) - req.start
            offset = offset.days * 3600 * 4 + offset.seconds  # 4 trading hours per day, hence *4 not *24
        else:
            offset = req.end - req.start
            offset = offset.days * 3600 * 4 + offset.seconds
        # Convert the second count into a bar count for the interval.
        if req.interval == Interval.MINUTE:
            offset = offset / 60
        elif req.interval == Interval.MINUTE_5:
            offset = offset / 60 / 5
        elif req.interval == Interval.MINUTE_15:
            offset = offset / 60 / 15
        elif req.interval == Interval.MINUTE_30:
            offset = offset / 60 / 30
        elif req.interval == Interval.HOUR:
            offset = offset / 60 / 60
        elif req.interval == Interval.DAILY:
            offset = offset / 60 / 60 / 4
        elif req.interval == Interval.WEEKLY:
            offset = offset / 60 / 60 / 4 / 5
        offset_const = 800  # pytdx per-query maximum row count
        try:
            if offset > offset_const:
                start = 0
                df = self.api.bars(
                    symbol=req.symbol,
                    frequency=INTERVAL_TDX_MAP[req.interval],
                    offset=offset_const,
                    start=start
                )
                # Page backwards through history in chunks of offset_const,
                # prepending each older chunk to the accumulated frame.
                while offset > offset_const:
                    start += offset_const
                    offset -= offset_const
                    offset_const_df = self.api.bars(
                        symbol=req.symbol,
                        frequency=INTERVAL_TDX_MAP[req.interval],
                        offset=offset_const,
                        start=start
                    )
                    df = offset_const_df.append(df)
                    if len(offset_const_df) < offset_const:
                        # A short page means no more data is available.
                        offset = 0
                if offset > 0:
                    # Fetch the final partial page.
                    start += offset_const
                    res_df = self.api.bars(
                        symbol=req.symbol,
                        frequency=INTERVAL_TDX_MAP[req.interval],
                        offset=offset,
                        start=start
                    )
                    if len(res_df) != 0:
                        df = res_df.append(df)
            else:
                df = self.api.bars(
                    symbol=req.symbol,
                    frequency=INTERVAL_TDX_MAP[req.interval],
                    offset=int(offset)
                )
        except Exception as e:
            self.gateway.write_log(f"数据获取失败 {req}")
            self.gateway.write_log(f"Exception : {e}")
            return []
        if df.empty:
            return []
        # req.start/end are tz-aware, so localize the datetime column before comparing
        df['datetime'] = pd.to_datetime(df['datetime'])
        df.set_index('datetime', inplace=True)
        df = df.tz_localize(DATETIME_TZ)
        df.reset_index(inplace=True)
        df = df[(df['datetime'] >= req.start) & (df['datetime'] <= req.end + datetime.timedelta(days=1))]
        self.gateway.write_log(f"查询历史数据成功, {req.start} -> {req.end}, 共{len(df)}条数据, 开始转换数据...")
        for _, series in df.iterrows():
            history.append(
                BarData(
                    gateway_name=self.gateway.gateway_name,
                    symbol=req.symbol,
                    exchange=req.exchange,
                    datetime=series['datetime'],
                    interval=req.interval,
                    volume=series['vol'],
                    turnover=series['amount'],
                    open_interest=0.0,
                    open_price=series['open'],
                    high_price=series['high'],
                    low_price=series['low'],
                    close_price=series['close']
                )
            )
        return history

    def close(self):
        """Close the quote API connection if one was opened."""
        if self.api is not None:
            self.api.close()
            self.gateway.write_log("行情服务器断开连接")
|
tcp_server.py | #! /use/bin/env python
# -*- coding: utf-8 -*-
# Minimal threaded TCP echo-ACK server (Python 2 syntax).
import socket
import threading

# Listen on all interfaces, port 8080.
BIND_IP = "0.0.0.0"
BIND_PORT = 8080

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((BIND_IP, BIND_PORT))
server.listen(5)  # backlog of up to 5 pending connections

print "[*] Listening on %s:%d " % (BIND_IP, BIND_PORT)
def handle_client(client_socket):
request = client_socket.recv(1024)
print "[*] Received: %s" % request
client_socket.send("ACK!")
client_socket.close()
# Accept loop: one daemon-less thread per incoming connection;
# handle_client() closes the socket when it is done.
while True:
    client, addr = server.accept()
    print "[*] Accepted connection from %s:%d" % (addr[0], addr[1])
    client_handle = threading.Thread(target=handle_client, args=(client, ))
    client_handle.start()
|
scheduler.py | import Queue
import threading
import time
import json
from logging import getLogger, StreamHandler, Formatter, DEBUG, INFO, ERROR, FileHandler, NullHandler
import traceback
import os
import sys
import uuid
# On-disk snapshot of the queue contents — presumably consumed by
# Scheduler.load_json below; confirm against the rest of the module.
Q_JSON_FILENAME = 'qu.json'
# Directory where job results are written.
RESULT_PATH = '/home/muneel/Documents/results/'
class MLOGGER:
    """Thin wrapper around the stdlib logging module.

    Wraps a named logger with a fixed timestamped format; the constructor's
    logtype selects the handler set: 'CONSOLE' (stream), 'FILE', 'BOTH'
    or 'NONE'. The %(instance_id)s field of the format is supplied via the
    'extra' dict on every call.
    """

    @staticmethod
    def get_logger(name):
        """Return an MLOGGER for *name*; raise ValueError for a falsy name."""
        if not name:
            raise ValueError('Name parameter can not be empty.')
        return MLOGGER(name)

    @staticmethod
    def __create_stream_handler(level):
        # Console handler with the shared timestamped format.
        handler = StreamHandler()
        handler.setLevel(level)
        handler.setFormatter(
            Formatter('%(asctime)s - %(levelname)s - %(instance_id)s - %(message)s', '%Y-%m-%d %H:%M:%S'))
        return handler

    @staticmethod
    def __create_file_handler(level, filename):
        # Log file lives next to this module; mode='w' truncates on start.
        filename_path = str(os.path.dirname(os.path.realpath(__file__))) + '/' + str(filename)
        fileHandler = FileHandler(filename_path, mode='w')
        fileHandler.setLevel(level)
        fileHandler.setFormatter(
            Formatter('%(asctime)s - %(levelname)s - %(instance_id)s - %(message)s', '%Y-%m-%d %H:%M:%S'))
        return fileHandler

    def __init__(self, name, level=INFO, logtype='CONSOLE', filename=None):
        # logtype : {'CONSOLE', 'FILE', 'BOTH', 'NONE'}
        # level : {INFO, DEBUG, ERROR}
        self.user_variables = {}
        # Fills the %(instance_id)s field of the log format.
        self.user_variables['instance_id'] = self.__class__.__name__
        self.logger = getLogger(name)
        self.logger.setLevel(level)
        if logtype == 'CONSOLE':
            self.logger.addHandler(MLOGGER.__create_stream_handler(level))
        elif logtype == 'FILE':
            # Fixed: validate first and raise directly. The previous code
            # had an unreachable sys.exit() after the raise statement.
            if filename is None:
                raise ValueError('filename cannot be empty')
            self.logger.addHandler(MLOGGER.__create_file_handler(level, filename))
        elif logtype == 'BOTH':
            self.logger.addHandler(MLOGGER.__create_stream_handler(level))
            if filename is None:
                raise ValueError('filename cannot be empty')
            self.logger.addHandler(MLOGGER.__create_file_handler(level, filename))
        elif logtype == 'NONE':
            self.logger.addHandler(NullHandler())

    def __set_message(self, message):
        # Prefix a function name taken from an outer frame of the current
        # call stack. NOTE(review): tb[1] is the second-outermost frame,
        # not the direct caller — confirm the intended frame index.
        tb = traceback.extract_stack()
        return (tb[1][2] + ' - ' + message)

    def debug(self, message):
        self.logger.debug(self.__set_message(message), extra=self.user_variables)

    def info(self, message):
        self.logger.info(self.__set_message(message), extra=self.user_variables)

    def warn(self, message):
        # Logger.warn() is deprecated; delegate to warning() (same behavior).
        self.logger.warning(self.__set_message(message), extra=self.user_variables)

    def error(self, message):
        self.logger.error(self.__set_message(message), extra=self.user_variables)
class Scheduler(MLOGGER):
    """FIFO work scheduler.

    Work items are dictionaries. They live in an in-memory queue consumed
    by a daemon worker thread and are mirrored to Q_JSON_FILENAME so
    pending work survives a restart; each item also gets a
    RESULT_PATH/<uid>/result.json status file that can be polled later.
    """

    def __init__(self):
        MLOGGER.__init__(self, 'Scheduler', level=DEBUG, logtype='CONSOLE', filename='scheduler_log.log')
        # Local import keeps this class working on both Python 2 (``Queue``)
        # and Python 3 (``queue``); the module-level `import Queue` is py2-only.
        try:
            import queue as _queue_mod
        except ImportError:
            import Queue as _queue_mod
        self.q = _queue_mod.Queue()
        # Advisory flag: while True the worker loop leaves the queue alone.
        self.lock = False

    def load_json(self, filename):
        """Load JSON from *filename*.

        Args:
            filename (str): path of the json file.
        Returns:
            The parsed object, or None when the file cannot be read or
            parsed (the error is logged, not raised).
        """
        try:
            # `with` already closes the file; the explicit close() in the
            # original was redundant.
            with open(filename) as data_file:
                return json.load(data_file)
        except Exception as e:
            self.error('%s' % e)
            return None

    def dump_json(self, filename, data):
        """Write *data* to *filename* as JSON; errors are logged, not raised.

        Args:
            filename (str): path of the json file.
            data (dict/list): JSON-serializable payload.
        """
        try:
            with open(filename, 'w') as outfile:
                json.dump(data, outfile)
        except Exception as e:
            self.error('%s' % e)

    def add_work(self, work, load_work=False):
        """Add one work item to the queue (and persist it).

        Args:
            work (dict): e.g. ``{'name': 'John'}``.
            load_work (bool): True only when re-adding items that are
                already persisted (initial start via load_work()); skips
                preparation and persistence.
        Returns:
            dict: the work item, augmented with uid/status when prepared.
        """
        self.info('Received Work %s' % work)
        if load_work is False:
            self.__lock_queue()
            work = self.prepare_work(work)
            # Bugfix: a missing/corrupt queue file makes load_json return
            # None; start a fresh list instead of crashing on .append().
            data = self.load_json(Q_JSON_FILENAME) or []
            self.debug('Current json queue : %s' % data)
            data.append(work)
            self.debug('Added %s to json file' % work)
            self.dump_json(Q_JSON_FILENAME, data)
            self.__unlock_queue()
        self.q.put(work)
        return work

    def prepare_work(self, work):
        """Stamp *work* with a fresh uid and 'pending' status.

        Also creates RESULT_PATH/<uid>/ and writes result.json there so the
        result can be fetched later via get_result().

        Returns:
            dict: *work* with ``{'uid': ..., 'status': 'pending'}`` merged in.
        """
        uid = str(uuid.uuid4())
        self.debug('uid generated : %s' % uid)
        work.update({'uid': uid, 'status': 'pending'})
        self.debug('work updated : %s' % work)
        os.mkdir(RESULT_PATH + uid)  # NOTE(review): assumes RESULT_PATH exists
        self.debug('directory created %s' % (RESULT_PATH + uid))
        json_file = RESULT_PATH + work['uid'] + '/result.json'
        self.dump_json(json_file, work)
        self.debug('result.json created : %s' % json_file)
        return work

    def remove_work(self):
        """Not supported: items cannot currently be removed once queued."""
        pass

    def get_work(self):
        """Log and return a snapshot of the queued work items.

        Returns:
            list: current queue contents, oldest first.
        """
        self.info('Start of current queue')
        cur_queue = []
        for work in list(self.q.queue):
            self.info('%s' % work)
            cur_queue.append(work)
        self.info('End of current queue')
        return cur_queue

    def get_result(self, work):
        """Return the persisted result dict for *work*, looked up by uid.

        Args:
            work (dict): must contain 'uid'.
        Returns:
            dict: contents of <uid>/result.json (status + original fields).
        """
        self.info('Getting result for %s :' % work['uid'])
        json_file = RESULT_PATH + work['uid'] + '/result.json'
        result = self.load_json(json_file)
        self.debug('Result : %s' % result)
        return result

    def __lock_queue(self):
        """Raise the advisory lock flag; the worker pauses while it is set."""
        self.debug('Queue Locked')
        self.lock = True
        # Bugfix: the original returned self.lock_queue, an attribute that
        # never existed, so every call raised AttributeError.
        return self.lock

    def __unlock_queue(self):
        """Clear the advisory lock flag."""
        self.debug('Queue Unlocked')
        self.lock = False
        return self.lock

    def work(self):
        """Worker loop: pop items, run them, persist their 'done' status."""
        while True:
            while not self.q.empty() and self.lock is False:
                work = self.q.get()
                self.info('Working on : %s' % work)
                # Drop the item from the persisted queue file as well.
                # (Bugfix: py2-only `print data` removed; None handled.)
                data = self.load_json(Q_JSON_FILENAME) or []
                if data:
                    data.pop(0)
                self.debug('Current json file data: %s' % data)
                self.dump_json(Q_JSON_FILENAME, data)
                json_file = RESULT_PATH + work['uid'] + '/result.json'
                # Do your work here
                time.sleep(10)
                work['status'] = 'done'
                self.dump_json(json_file, work)
                self.debug('result.json updated : %s' % json_file)
            # Bugfix: the original busy-spun at 100% CPU when idle.
            time.sleep(0.1)

    def start_work(self):
        """Start the daemon worker thread running work()."""
        self.debug('Starting Scheduler thread')
        t = threading.Thread(name="ConsumerThread-", target=self.work, args=())
        t.daemon = True
        t.start()

    def load_work(self):
        """Re-queue persisted work from Q_JSON_FILENAME (first start only)."""
        self.info('Loading work from json file')
        data = self.load_json(Q_JSON_FILENAME)
        self.debug('Data to be added to queue : %s' % data)
        # Bugfix: a missing/empty file returned None and crashed add_works.
        if data:
            self.add_works(data)

    def add_works(self, data):
        """Queue each item of *data* without re-preparing or re-persisting.

        Args:
            data (list): list of already-prepared work dicts.
        """
        for d in data:
            self.add_work(d, True)
        return
'''
s = Scheduler()
s.load_work()
#s.add_work({'name' : 'karrar'})
s.get_work()
s.start_work()
time.sleep(10)
w = s.add_work({'name' : 'khawaja'})
s.get_work()
time.sleep(5)
#print s.get_result(w)
while True:
time.sleep(1)
'''
|
test_socket.py | import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
MAIN_TIMEOUT = 60.0
VSOCKPORT = 1234
try:
import _socket
except ImportError:
_socket = None
def get_cid():
    """Return the local VSOCK context ID, or None when it cannot be read."""
    if fcntl is None:
        return None
    try:
        with open("/dev/vsock", "rb") as dev:
            raw = fcntl.ioctl(dev, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, "    ")
    except OSError:
        return None
    return struct.unpack("I", raw)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
    """Check whether AF_VSOCK sockets are supported on this host."""
    # A readable local context ID implies working VSOCK support.
    return get_cid() is not None
def _is_fd_in_blocking_mode(sock):
return not bool(
fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
# Feature probes evaluated once at import time; consumed by the skip
# decorators on the test classes below.
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
    # Fixture: a fresh listening TCP/IPv4 server socket per test, bound to
    # an ephemeral port stored in self.port.
    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # support.bind_port picks a free port and returns its number.
        self.port = support.bind_port(self.serv)
        self.serv.listen()

    def tearDown(self):
        self.serv.close()
        self.serv = None
class SocketUDPTest(unittest.TestCase):
    # Fixture: a fresh bound UDP/IPv4 socket per test (no listen for UDP).
    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.port = support.bind_port(self.serv)

    def tearDown(self):
        self.serv.close()
        self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
    """Subclass of unittest.TestCase with thread-safe cleanup methods.
    This subclass protects the addCleanup() and doCleanups() methods
    with a recursive lock.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # RLock so a cleanup callback may itself register further cleanups.
        self._cleanup_lock = threading.RLock()

    def addCleanup(self, *args, **kwargs):
        with self._cleanup_lock:
            return super().addCleanup(*args, **kwargs)

    def doCleanups(self, *args, **kwargs):
        with self._cleanup_lock:
            return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
    """To be able to run this test, a `vcan0` CAN interface can be created with
    the following commands:
    # modprobe vcan
    # ip link add dev vcan0 type vcan
    # ifconfig vcan0 up
    """
    interface = 'vcan0'
    bufsize = 128

    """The CAN frame structure is defined in <linux/can.h>:
    struct can_frame {
        canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
        __u8    can_dlc; /* data length code: 0 .. 8 */
        __u8    data[8] __attribute__((aligned(8)));
    };
    """
    can_frame_fmt = "=IB3x8s"
    can_frame_size = struct.calcsize(can_frame_fmt)

    """The Broadcast Management Command frame structure is defined
    in <linux/can/bcm.h>:
    struct bcm_msg_head {
        __u32 opcode;
        __u32 flags;
        __u32 count;
        struct timeval ival1, ival2;
        canid_t can_id;
        __u32 nframes;
        struct can_frame frames[0];
    }
    `bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
    `struct can_frame` definition). Must use native not standard types for packing.
    """
    bcm_cmd_msg_fmt = "@3I4l2I"
    # Pad the format out to an 8-byte multiple for the alignment noted above.
    bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)

    def setUp(self):
        self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
        self.addCleanup(self.s.close)
        try:
            self.s.bind((self.interface,))
        except OSError:
            # Missing vcan0 means the environment isn't prepared; skip.
            self.skipTest('network interface `%s` does not exist' %
                          self.interface)
class SocketRDSTest(unittest.TestCase):
    """To be able to run this test, the `rds` kernel module must be loaded:
    # modprobe rds
    """
    bufsize = 8192

    def setUp(self):
        self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
        self.addCleanup(self.serv.close)
        try:
            self.port = support.bind_port(self.serv)
        except OSError:
            # Kernel module present but bind refused -> environment not ready.
            self.skipTest('unable to bind RDS socket')
class ThreadableTest:
    """Threadable Test class
    The ThreadableTest class makes it easy to create a threaded
    client/server pair from an existing unit test. To create a
    new threaded class from an existing unit test, use multiple
    inheritance:
        class NewClass (OldClass, ThreadableTest):
            pass
    This class defines two new fixture functions with obvious
    purposes for overriding:
        clientSetUp ()
        clientTearDown ()
    Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
    '_' to indicate the client portion of the test. Ex:
        def testFoo(self):
            # Server portion
        def _testFoo(self):
            # Client portion
    Any exceptions raised by the clients during their tests
    are caught and transferred to the main thread to alert
    the testing framework.
    Note, the server setup function cannot call any blocking
    functions that rely on the client thread during setup,
    unless serverExplicitReady() is called just before
    the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp().
    """

    def __init__(self):
        # Swap the true setup function so _setUp/_tearDown run the threaded
        # choreography around the subclass's own setUp/tearDown.
        self.__setUp = self.setUp
        self.__tearDown = self.tearDown
        self.setUp = self._setUp
        self.tearDown = self._tearDown

    def serverExplicitReady(self):
        """This method allows the server to explicitly indicate that
        it wants the client thread to proceed. This is useful if the
        server is about to execute a blocking routine that is
        dependent upon the client thread during its setup routine."""
        self.server_ready.set()

    def _setUp(self):
        self.wait_threads = support.wait_threads_exit()
        self.wait_threads.__enter__()
        # Events coordinating server and client halves of each test.
        self.server_ready = threading.Event()
        self.client_ready = threading.Event()
        self.done = threading.Event()
        # Size-1 queue transports the first client-side exception back here.
        self.queue = queue.Queue(1)
        self.server_crashed = False

        # Do some munging to start the client test.
        # Derive the client method name: "testFoo" pairs with "_testFoo".
        methodname = self.id()
        i = methodname.rfind('.')
        methodname = methodname[i+1:]
        test_method = getattr(self, '_' + methodname)
        self.client_thread = thread.start_new_thread(
            self.clientRun, (test_method,))

        try:
            self.__setUp()
        except:
            # Flag the crash so the client half bails out instead of hanging.
            self.server_crashed = True
            raise
        finally:
            self.server_ready.set()
        self.client_ready.wait()

    def _tearDown(self):
        self.__tearDown()
        self.done.wait()
        self.wait_threads.__exit__(None, None, None)

        # Re-raise the client thread's exception in the main thread, if any.
        if self.queue.qsize():
            exc = self.queue.get()
            raise exc

    def clientRun(self, test_func):
        self.server_ready.wait()
        try:
            self.clientSetUp()
        except BaseException as e:
            self.queue.put(e)
            self.clientTearDown()
            return
        finally:
            self.client_ready.set()
        if self.server_crashed:
            self.clientTearDown()
            return
        if not hasattr(test_func, '__call__'):
            raise TypeError("test_func must be a callable function")
        try:
            test_func()
        except BaseException as e:
            self.queue.put(e)
        finally:
            self.clientTearDown()

    def clientSetUp(self):
        raise NotImplementedError("clientSetUp must be implemented.")

    def clientTearDown(self):
        # Signal the server half, then terminate this client thread.
        self.done.set()
        thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
    # TCP server fixture plus a client-thread TCP socket (self.cli).
    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
    # UDP server fixture plus a client-thread UDP socket (self.cli).
    def __init__(self, methodName='runTest'):
        SocketUDPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
    # CAN server fixture plus a client-thread CAN socket bound to vcan0.
    def __init__(self, methodName='runTest'):
        SocketCANTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
        try:
            self.cli.bind((self.interface,))
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
    # RDS server fixture plus a bound client-thread RDS socket.
    def __init__(self, methodName='runTest'):
        SocketRDSTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
        try:
            # RDS sockets must be bound explicitly to send or receive data
            self.cli.bind((HOST, 0))
            self.cli_addr = self.cli.getsockname()
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
                     'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
                     "This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
    # VSOCK stream test: server accepts in setUp, client connects by CID.
    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        self.addCleanup(self.serv.close)
        self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
        self.serv.listen()
        # Let the client proceed before the blocking accept() below.
        self.serverExplicitReady()
        self.conn, self.connaddr = self.serv.accept()
        self.addCleanup(self.conn.close)

    def clientSetUp(self):
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        self.addCleanup(self.cli.close)
        cid = get_cid()
        self.cli.connect((cid, VSOCKPORT))

    def testStream(self):
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)

    def _testStream(self):
        self.cli.send(MSG)
        self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
    """Socket tests for client-server connection.
    self.cli_conn is a client socket connected to the server.  The
    setUp() method guarantees that it is connected to the server.
    """
    def __init__(self, methodName='runTest'):
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)

    def setUp(self):
        ThreadedTCPSocketTest.setUp(self)
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn

    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        ThreadedTCPSocketTest.tearDown(self)

    def clientSetUp(self):
        ThreadedTCPSocketTest.clientSetUp(self)
        self.cli.connect((HOST, self.port))
        self.serv_conn = self.cli

    def clientTearDown(self):
        self.serv_conn.close()
        self.serv_conn = None
        ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
    # Fixture: a connected socketpair -- server end self.serv, client self.cli.
    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.serv, self.cli = socket.socketpair()

    def tearDown(self):
        self.serv.close()
        self.serv = None

    def clientSetUp(self):
        # The pair is already connected; nothing for the client to do.
        pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
    """A base class for socket tests.
    Subclasses must provide methods newSocket() to return a new socket
    and bindSock(sock) to bind it to an unused address.
    Creates a socket self.serv and sets self.serv_addr to its address.
    """
    def setUp(self):
        self.serv = self.newSocket()
        self.bindServer()

    def bindServer(self):
        """Bind server socket and set self.serv_addr to its address."""
        self.bindSock(self.serv)
        self.serv_addr = self.serv.getsockname()

    def tearDown(self):
        self.serv.close()
        self.serv = None
class SocketListeningTestMixin(SocketTestBase):
    """Mixin to listen on the server socket."""

    def setUp(self):
        super().setUp()
        self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
                              ThreadableTest):
    """Mixin to add client socket and allow client/server tests.
    Client socket is self.cli and its address is self.cli_addr.  See
    ThreadableTest for usage information.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.cli = self.newClientSocket()
        self.bindClient()

    def newClientSocket(self):
        """Return a new socket for use as client."""
        return self.newSocket()

    def bindClient(self):
        """Bind client socket and set self.cli_addr to its address."""
        self.bindSock(self.cli)
        self.cli_addr = self.cli.getsockname()

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
                               ThreadedSocketTestMixin):
    """Mixin to allow client/server stream tests with connected client.
    Server's socket representing connection to client is self.cli_conn
    and client's connection to server is self.serv_conn.  (Based on
    SocketConnectedTest.)
    """
    def setUp(self):
        super().setUp()
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn

    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        super().tearDown()

    def clientSetUp(self):
        super().clientSetUp()
        self.cli.connect(self.serv_addr)
        self.serv_conn = self.cli

    def clientTearDown(self):
        try:
            self.serv_conn.close()
            self.serv_conn = None
        except AttributeError:
            # clientSetUp may have failed before serv_conn was assigned.
            pass
        super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
    """Base class for Unix-domain socket tests."""

    # This class is used for file descriptor passing tests, so we
    # create the sockets in a private directory so that other users
    # can't send anything that might be problematic for a privileged
    # user running the tests.

    def setUp(self):
        self.dir_path = tempfile.mkdtemp()
        self.addCleanup(os.rmdir, self.dir_path)
        super().setUp()

    def bindSock(self, sock):
        # Socket path inside the private directory; unlinked on cleanup.
        path = tempfile.mktemp(dir=self.dir_path)
        support.bind_unix_socket(sock, path)
        self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
    """Base class for Unix-domain SOCK_STREAM tests."""

    def newSocket(self):
        return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
    """Base class for IPv4 socket tests."""

    host = HOST

    def setUp(self):
        super().setUp()
        # Convenience alias: port number of the bound server socket.
        self.port = self.serv_addr[1]

    def bindSock(self, sock):
        support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
    """Base class for TCP-over-IPv4 tests."""

    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
    """Base class for UDP-over-IPv4 tests."""

    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
    """Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""

    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                             socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
    """Base class for IPv6 socket tests."""

    host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
    """Base class for UDP-over-IPv6 tests."""

    def newSocket(self):
        return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
    """Skip decorated test if condition is true, add client_skip decorator.

    If the decorated object is not a class, sets its attribute
    "client_skip" to a decorator which will return an empty function
    if the test is to be skipped, or the original function if it is
    not.  This can be used to avoid running the client part of a
    skipped test when using ThreadableTest.
    """
    def client_pass(*args, **kwargs):
        # Empty stand-in for the client half of a skipped test.
        pass

    def skipdec(obj):
        wrapped = unittest.skip(reason)(obj)
        if not isinstance(obj, type):
            wrapped.client_skip = lambda f: client_pass
        return wrapped

    def noskipdec(obj):
        if isinstance(obj, type) or hasattr(obj, "client_skip"):
            return obj
        obj.client_skip = lambda f: f
        return obj

    if condition:
        return skipdec
    return noskipdec
def requireAttrs(obj, *attributes):
    """Skip decorated test if obj is missing any of the given attributes.

    Sets client_skip attribute as skipWithClientIf() does.
    """
    absent = [attr for attr in attributes if not hasattr(obj, attr)]
    reason = "don't have " + ", ".join(absent)
    return skipWithClientIf(absent, reason)
def requireSocket(*args):
    """Skip decorated test if a socket cannot be created with given arguments.

    When an argument is given as a string, will use the value of that
    attribute of the socket module, or skip the test if it doesn't
    exist.  Sets client_skip attribute as skipWithClientIf() does.
    """
    err = None
    unknown = [a for a in args if isinstance(a, str) and not hasattr(socket, a)]
    if unknown:
        err = "don't have " + ", ".join(name for name in unknown)
    else:
        resolved = [getattr(socket, a) if isinstance(a, str) else a
                    for a in args]
        try:
            probe = socket.socket(*resolved)
        except OSError as e:
            # XXX: check errno?
            err = str(e)
        else:
            probe.close()
    return skipWithClientIf(
        err is not None,
        "can't create socket({0}): {1}".format(
            ", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
    # socket.SocketType must be the _socket.socket C type itself.
    import _socket
    self.assertTrue(socket.SocketType is _socket.socket)
    s = socket.socket()
    self.assertIsInstance(s, socket.SocketType)
    s.close()
def test_repr(self):
    # repr() of an open socket shows fd/family/type/proto; laddr appears
    # once bound; after the `with` closes it, repr collapses to "[closed]".
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with s:
        self.assertIn('fd=%i' % s.fileno(), repr(s))
        self.assertIn('family=%s' % socket.AF_INET, repr(s))
        self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
        self.assertIn('proto=0', repr(s))
        self.assertNotIn('raddr', repr(s))
        s.bind(('127.0.0.1', 0))
        self.assertIn('laddr', repr(s))
        self.assertIn(str(s.getsockname()), repr(s))
    self.assertIn('[closed]', repr(s))
    self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
    # The low-level _socket.socket repr shows fd=-1 once closed.
    s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
    try:
        expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
                    % (s.fileno(), s.family, s.type, s.proto))
        self.assertEqual(repr(s), expected)
    finally:
        s.close()
    expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
                % (s.family, s.type, s.proto))
    self.assertEqual(repr(s), expected)
def test_weakref(self):
    # A weak proxy to a socket must die once the socket is dereferenced.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    p = proxy(s)
    self.assertEqual(p.fileno(), s.fileno())
    s.close()
    s = None
    try:
        p.fileno()
    except ReferenceError:
        pass
    else:
        self.fail('Socket proxy still exists')
def testSocketError(self):
    # Testing socket module exceptions
    # herror and gaierror are OSError subclasses, so OSError catches them.
    msg = "Error raising socket exception (%s)."
    with self.assertRaises(OSError, msg=msg % 'OSError'):
        raise OSError
    with self.assertRaises(OSError, msg=msg % 'socket.herror'):
        raise socket.herror
    with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
        raise socket.gaierror
def testSendtoErrors(self):
    # Testing that sendto doesn't mask failures. See #10169.
    # Each bad-argument shape must surface a precise TypeError message.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.addCleanup(s.close)
    s.bind(('', 0))
    sockname = s.getsockname()
    # 2 args
    with self.assertRaises(TypeError) as cm:
        s.sendto('\u2620', sockname)
    self.assertEqual(str(cm.exception),
                     "a bytes-like object is required, not 'str'")
    with self.assertRaises(TypeError) as cm:
        s.sendto(5j, sockname)
    self.assertEqual(str(cm.exception),
                     "a bytes-like object is required, not 'complex'")
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo', None)
    self.assertIn('not NoneType', str(cm.exception))
    # 3 args
    with self.assertRaises(TypeError) as cm:
        s.sendto('\u2620', 0, sockname)
    self.assertEqual(str(cm.exception),
                     "a bytes-like object is required, not 'str'")
    with self.assertRaises(TypeError) as cm:
        s.sendto(5j, 0, sockname)
    self.assertEqual(str(cm.exception),
                     "a bytes-like object is required, not 'complex'")
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo', 0, None)
    self.assertIn('not NoneType', str(cm.exception))
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo', 'bar', sockname)
    self.assertIn('an integer is required', str(cm.exception))
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo', None, None)
    self.assertIn('an integer is required', str(cm.exception))
    # wrong number of args
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo')
    self.assertIn('(1 given)', str(cm.exception))
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo', 0, sockname, 4)
    self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
    # Testing for mission critical constants
    # Merely referencing each name raises AttributeError if it is missing.
    socket.AF_INET
    socket.SOCK_STREAM
    socket.SOCK_DGRAM
    socket.SOCK_RAW
    socket.SOCK_RDM
    socket.SOCK_SEQPACKET
    socket.SOL_SOCKET
    socket.SO_REUSEADDR
def testHostnameRes(self):
    # Testing hostname resolution mechanisms
    # Round-trip: hostname -> IP -> reverse lookup -> fqdn consistency.
    hostname = socket.gethostname()
    try:
        ip = socket.gethostbyname(hostname)
    except OSError:
        # Probably name lookup wasn't set up right; skip this test
        self.skipTest('name lookup failure')
    self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
    try:
        hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
    except OSError:
        # Probably a similar problem as above; skip this test
        self.skipTest('name lookup failure')
    all_host_names = [hostname, hname] + aliases
    fqhn = socket.getfqdn(ip)
    if not fqhn in all_host_names:
        self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
    # Numeric IPv4 literals must resolve to themselves.
    for addr in [support.HOSTv4, '10.0.0.1', '255.255.255.255']:
        self.assertEqual(socket.gethostbyname(addr), addr)

    # we don't test support.HOSTv6 because there's a chance it doesn't have
    # a matching name entry (e.g. 'ip6-localhost')
    for host in [support.HOSTv4]:
        self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
    # These are all malformed IP addresses and expected not to resolve to
    # any result.  But some ISPs, e.g. AWS, may successfully resolve these
    # IPs.
    explanation = (
        "resolving an invalid IP address did not raise OSError; "
        "can be caused by a broken DNS server"
    )
    for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
                 '1:1:1:1:1:1:1:1:1']:
        with self.assertRaises(OSError, msg=addr):
            socket.gethostbyname(addr)
        with self.assertRaises(OSError, msg=explanation):
            socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
    # sethostname requires privileges; EPERM means "run as non-root" -> skip.
    oldhn = socket.gethostname()
    try:
        socket.sethostname('new')
    except OSError as e:
        if e.errno == errno.EPERM:
            self.skipTest("test should be run as root")
        else:
            raise
    try:
        # running test as root!
        self.assertEqual(socket.gethostname(), 'new')
        # Should work with bytes objects too
        socket.sethostname(b'bar')
        self.assertEqual(socket.gethostname(), 'bar')
    finally:
        # Always restore the original hostname.
        socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
                     'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
    # if_nametoindex/if_indextoname must round-trip every listed interface.
    interfaces = socket.if_nameindex()
    for index, name in interfaces:
        self.assertIsInstance(index, int)
        self.assertIsInstance(name, str)
        # interface indices are non-zero integers
        self.assertGreater(index, 0)
        _index = socket.if_nametoindex(name)
        self.assertIsInstance(_index, int)
        self.assertEqual(index, _index)
        _name = socket.if_indextoname(index)
        self.assertIsInstance(_name, str)
        self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
                     'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
    # test nonexistent interface index/name
    self.assertRaises(OSError, socket.if_indextoname, 0)
    self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
    # test with invalid values
    self.assertRaises(TypeError, socket.if_nametoindex, 0)
    self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
                     'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
    # Testing reference count for getnameinfo
    try:
        # On some versions, this loses a reference
        orig = sys.getrefcount(__name__)
        socket.getnameinfo(__name__, 0)
    except TypeError:
        # The TypeError is expected (bad argument); the refcount must not drop.
        if sys.getrefcount(__name__) != orig:
            self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
    @support.cpython_only
    def testNtoHErrors(self):
        # htons/ntohs operate on a C unsigned short and htonl/ntohl on a
        # C unsigned long: values outside those ranges must raise
        # OverflowError, and short values that only fit after truncation
        # are deprecated.  Uses CPython-internal _testcapi limits.
        import _testcapi
        s_good_values = [0, 1, 2, 0xffff]
        l_good_values = s_good_values + [0xffffffff]
        l_bad_values = [-1, -2, 1<<32, 1<<1000]
        s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
                                       _testcapi.INT_MAX + 1]
        s_deprecated_values = [1<<16, _testcapi.INT_MAX]
        for k in s_good_values:
            socket.ntohs(k)
            socket.htons(k)
        for k in l_good_values:
            socket.ntohl(k)
            socket.htonl(k)
        for k in s_bad_values:
            self.assertRaises(OverflowError, socket.ntohs, k)
            self.assertRaises(OverflowError, socket.htons, k)
        for k in l_bad_values:
            self.assertRaises(OverflowError, socket.ntohl, k)
            self.assertRaises(OverflowError, socket.htonl, k)
        for k in s_deprecated_values:
            self.assertWarns(DeprecationWarning, socket.ntohs, k)
            self.assertWarns(DeprecationWarning, socket.htons, k)
    def testGetServBy(self):
        # Find one well-known service in /etc/services (or equivalent)
        # and verify that getservbyname()/getservbyport() are consistent
        # in both directions, for tcp and (when present) udp.
        eq = self.assertEqual
        # Find one service that exists, then check all the related interfaces.
        # I've ordered this by protocols that have both a tcp and udp
        # protocol, at least for modern Linuxes.
        if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
            or sys.platform in ('linux', 'darwin')):
            # avoid the 'echo' service on this platform, as there is an
            # assumption breaking non-standard port/protocol entry
            services = ('daytime', 'qotd', 'domain')
        else:
            services = ('echo', 'daytime', 'domain')
        for service in services:
            try:
                port = socket.getservbyname(service, 'tcp')
                break
            except OSError:
                pass
        else:
            # none of the candidate services resolved at all
            raise OSError
        # Try same call with optional protocol omitted
        # Issue #26936: Android getservbyname() was broken before API 23.
        if (not hasattr(sys, 'getandroidapilevel') or
                sys.getandroidapilevel() >= 23):
            port2 = socket.getservbyname(service)
            eq(port, port2)
        # Try udp, but don't barf if it doesn't exist
        try:
            udpport = socket.getservbyname(service, 'udp')
        except OSError:
            udpport = None
        else:
            eq(udpport, port)
        # Now make sure the lookup by port returns the same service name
        # Issue #26936: Android getservbyport() is broken.
        # NOTE(review): port2 is only bound inside the API>=23 branch above;
        # on old Android this block is skipped via support.is_android, so
        # the name is never read unbound.
        if not support.is_android:
            eq(socket.getservbyport(port2), service)
        eq(socket.getservbyport(port, 'tcp'), service)
        if udpport is not None:
            eq(socket.getservbyport(udpport, 'udp'), service)
        # Make sure getservbyport does not accept out of range ports.
        self.assertRaises(OverflowError, socket.getservbyport, -1)
        self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
    @unittest.skipUnless(hasattr(socket, 'inet_pton'),
                         'test needs socket.inet_pton()')
    def testIPv4toString(self):
        # inet_aton() and inet_pton(AF_INET, ...) must produce the same
        # packed 4-byte form for valid dotted quads, and both must
        # reject malformed input.
        from socket import inet_aton as f, inet_pton, AF_INET
        g = lambda a: inet_pton(AF_INET, a)
        assertInvalid = lambda func,a: self.assertRaises(
            (OSError, ValueError), func, a
        )
        self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
        self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
        self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
        self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
        self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
        # bpo-29972: inet_pton() doesn't fail on AIX
        if not sys.platform.startswith('aix'):
            assertInvalid(f, '0.0.0.')
        assertInvalid(f, '300.0.0.0')
        assertInvalid(f, 'a.0.0.0')
        assertInvalid(f, '1.2.3.4.5')
        assertInvalid(f, '::1')
        self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
        self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
        self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
        self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
        assertInvalid(g, '0.0.0.')
        assertInvalid(g, '300.0.0.0')
        assertInvalid(g, 'a.0.0.0')
        assertInvalid(g, '1.2.3.4.5')
        assertInvalid(g, '::1')
    @unittest.skipUnless(hasattr(socket, 'inet_pton'),
                         'test needs socket.inet_pton()')
    def testIPv6toString(self):
        # inet_pton(AF_INET6, ...) must pack valid IPv6 literals
        # (including embedded IPv4 forms) into 16 bytes and reject
        # malformed ones.
        try:
            from socket import inet_pton, AF_INET6, has_ipv6
            if not has_ipv6:
                self.skipTest('IPv6 not available')
        except ImportError:
            self.skipTest('could not import needed symbols from socket')
        if sys.platform == "win32":
            try:
                inet_pton(AF_INET6, '::')
            except OSError as e:
                # WSAEINVAL: the stack has no IPv6 support at all
                if e.winerror == 10022:
                    self.skipTest('IPv6 might not be supported')
        f = lambda a: inet_pton(AF_INET6, a)
        assertInvalid = lambda a: self.assertRaises(
            (OSError, ValueError), f, a
        )
        self.assertEqual(b'\x00' * 16, f('::'))
        self.assertEqual(b'\x00' * 16, f('0::0'))
        self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
        self.assertEqual(
            b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
            f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
        )
        self.assertEqual(
            b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
            f('ad42:abc::127:0:254:2')
        )
        self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
        assertInvalid('0x20::')
        assertInvalid(':::')
        assertInvalid('::0::')
        assertInvalid('1::abc::')
        assertInvalid('1::abc::def')
        assertInvalid('1:2:3:4:5:6')
        assertInvalid('1:2:3:4:5:6:7:8:0')
        # bpo-29972: inet_pton() doesn't fail on AIX
        if not sys.platform.startswith('aix'):
            assertInvalid('1:2:3:4:5:6:')
            assertInvalid('1:2:3:4:5:6:7:8:')
        # IPv4-embedded forms
        self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
            f('::254.42.23.64')
        )
        self.assertEqual(
            b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
            f('42::a29b:254.42.23.64')
        )
        self.assertEqual(
            b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
            f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
        )
        assertInvalid('255.254.253.252')
        assertInvalid('1::260.2.3.0')
        assertInvalid('1::0.be.e.0')
        assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
        assertInvalid('::1.2.3.4:0')
        assertInvalid('0.100.200.0:3:4:5:6:7:8')
    @unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                         'test needs socket.inet_ntop()')
    def testStringToIPv4(self):
        # inet_ntoa() and inet_ntop(AF_INET, ...) must both turn a packed
        # 4-byte value (bytes or bytearray) into dotted-quad text, and
        # reject buffers that are not exactly 4 bytes.
        from socket import inet_ntoa as f, inet_ntop, AF_INET
        g = lambda a: inet_ntop(AF_INET, a)
        assertInvalid = lambda func,a: self.assertRaises(
            (OSError, ValueError), func, a
        )
        self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
        self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
        self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
        self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
        assertInvalid(f, b'\x00' * 3)
        assertInvalid(f, b'\x00' * 5)
        assertInvalid(f, b'\x00' * 16)
        self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
        self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
        self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
        self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
        assertInvalid(g, b'\x00' * 3)
        assertInvalid(g, b'\x00' * 5)
        assertInvalid(g, b'\x00' * 16)
        self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
    @unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                         'test needs socket.inet_ntop()')
    def testStringToIPv6(self):
        # inet_ntop(AF_INET6, ...) must render a packed 16-byte value in
        # canonical compressed form and reject buffers of any other size.
        try:
            from socket import inet_ntop, AF_INET6, has_ipv6
            if not has_ipv6:
                self.skipTest('IPv6 not available')
        except ImportError:
            self.skipTest('could not import needed symbols from socket')
        if sys.platform == "win32":
            try:
                inet_ntop(AF_INET6, b'\x00' * 16)
            except OSError as e:
                # WSAEINVAL: the stack has no IPv6 support at all
                if e.winerror == 10022:
                    self.skipTest('IPv6 might not be supported')
        f = lambda a: inet_ntop(AF_INET6, a)
        assertInvalid = lambda a: self.assertRaises(
            (OSError, ValueError), f, a
        )
        self.assertEqual('::', f(b'\x00' * 16))
        self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
        self.assertEqual(
            'aef:b01:506:1001:ffff:9997:55:170',
            f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
        )
        self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
        assertInvalid(b'\x12' * 15)
        assertInvalid(b'\x12' * 17)
        assertInvalid(b'\x12' * 4)
    # XXX The following don't test module-level functionality...
    def testSockName(self):
        # getsockname() on a socket bound to ("0.0.0.0", port) must
        # report the bound port and either the wildcard address or the
        # host's own address.
        port = support.find_unused_port()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        sock.bind(("0.0.0.0", port))
        name = sock.getsockname()
        # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
        # it reasonable to get the host's addr in addition to 0.0.0.0.
        # At least for eCos.  This is required for the S/390 to pass.
        try:
            my_ip_addr = socket.gethostbyname(socket.gethostname())
        except OSError:
            # Probably name lookup wasn't set up right; skip this test
            self.skipTest('name lookup failure')
        self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
        self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# We know a socket should start without reuse==0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
    def testCloseException(self):
        # Closing a socket whose underlying fd has already been closed
        # (here: by wrapping the same fileno in a second socket object
        # and closing that) must raise EBADF/ENOTSOCK, not succeed
        # silently.
        sock = socket.socket()
        sock.bind((socket._LOCALHOST, 0))
        socket.socket(fileno=sock.fileno()).close()
        try:
            sock.close()
        except OSError as err:
            # Winsock apparently raises ENOTSOCK
            self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
        else:
            self.fail("close() should raise EBADF/ENOTSOCK")
    def testNewAttributes(self):
        # testing .family, .type and .protocol
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.assertEqual(sock.family, socket.AF_INET)
        if hasattr(socket, 'SOCK_CLOEXEC'):
            # Platforms with SOCK_CLOEXEC may fold it into the reported
            # socket type, so accept either form.
            self.assertIn(sock.type,
                          (socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
                           socket.SOCK_STREAM))
        else:
            self.assertEqual(sock.type, socket.SOCK_STREAM)
        self.assertEqual(sock.proto, 0)
        sock.close()
    def test_getsockaddrarg(self):
        # Port numbers outside the 0-65535 range must be rejected with
        # OverflowError by the address-conversion code in bind().
        sock = socket.socket()
        self.addCleanup(sock.close)
        port = support.find_unused_port()
        big_port = port + 65536
        neg_port = port - 65536
        self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
        self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
        # Since find_unused_port() is inherently subject to race conditions, we
        # call it a couple times if necessary.
        for i in itertools.count():
            port = support.find_unused_port()
            try:
                sock.bind((HOST, port))
            except OSError as e:
                # retry on EADDRINUSE, but give up after 6 attempts
                if e.errno != errno.EADDRINUSE or i == 5:
                    raise
            else:
                break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
    @unittest.skipUnless(os.name == "nt", "Windows specific")
    @unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
                         'Loopback fast path support required for this test')
    def test_sio_loopback_fast_path(self):
        # SIO_LOOPBACK_FAST_PATH must be accepted with a boolean argument
        # (or skip on Windows versions that define but don't implement it)
        # and must reject a non-boolean argument with TypeError.
        s = socket.socket()
        self.addCleanup(s.close)
        try:
            s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
        except OSError as exc:
            WSAEOPNOTSUPP = 10045
            if exc.winerror == WSAEOPNOTSUPP:
                self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
                              "doesn't implemented in this Windows version")
            raise
        self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
    def testGetaddrinfo(self):
        # Broad smoke test of getaddrinfo(): result shape, accepted
        # host/port forms, family/type/proto/flags filters, keyword
        # arguments, and a couple of regression cases.
        try:
            socket.getaddrinfo('localhost', 80)
        except socket.gaierror as err:
            if err.errno == socket.EAI_SERVICE:
                # see http://bugs.python.org/issue1282647
                self.skipTest("buggy libc version")
            raise
        # len of every sequence is supposed to be == 5
        for info in socket.getaddrinfo(HOST, None):
            self.assertEqual(len(info), 5)
        # host can be a domain name, a string representation of an
        # IPv4/v6 address or None
        socket.getaddrinfo('localhost', 80)
        socket.getaddrinfo('127.0.0.1', 80)
        socket.getaddrinfo(None, 80)
        if support.IPV6_ENABLED:
            socket.getaddrinfo('::1', 80)
        # port can be a string service name such as "http", a numeric
        # port number or None
        # Issue #26936: Android getaddrinfo() was broken before API level 23.
        if (not hasattr(sys, 'getandroidapilevel') or
                sys.getandroidapilevel() >= 23):
            socket.getaddrinfo(HOST, "http")
        socket.getaddrinfo(HOST, 80)
        socket.getaddrinfo(HOST, None)
        # test family and socktype filters
        infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
        for family, type, _, _, _ in infos:
            self.assertEqual(family, socket.AF_INET)
            self.assertEqual(str(family), 'AddressFamily.AF_INET')
            self.assertEqual(type, socket.SOCK_STREAM)
            self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
        infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
        for _, socktype, _, _, _ in infos:
            self.assertEqual(socktype, socket.SOCK_STREAM)
        # test proto and flags arguments
        socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
        socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
        # a server willing to support both IPv4 and IPv6 will
        # usually do this
        socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                           socket.AI_PASSIVE)
        # test keyword arguments: each keyword call must match the
        # equivalent positional call
        a = socket.getaddrinfo(HOST, None)
        b = socket.getaddrinfo(host=HOST, port=None)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, socket.AF_INET)
        b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
        b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
        b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
        b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                               socket.AI_PASSIVE)
        b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
                               type=socket.SOCK_STREAM, proto=0,
                               flags=socket.AI_PASSIVE)
        self.assertEqual(a, b)
        # Issue #6697.
        self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
        # Issue 17269: test workaround for OS X platform bug segfault
        if hasattr(socket, 'AI_NUMERICSERV'):
            try:
                # The arguments here are undefined and the call may succeed
                # or fail.  All we care here is that it doesn't segfault.
                socket.getaddrinfo("localhost", None, 0, 0, 0,
                                   socket.AI_NUMERICSERV)
            except socket.gaierror:
                pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
    @unittest.skipUnless(support.is_resource_enabled('network'),
                         'network is not enabled')
    def test_idna(self):
        # Name-resolution functions must accept a non-ASCII (IDNA)
        # domain name.  Requires real network access.
        # Check for internet access before running test
        # (issue #12804, issue #25138).
        with support.transient_internet('python.org'):
            socket.gethostbyname('python.org')
        # these should all be successful
        domain = 'испытание.pythontest.net'
        socket.gethostbyname(domain)
        socket.gethostbyname_ex(domain)
        socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
        # this may not work if the forward lookup chooses the IPv6 address, as that doesn't
        # have a reverse entry yet
        # socket.gethostbyaddr('испытание.python.org')
    def check_sendall_interrupted(self, with_timeout):
        # Helper: verify that a signal raised while sendall() is blocked
        # either propagates the handler's exception or, with a timeout
        # set, surfaces as socket.timeout.
        # socketpair() is not strictly required, but it makes things easier.
        if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
            self.skipTest("signal.alarm and socket.socketpair required for this test")
        # Our signal handlers clobber the C errno by calling a math function
        # with an invalid domain value.
        def ok_handler(*args):
            self.assertRaises(ValueError, math.acosh, 0)
        def raising_handler(*args):
            self.assertRaises(ValueError, math.acosh, 0)
            1 // 0
        c, s = socket.socketpair()
        old_alarm = signal.signal(signal.SIGALRM, raising_handler)
        try:
            if with_timeout:
                # Just above the one second minimum for signal.alarm
                c.settimeout(1.5)
            with self.assertRaises(ZeroDivisionError):
                signal.alarm(1)
                c.sendall(b"x" * support.SOCK_MAX_SIZE)
            if with_timeout:
                signal.signal(signal.SIGALRM, ok_handler)
                signal.alarm(1)
                self.assertRaises(socket.timeout, c.sendall,
                                  b"x" * support.SOCK_MAX_SIZE)
        finally:
            # cancel any pending alarm and restore the previous handler
            signal.alarm(0)
            signal.signal(signal.SIGALRM, old_alarm)
            c.close()
            s.close()
    def test_sendall_interrupted(self):
        # Signal during a blocking sendall() with no socket timeout set.
        self.check_sendall_interrupted(False)
    def test_sendall_interrupted_with_timeout(self):
        # Signal during sendall() with a socket timeout configured.
        self.check_sendall_interrupted(True)
    def test_dealloc_warn(self):
        # Dropping the last reference to an open socket must emit a
        # ResourceWarning that mentions the socket's repr.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        r = repr(sock)
        with self.assertWarns(ResourceWarning) as cm:
            sock = None
            support.gc_collect()
        self.assertIn(r, str(cm.warning.args[0]))
        # An open socket file object gets dereferenced after the socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        f = sock.makefile('rb')
        r = repr(sock)
        sock = None
        support.gc_collect()
        # the warning fires only once the file object is also dropped
        with self.assertWarns(ResourceWarning):
            f = None
            support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
    def test_socket_close(self):
        # socket.close(fd) must close the file descriptor out from under
        # the socket object; subsequent operations on the object (and a
        # second close) then fail, and invalid fd arguments are rejected.
        sock = socket.socket()
        try:
            sock.bind((HOST, 0))
            socket.close(sock.fileno())
            with self.assertRaises(OSError):
                sock.listen(1)
        finally:
            with self.assertRaises(OSError):
                # sock.close() fails with EBADF
                sock.close()
        with self.assertRaises(TypeError):
            socket.close(None)
        with self.assertRaises(OSError):
            socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
    @support.cpython_only
    def test_listen_backlog_overflow(self):
        # Issue 15989: a backlog that does not fit in a C int must raise
        # OverflowError instead of being silently truncated.
        import _testcapi
        srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        srv.bind((HOST, 0))
        self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
        srv.close()
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
    def test_flowinfo(self):
        # IPv6 flowinfo values outside the valid range must raise
        # OverflowError, both in getnameinfo() and in bind().
        self.assertRaises(OverflowError, socket.getnameinfo,
                          (support.HOSTv6, 0, 0xffffffff), 0)
        with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
            self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless(
        hasattr(socket, 'if_nameindex'),
        'if_nameindex is not supported')
    def test_getaddrinfo_ipv6_scopeid_symbolic(self):
        # A '%ifname' suffix on the host must be converted into the
        # numeric scope id in the returned sockaddr.
        # Just pick up any network interface (Linux, Mac OS X)
        (ifindex, test_interface) = socket.if_nameindex()[0]
        ((*_, sockaddr),) = socket.getaddrinfo(
            'ff02::1de:c0:face:8D%' + test_interface,
            1234, socket.AF_INET6,
            socket.SOCK_DGRAM,
            socket.IPPROTO_UDP
        )
        # Note missing interface name part in IPv6 address
        self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless(
        sys.platform == 'win32',
        'Numeric scope id does not work or undocumented')
    def test_getaddrinfo_ipv6_scopeid_numeric(self):
        # A numeric '%N' scope suffix must show up as the scope id in the
        # returned sockaddr.
        # Also works on Linux and Mac OS X, but is not documented (?)
        # Windows, Linux and Max OS X allow nonexistent interface numbers here.
        ifindex = 42
        ((*_, sockaddr),) = socket.getaddrinfo(
            'ff02::1de:c0:face:8D%' + str(ifindex),
            1234, socket.AF_INET6,
            socket.SOCK_DGRAM,
            socket.IPPROTO_UDP
        )
        # Note missing interface name part in IPv6 address
        self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless(
        hasattr(socket, 'if_nameindex'),
        'if_nameindex is not supported')
    def test_getnameinfo_ipv6_scopeid_symbolic(self):
        # getnameinfo() must render a numeric scope id back as the
        # symbolic '%ifname' suffix.
        # Just pick up any network interface.
        (ifindex, test_interface) = socket.if_nameindex()[0]
        sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
        nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
        self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless(
        sys.platform == 'win32',
        'Numeric scope id does not work or undocumented')
    def test_getnameinfo_ipv6_scopeid_numeric(self):
        # On platforms without a name for the interface, getnameinfo()
        # renders the scope id itself as '%N'.
        # Also works on Linux (undocumented), but does not work on Mac OS X
        # Windows and Linux allow nonexistent interface numbers here.
        ifindex = 42
        sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
        nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
        self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
    def test_str_for_enums(self):
        # Make sure that the AF_* and SOCK_* constants have enum-like string
        # reprs.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
            self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
    def test_socket_consistent_sock_type(self):
        # The .type attribute must stay the plain SOCK_STREAM value even
        # when SOCK_NONBLOCK/SOCK_CLOEXEC were ORed in at creation and
        # when the blocking mode is toggled afterwards.
        SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
        SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
        sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
        with socket.socket(socket.AF_INET, sock_type) as s:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            s.settimeout(1)
            self.assertEqual(s.type, socket.SOCK_STREAM)
            s.settimeout(0)
            self.assertEqual(s.type, socket.SOCK_STREAM)
            s.setblocking(True)
            self.assertEqual(s.type, socket.SOCK_STREAM)
            s.setblocking(False)
            self.assertEqual(s.type, socket.SOCK_STREAM)
    @unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
    def test_unknown_socket_family_repr(self):
        # Test that when created with a family that's not one of the known
        # AF_*/SOCK_* constants, socket.family just returns the number.
        #
        # To do this we fool socket.socket into believing it already has an
        # open fd because on this path it doesn't actually verify the family and
        # type and populates the socket object.
        #
        # On Windows this trick won't work, so the test is skipped.
        fd, path = tempfile.mkstemp()
        self.addCleanup(os.unlink, path)
        # choose values guaranteed not to collide with any known constant
        unknown_family = max(socket.AddressFamily.__members__.values()) + 1
        unknown_type = max(
            kind
            for name, kind in socket.SocketKind.__members__.items()
            if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
        ) + 1
        with socket.socket(
                family=unknown_family, type=unknown_type, proto=23,
                fileno=fd) as s:
            self.assertEqual(s.family, unknown_family)
            self.assertEqual(s.type, unknown_type)
            # some OS like macOS ignore proto
            self.assertIn(s.proto, {0, 23})
    @unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
    def test__sendfile_use_sendfile(self):
        # _sendfile_use_sendfile() must fail cleanly for bad file
        # descriptors: _GiveupOnSendfile for a closed fd, OverflowError
        # for an fd that doesn't fit in a C int, TypeError for None.
        class File:
            # minimal stand-in exposing only fileno()
            def __init__(self, fd):
                self.fd = fd

            def fileno(self):
                return self.fd
        with socket.socket() as sock:
            fd = os.open(os.curdir, os.O_RDONLY)
            os.close(fd)
            with self.assertRaises(socket._GiveupOnSendfile):
                sock._sendfile_use_sendfile(File(fd))
            with self.assertRaises(OverflowError):
                sock._sendfile_use_sendfile(File(2**1000))
            with self.assertRaises(TypeError):
                sock._sendfile_use_sendfile(File(None))
    def _test_socket_fileno(self, s, family, stype):
        # Helper: wrapping an existing fd with socket(fileno=...) must
        # reproduce the original socket's family, type, and fd.
        self.assertEqual(s.family, family)
        self.assertEqual(s.type, stype)

        fd = s.fileno()
        s2 = socket.socket(fileno=fd)
        self.addCleanup(s2.close)
        # detach old fd to avoid double close
        s.detach()
        self.assertEqual(s2.family, family)
        self.assertEqual(s2.type, stype)
        self.assertEqual(s2.fileno(), fd)
    def test_socket_fileno(self):
        # Exercise socket(fileno=...) for every address family/type
        # available on this platform.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(s.close)
        s.bind((support.HOST, 0))
        self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)

        if hasattr(socket, "SOCK_DGRAM"):
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.addCleanup(s.close)
            s.bind((support.HOST, 0))
            self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)

        if support.IPV6_ENABLED:
            s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            self.addCleanup(s.close)
            s.bind((support.HOSTv6, 0, 0, 0))
            self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)

        if hasattr(socket, "AF_UNIX"):
            tmpdir = tempfile.mkdtemp()
            self.addCleanup(shutil.rmtree, tmpdir)
            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.addCleanup(s.close)
            s.bind(os.path.join(tmpdir, 'socket'))
            self._test_socket_fileno(s, socket.AF_UNIX, socket.SOCK_STREAM)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
    """Non-networked checks of the SocketCAN API surface:
    constants, socket creation, bind() validation, and the
    CAN_RAW_LOOPBACK / CAN_RAW_FILTER socket options.
    """

    def testCrucialConstants(self):
        # These must always exist when CAN support is compiled in.
        socket.AF_CAN
        socket.PF_CAN
        socket.CAN_RAW

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testBCMConstants(self):
        socket.CAN_BCM

        # opcodes
        socket.CAN_BCM_TX_SETUP     # create (cyclic) transmission task
        socket.CAN_BCM_TX_DELETE    # remove (cyclic) transmission task
        socket.CAN_BCM_TX_READ      # read properties of (cyclic) transmission task
        socket.CAN_BCM_TX_SEND      # send one CAN frame
        socket.CAN_BCM_RX_SETUP     # create RX content filter subscription
        socket.CAN_BCM_RX_DELETE    # remove RX content filter subscription
        socket.CAN_BCM_RX_READ      # read properties of RX content filter subscription
        socket.CAN_BCM_TX_STATUS    # reply to TX_READ request
        socket.CAN_BCM_TX_EXPIRED   # notification on performed transmissions (count=0)
        socket.CAN_BCM_RX_STATUS    # reply to RX_READ request
        socket.CAN_BCM_RX_TIMEOUT   # cyclic message is absent
        socket.CAN_BCM_RX_CHANGED   # updated CAN frame (detected content change)

    def testCreateSocket(self):
        # A raw CAN socket can be created (and closed) without binding.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            pass

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testCreateBCMSocket(self):
        # Broadcast-manager CAN sockets use SOCK_DGRAM.
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
            pass

    def testBindAny(self):
        # The empty interface name binds to all CAN interfaces.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            s.bind(('', ))

    def testTooLongInterfaceName(self):
        # most systems limit IFNAMSIZ to 16, take 1024 to be sure
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            self.assertRaisesRegex(OSError, 'interface name too long',
                                   s.bind, ('x' * 1024,))

    @unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
                         'socket.CAN_RAW_LOOPBACK required for this test.')
    def testLoopback(self):
        # CAN_RAW_LOOPBACK must round-trip through set/getsockopt.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            for loopback in (0, 1):
                s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
                             loopback)
                self.assertEqual(loopback,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))

    @unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
                         'socket.CAN_RAW_FILTER required for this test.')
    def testFilter(self):
        # CAN_RAW_FILTER accepts a packed (can_id, can_mask) pair, as
        # bytes or bytearray, and getsockopt() returns it unchanged.
        can_id, can_mask = 0x200, 0x700
        can_filter = struct.pack("=II", can_id, can_mask)
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
            self.assertEqual(can_filter,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
    """End-to-end send/receive tests over a real CAN interface, using
    the threaded client/server harness from ThreadedCANSocketTest.
    Methods prefixed with '_test' run in the client thread; the
    matching unprefixed method runs in the server thread.
    """

    def __init__(self, methodName='runTest'):
        ThreadedCANSocketTest.__init__(self, methodName=methodName)

    @classmethod
    def build_can_frame(cls, can_id, data):
        """Build a CAN frame."""
        # Payload is zero-padded to the fixed 8-byte frame size; the
        # real data length travels in can_dlc.
        can_dlc = len(data)
        data = data.ljust(8, b'\x00')
        return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)

    @classmethod
    def dissect_can_frame(cls, frame):
        """Dissect a CAN frame."""
        can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
        # Trim the padding back off using the declared length.
        return (can_id, can_dlc, data[:can_dlc])

    def testSendFrame(self):
        # Server side: the received frame and sender address must match.
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        self.assertEqual(addr[0], self.interface)
        self.assertEqual(addr[1], socket.AF_CAN)

    def _testSendFrame(self):
        self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
        self.cli.send(self.cf)

    def testSendMaxFrame(self):
        # A full 8-byte payload must arrive intact.
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)

    def _testSendMaxFrame(self):
        self.cf = self.build_can_frame(0x00, b'\x07' * 8)
        self.cli.send(self.cf)

    def testSendMultiFrames(self):
        # Two frames sent back-to-back must arrive in order.
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf1, cf)

        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf2, cf)

    def _testSendMultiFrames(self):
        self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
        self.cli.send(self.cf1)

        self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
        self.cli.send(self.cf2)

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def _testBCM(self):
        # Client side: the frame emitted by the broadcast manager must
        # dissect back into the id/data that testBCM submitted.
        cf, addr = self.cli.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        can_id, can_dlc, data = self.dissect_can_frame(cf)
        self.assertEqual(self.can_id, can_id)
        self.assertEqual(self.data, data)

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testBCM(self):
        # Submit a single one-shot TX_SEND job to the broadcast manager:
        # a bcm_msg_head header immediately followed by one CAN frame.
        bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
        self.addCleanup(bcm.close)
        bcm.connect((self.interface,))
        self.can_id = 0x123
        self.data = bytes([0xc0, 0xff, 0xee])
        self.cf = self.build_can_frame(self.can_id, self.data)
        opcode = socket.CAN_BCM_TX_SEND
        flags = 0
        count = 0
        ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
        bcm_can_id = 0x0222
        nframes = 1
        assert len(self.cf) == 16
        header = struct.pack(self.bcm_cmd_msg_fmt,
                             opcode,
                             flags,
                             count,
                             ival1_seconds,
                             ival1_usec,
                             ival2_seconds,
                             ival2_usec,
                             bcm_can_id,
                             nframes,
                             )
        header_plus_frame = header + self.cf
        bytes_sent = bcm.send(header_plus_frame)
        self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
    """Basic checks for CAN ISO-TP protocol support."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # All bind attempts below target the virtual CAN interface.
        self.interface = "vcan0"

    def testCrucialConstants(self):
        # Merely touching each constant raises AttributeError if missing.
        socket.AF_CAN
        socket.PF_CAN
        socket.CAN_ISOTP
        socket.SOCK_DGRAM

    def testCreateSocket(self):
        # A raw CAN socket must be constructible whenever CAN is available.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as sock:
            pass

    @unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
                         'socket.CAN_ISOTP required for this test.')
    def testCreateISOTPSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as sock:
            pass

    def testTooLongInterfaceName(self):
        # most systems limit IFNAMSIZ to 16, take 1024 to be sure
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as sock:
            with self.assertRaisesRegex(OSError, 'interface name too long'):
                sock.bind(('x' * 1024, 1, 2))

    def testBind(self):
        try:
            with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as sock:
                address = self.interface, 0x123, 0x456
                sock.bind(address)
                self.assertEqual(sock.getsockname(), address)
        except OSError as e:
            # The virtual interface may simply not be configured.
            if e.errno != errno.ENODEV:
                raise
            self.skipTest('network interface `%s` does not exist' %
                          self.interface)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
    """Sanity checks for AF_RDS sockets that need no peer."""

    def testCrucialConstants(self):
        # Attribute access alone verifies the constants exist.
        socket.AF_RDS
        socket.PF_RDS

    def testCreateSocket(self):
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as sock:
            pass

    def testSocketBufferSize(self):
        # Setting SO_RCVBUF/SO_SNDBUF must be accepted on an RDS socket.
        size = 16384
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as sock:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, size)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, size)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
    """Threaded RDS datagram tests.

    test* methods run server-side, their _test* twins client-side (see
    ThreadedRDSSocketTest); payloads are passed via self.data* attributes.
    """
    def __init__(self, methodName='runTest'):
        ThreadedRDSSocketTest.__init__(self, methodName=methodName)
    def setUp(self):
        super().setUp()
        self.evt = threading.Event()
    def testSendAndRecv(self):
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)
        # The reported source address must be the client's bound address.
        self.assertEqual(self.cli_addr, addr)
    def _testSendAndRecv(self):
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))
    def testPeek(self):
        # MSG_PEEK must leave the datagram queued for a later recvfrom().
        data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
        self.assertEqual(self.data, data)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)
    def _testPeek(self):
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))
    @requireAttrs(socket.socket, 'recvmsg')
    def testSendAndRecvMsg(self):
        data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
        self.assertEqual(self.data, data)
    @requireAttrs(socket.socket, 'sendmsg')
    def _testSendAndRecvMsg(self):
        self.data = b'hello ' * 10
        self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
    def testSendAndRecvMulti(self):
        # Datagrams must be delivered in sending order.
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data1, data)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data2, data)
    def _testSendAndRecvMulti(self):
        self.data1 = b'bacon'
        self.cli.sendto(self.data1, 0, (HOST, self.port))
        self.data2 = b'egg'
        self.cli.sendto(self.data2, 0, (HOST, self.port))
    def testSelect(self):
        # The server socket must become readable once the datagram lands.
        r, w, x = select.select([self.serv], [], [], 3.0)
        self.assertIn(self.serv, r)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)
    def _testSelect(self):
        self.data = b'select'
        self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
          'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
    """Sanity checks for AF_VSOCK sockets that need no peer."""
    def testCrucialConstants(self):
        # Attribute access alone verifies the constant exists.
        socket.AF_VSOCK
    def testVSOCKConstants(self):
        socket.SO_VM_SOCKETS_BUFFER_SIZE
        socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
        socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
        socket.VMADDR_CID_ANY
        socket.VMADDR_PORT_ANY
        socket.VMADDR_CID_HOST
        socket.VM_SOCKETS_INVALID_VERSION
        socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
    def testCreateSocket(self):
        with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
            pass
    def testSocketBufferSize(self):
        # Read the three buffer-size options, double each, and verify the
        # doubled values are read back.
        with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
            orig_max = s.getsockopt(socket.AF_VSOCK,
                                    socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
            orig = s.getsockopt(socket.AF_VSOCK,
                                socket.SO_VM_SOCKETS_BUFFER_SIZE)
            orig_min = s.getsockopt(socket.AF_VSOCK,
                                    socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
            self.assertEqual(orig_max * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
            self.assertEqual(orig * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_SIZE))
            self.assertEqual(orig_min * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
class BasicTCPTest(SocketConnectedTest):
    """Basic data-transfer tests over a connected TCP socket pair.

    test* methods run against cli_conn (the server's end of the
    connection); their _test* twins drive serv_conn from the client
    thread (see SocketConnectedTest).
    """
    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)
    def testRecv(self):
        # Testing large receive over TCP
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)
    def _testRecv(self):
        self.serv_conn.send(MSG)
    def testOverFlowRecv(self):
        # Testing receive in chunks over TCP
        seg1 = self.cli_conn.recv(len(MSG) - 3)
        seg2 = self.cli_conn.recv(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)
    def _testOverFlowRecv(self):
        self.serv_conn.send(MSG)
    def testRecvFrom(self):
        # Testing large recvfrom() over TCP
        msg, addr = self.cli_conn.recvfrom(1024)
        self.assertEqual(msg, MSG)
    def _testRecvFrom(self):
        self.serv_conn.send(MSG)
    def testOverFlowRecvFrom(self):
        # Testing recvfrom() in chunks over TCP
        seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
        seg2, addr = self.cli_conn.recvfrom(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)
    def _testOverFlowRecvFrom(self):
        self.serv_conn.send(MSG)
    def testSendAll(self):
        # Testing sendall() with a 2048 byte string over TCP
        # Read until EOF (client closes after sendall) and compare totals.
        msg = b''
        while 1:
            read = self.cli_conn.recv(1024)
            if not read:
                break
            msg += read
        self.assertEqual(msg, b'f' * 2048)
    def _testSendAll(self):
        big_chunk = b'f' * 2048
        self.serv_conn.sendall(big_chunk)
    def testFromFd(self):
        # Testing fromfd()
        fd = self.cli_conn.fileno()
        sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        self.assertIsInstance(sock, socket.socket)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)
    def _testFromFd(self):
        self.serv_conn.send(MSG)
    def testDup(self):
        # Testing dup()
        sock = self.cli_conn.dup()
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)
    def _testDup(self):
        self.serv_conn.send(MSG)
    def testShutdown(self):
        # Testing shutdown()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)
        # wait for _testShutdown to finish: on OS X, when the server
        # closes the connection the client also becomes disconnected,
        # and the client's shutdown call will fail. (Issue #4397.)
        self.done.wait()
    def _testShutdown(self):
        self.serv_conn.send(MSG)
        self.serv_conn.shutdown(2)
    # Re-run testShutdown against the CPython-only overflow client below.
    testShutdown_overflow = support.cpython_only(testShutdown)
    @support.cpython_only
    def _testShutdown_overflow(self):
        import _testcapi
        self.serv_conn.send(MSG)
        # Issue 15989
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          _testcapi.INT_MAX + 1)
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          2 + (_testcapi.UINT_MAX + 1))
        self.serv_conn.shutdown(2)
    def testDetach(self):
        # Testing detach()
        fileno = self.cli_conn.fileno()
        f = self.cli_conn.detach()
        self.assertEqual(f, fileno)
        # cli_conn cannot be used anymore...
        self.assertTrue(self.cli_conn._closed)
        self.assertRaises(OSError, self.cli_conn.recv, 1024)
        self.cli_conn.close()
        # ...but we can create another socket using the (still open)
        # file descriptor
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)
    def _testDetach(self):
        self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
    """Minimal UDP send/receive checks between the threaded client and
    server sockets provided by ThreadedUDPSocketTest."""

    def __init__(self, methodName='runTest'):
        ThreadedUDPSocketTest.__init__(self, methodName=methodName)

    def testSendtoAndRecv(self):
        # Server side: a plain recv() must yield the datagram payload.
        payload = self.serv.recv(len(MSG))
        self.assertEqual(payload, MSG)

    def _testSendtoAndRecv(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFrom(self):
        # Server side: recvfrom() must yield the payload as well.
        payload, sender = self.serv.recvfrom(len(MSG))
        self.assertEqual(payload, MSG)

    def _testRecvFrom(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFromNegative(self):
        # A negative buffer size must be rejected with ValueError.
        with self.assertRaises(ValueError):
            self.serv.recvfrom(-1)

    def _testRecvFromNegative(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
    """Shared infrastructure for the sendmsg()/recvmsg() test matrix.

    Concrete subclasses supply cli_sock/serv_sock and the matching
    cli_addr/serv_addr attributes (see the module comments above this
    class).
    """
    # Base class for sendmsg()/recvmsg() tests.
    # Time in seconds to wait before considering a test failed, or
    # None for no timeout. Not all tests actually set a timeout.
    fail_timeout = 3.0
    def setUp(self):
        self.misc_event = threading.Event()
        super().setUp()
    def sendToServer(self, msg):
        # Send msg to the server.
        return self.cli_sock.send(msg)
    # Tuple of alternative default arguments for sendmsg() when called
    # via sendmsgToServer() (e.g. to include a destination address).
    sendmsg_to_server_defaults = ()
    def sendmsgToServer(self, *args):
        # Call sendmsg() on self.cli_sock with the given arguments,
        # filling in any arguments which are not supplied with the
        # corresponding items of self.sendmsg_to_server_defaults, if
        # any.
        return self.cli_sock.sendmsg(
            *(args + self.sendmsg_to_server_defaults[len(args):]))
    def doRecvmsg(self, sock, bufsize, *args):
        # Call recvmsg() on sock with given arguments and return its
        # result. Should be used for tests which can use either
        # recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
        # this method with one which emulates it using recvmsg_into(),
        # thus allowing the same test to be used for both methods.
        result = sock.recvmsg(bufsize, *args)
        self.registerRecvmsgResult(result)
        return result
    def registerRecvmsgResult(self, result):
        # Called by doRecvmsg() with the return value of recvmsg() or
        # recvmsg_into(). Can be overridden to arrange cleanup based
        # on the returned ancillary data, for instance.
        pass
    def checkRecvmsgAddress(self, addr1, addr2):
        # Called to compare the received address with the address of
        # the peer.
        self.assertEqual(addr1, addr2)
    # Flags that are normally unset in msg_flags
    # (accumulated at class-definition time; missing names count as 0).
    msg_flags_common_unset = 0
    for name in ("MSG_CTRUNC", "MSG_OOB"):
        msg_flags_common_unset |= getattr(socket, name, 0)
    # Flags that are normally set
    msg_flags_common_set = 0
    # Flags set when a complete record has been received (e.g. MSG_EOR
    # for SCTP)
    msg_flags_eor_indicator = 0
    # Flags set when a complete record has not been received
    # (e.g. MSG_TRUNC for datagram sockets)
    msg_flags_non_eor_indicator = 0
    def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
        # Method to check the value of msg_flags returned by recvmsg[_into]().
        #
        # Checks that all bits in msg_flags_common_set attribute are
        # set in "flags" and all bits in msg_flags_common_unset are
        # unset.
        #
        # The "eor" argument specifies whether the flags should
        # indicate that a full record (or datagram) has been received.
        # If "eor" is None, no checks are done; otherwise, checks
        # that:
        #
        #  * if "eor" is true, all bits in msg_flags_eor_indicator are
        #    set and all bits in msg_flags_non_eor_indicator are unset
        #
        #  * if "eor" is false, all bits in msg_flags_non_eor_indicator
        #    are set and all bits in msg_flags_eor_indicator are unset
        #
        # If "checkset" and/or "checkunset" are supplied, they require
        # the given bits to be set or unset respectively, overriding
        # what the attributes require for those bits.
        #
        # If any bits are set in "ignore", they will not be checked,
        # regardless of the other inputs.
        #
        # Will raise Exception if the inputs require a bit to be both
        # set and unset, and it is not ignored.
        defaultset = self.msg_flags_common_set
        defaultunset = self.msg_flags_common_unset
        if eor:
            defaultset |= self.msg_flags_eor_indicator
            defaultunset |= self.msg_flags_non_eor_indicator
        elif eor is not None:
            defaultset |= self.msg_flags_non_eor_indicator
            defaultunset |= self.msg_flags_eor_indicator
        # Function arguments override defaults
        defaultset &= ~checkunset
        defaultunset &= ~checkset
        # Merge arguments with remaining defaults, and check for conflicts
        checkset |= defaultset
        checkunset |= defaultunset
        inboth = checkset & checkunset & ~ignore
        if inboth:
            raise Exception("contradictory set, unset requirements for flags "
                            "{0:#x}".format(inboth))
        # Compare with given msg_flags value
        mask = (checkset | checkunset) & ~ignore
        self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
    """Mixin that emulates recvmsg() on top of recvmsg_into()."""

    def doRecvmsg(self, sock, bufsize, *args):
        # Receive into a scratch buffer, then repackage the result so it
        # looks exactly like a recvmsg() return value.
        scratch = bytearray(bufsize)
        result = sock.recvmsg_into([scratch], *args)
        self.registerRecvmsgResult(result)
        nbytes = result[0]
        self.assertGreaterEqual(nbytes, 0)
        self.assertLessEqual(nbytes, bufsize)
        return (bytes(scratch[:nbytes]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
    """Flag expectations specific to datagram sockets."""

    @property
    def msg_flags_non_eor_indicator(self):
        # A truncated datagram is signalled via MSG_TRUNC, on top of
        # whatever the base class already expects.
        inherited = super().msg_flags_non_eor_indicator
        return inherited | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
    """Flag expectations specific to SCTP sockets."""

    @property
    def msg_flags_eor_indicator(self):
        # A complete SCTP record is signalled via MSG_EOR, on top of
        # whatever the base class already expects.
        inherited = super().msg_flags_eor_indicator
        return inherited | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
    """Maps self.cli/self.serv to cli_sock/serv_sock for
    connectionless-mode sockets and supplies the destination address."""
    # Base class for tests on connectionless-mode sockets. Users must
    # supply sockets on attributes cli and serv to be mapped to
    # cli_sock and serv_sock respectively.
    @property
    def serv_sock(self):
        return self.serv
    @property
    def cli_sock(self):
        return self.cli
    @property
    def sendmsg_to_server_defaults(self):
        # Defaults: no data, no ancillary data, no flags, destination =
        # server address (required because the socket is unconnected).
        return ([], [], 0, self.serv_addr)
    def sendToServer(self, msg):
        # Unconnected socket, so an explicit destination is needed.
        return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
    """Maps serv_conn/cli_conn to cli_sock/serv_sock for connected
    sockets."""
    # Base class for tests on connected sockets. Users must supply
    # sockets on attributes serv_conn and cli_conn (representing the
    # connections *to* the server and the client), to be mapped to
    # cli_sock and serv_sock respectively.
    @property
    def serv_sock(self):
        return self.cli_conn
    @property
    def cli_sock(self):
        return self.serv_conn
    def checkRecvmsgAddress(self, addr1, addr2):
        # Address is currently "unspecified" for a connected socket,
        # so we don't examine it
        pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
    # Base class to set a timeout on server's socket.
    def setUp(self):
        super().setUp()
        # Guard server-side receives with the class-wide timeout so a
        # lost message fails the test instead of hanging it.
        self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
    """sendmsg() tests usable with any socket type; the server side only
    uses plain recv()."""
    # Tests for sendmsg() which can use any socket type and do not
    # involve recvmsg() or recvmsg_into().
    def testSendmsg(self):
        # Send a simple message with sendmsg().
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
    def _testSendmsg(self):
        self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
    def testSendmsgDataGenerator(self):
        # Send from buffer obtained from a generator (not a sequence).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
    def _testSendmsgDataGenerator(self):
        self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
                         len(MSG))
    def testSendmsgAncillaryGenerator(self):
        # Gather (empty) ancillary data from a generator.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
    def _testSendmsgAncillaryGenerator(self):
        self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
                         len(MSG))
    def testSendmsgArray(self):
        # Send data from an array instead of the usual bytes object.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
    def _testSendmsgArray(self):
        self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
                         len(MSG))
    def testSendmsgGather(self):
        # Send message data from more than one buffer (gather write).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
    def _testSendmsgGather(self):
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
    def testSendmsgBadArgs(self):
        # Check that sendmsg() rejects invalid arguments.
        self.assertEqual(self.serv_sock.recv(1000), b"done")
    def _testSendmsgBadArgs(self):
        self.assertRaises(TypeError, self.cli_sock.sendmsg)
        self.assertRaises(TypeError, self.sendmsgToServer,
                          b"not in an iterable")
        self.assertRaises(TypeError, self.sendmsgToServer,
                          object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG, object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], 0, object())
        # Tell the server side we are done making bad calls.
        self.sendToServer(b"done")
    def testSendmsgBadCmsg(self):
        # Check that invalid ancillary data items are rejected.
        self.assertEqual(self.serv_sock.recv(1000), b"done")
    def _testSendmsgBadCmsg(self):
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(object(), 0, b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, object(), b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, object())])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0)])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, b"data", 42)])
        self.sendToServer(b"done")
    @requireAttrs(socket, "CMSG_SPACE")
    def testSendmsgBadMultiCmsg(self):
        # Check that invalid ancillary data items are rejected when
        # more than one item is present.
        self.assertEqual(self.serv_sock.recv(1000), b"done")
    @testSendmsgBadMultiCmsg.client_skip
    def _testSendmsgBadMultiCmsg(self):
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [0, 0, b""])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, b""), object()])
        self.sendToServer(b"done")
    def testSendmsgExcessCmsgReject(self):
        # Check that sendmsg() rejects excess ancillary data items
        # when the number that can be sent is limited.
        self.assertEqual(self.serv_sock.recv(1000), b"done")
    def _testSendmsgExcessCmsgReject(self):
        if not hasattr(socket, "CMSG_SPACE"):
            # Can only send one item
            with self.assertRaises(OSError) as cm:
                self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
            self.assertIsNone(cm.exception.errno)
        self.sendToServer(b"done")
    def testSendmsgAfterClose(self):
        # Check that sendmsg() fails on a closed socket.
        pass
    def _testSendmsgAfterClose(self):
        self.cli_sock.close()
        self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
    """sendmsg() tests that need a stream socket; no recvmsg()."""
    # Tests for sendmsg() which require a stream socket and do not
    # involve recvmsg() or recvmsg_into().
    def testSendmsgExplicitNoneAddr(self):
        # Check that peer address can be specified as None.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
    def _testSendmsgExplicitNoneAddr(self):
        self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
    def testSendmsgTimeout(self):
        # Check that timeout works with sendmsg().
        self.assertEqual(self.serv_sock.recv(512), b"a"*512)
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    def _testSendmsgTimeout(self):
        try:
            self.cli_sock.settimeout(0.03)
            try:
                # Keep sending until the send buffer fills and the
                # timeout fires.
                while True:
                    self.sendmsgToServer([b"a"*512])
            except socket.timeout:
                pass
            except OSError as exc:
                if exc.errno != errno.ENOMEM:
                    raise
                # bpo-33937 the test randomly fails on Travis CI with
                # "OSError: [Errno 12] Cannot allocate memory"
            else:
                self.fail("socket.timeout not raised")
        finally:
            self.misc_event.set()
    # XXX: would be nice to have more tests for sendmsg flags argument.
    # Linux supports MSG_DONTWAIT when sending, but in general, it
    # only works when receiving. Could add other platforms if they
    # support it too.
    @skipWithClientIf(sys.platform not in {"linux"},
          "MSG_DONTWAIT not known to work on this platform when "
          "sending")
    def testSendmsgDontWait(self):
        # Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
        self.assertEqual(self.serv_sock.recv(512), b"a"*512)
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    @testSendmsgDontWait.client_skip
    def _testSendmsgDontWait(self):
        try:
            with self.assertRaises(OSError) as cm:
                while True:
                    self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
            # bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
            # with "OSError: [Errno 12] Cannot allocate memory"
            self.assertIn(cm.exception.errno,
                          (errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
        finally:
            self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
    """sendmsg() tests needing a connectionless-mode (e.g. datagram)
    socket; none of them involve recvmsg() or recvmsg_into()."""

    def testSendmsgNoDestAddr(self):
        # Nothing to do on the server side; the client verifies that
        # sendmsg() without a destination fails on an unconnected socket.
        pass

    def _testSendmsgNoDestAddr(self):
        # Both an omitted address and an explicit None must be rejected.
        self.assertRaises(OSError, self.cli_sock.sendmsg, [MSG])
        self.assertRaises(OSError, self.cli_sock.sendmsg, [MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
    """recvmsg() tests that also run via recvmsg_into() (through
    RecvmsgIntoMixin's doRecvmsg() override); any socket type."""
    # Tests for recvmsg() which can also be emulated using
    # recvmsg_into(), and can use any socket type.
    def testRecvmsg(self):
        # Receive a simple message with recvmsg[_into]().
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsg(self):
        self.sendToServer(MSG)
    def testRecvmsgExplicitDefaults(self):
        # Test recvmsg[_into]() with default arguments provided explicitly.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgExplicitDefaults(self):
        self.sendToServer(MSG)
    def testRecvmsgShorter(self):
        # Receive a message smaller than buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) + 42)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgShorter(self):
        self.sendToServer(MSG)
    def testRecvmsgTrunc(self):
        # Receive part of message, check for truncation indicators.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=False)
    def _testRecvmsgTrunc(self):
        self.sendToServer(MSG)
    def testRecvmsgShortAncillaryBuf(self):
        # Test ancillary data buffer too small to hold any ancillary data.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgShortAncillaryBuf(self):
        self.sendToServer(MSG)
    def testRecvmsgLongAncillaryBuf(self):
        # Test large ancillary data buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgLongAncillaryBuf(self):
        self.sendToServer(MSG)
    def testRecvmsgAfterClose(self):
        # Check that recvmsg[_into]() fails on a closed socket.
        self.serv_sock.close()
        self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
    def _testRecvmsgAfterClose(self):
        pass
    def testRecvmsgTimeout(self):
        # Check that timeout works.
        try:
            self.serv_sock.settimeout(0.03)
            self.assertRaises(socket.timeout,
                              self.doRecvmsg, self.serv_sock, len(MSG))
        finally:
            # Release the client, which is waiting on this event.
            self.misc_event.set()
    def _testRecvmsgTimeout(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    @requireAttrs(socket, "MSG_PEEK")
    def testRecvmsgPeek(self):
        # Check that MSG_PEEK in flags enables examination of pending
        # data without consuming it.
        # Receive part of data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3, 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        # Ignoring MSG_TRUNC here (so this test is the same for stream
        # and datagram sockets). Some wording in POSIX seems to
        # suggest that it needn't be set when peeking, but that may
        # just be a slip.
        self.checkFlags(flags, eor=False,
                        ignore=getattr(socket, "MSG_TRUNC", 0))
        # Receive all data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
        # Check that the same data can still be received normally.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    @testRecvmsgPeek.client_skip
    def _testRecvmsgPeek(self):
        self.sendToServer(MSG)
    @requireAttrs(socket.socket, "sendmsg")
    def testRecvmsgFromSendmsg(self):
        # Test receiving with recvmsg[_into]() when message is sent
        # using sendmsg().
        self.serv_sock.settimeout(self.fail_timeout)
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    @testRecvmsgFromSendmsg.client_skip
    def _testRecvmsgFromSendmsg(self):
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
    """Stream-socket-only tests that run via both recvmsg() and
    recvmsg_into()."""
    # Tests which require a stream socket and can use either recvmsg()
    # or recvmsg_into().
    def testRecvmsgEOF(self):
        # Receive end-of-stream indicator (b"", peer socket closed).
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
        self.assertEqual(msg, b"")
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=None) # Might not have end-of-record marker
    def _testRecvmsgEOF(self):
        self.cli_sock.close()
    def testRecvmsgOverflow(self):
        # Receive a message in more than one chunk.
        seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                    len(MSG) - 3)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=False)
        seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
        # The two chunks must reassemble into the original message.
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)
    def _testRecvmsgOverflow(self):
        self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
    """recvmsg()-specific tests (not shared with recvmsg_into())."""
    # Tests for recvmsg() which can use any socket type.
    def testRecvmsgBadArgs(self):
        # Check that recvmsg() rejects invalid arguments.
        self.assertRaises(TypeError, self.serv_sock.recvmsg)
        self.assertRaises(ValueError, self.serv_sock.recvmsg,
                          -1, 0, 0)
        self.assertRaises(ValueError, self.serv_sock.recvmsg,
                          len(MSG), -1, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          [bytearray(10)], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          object(), 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          len(MSG), object(), 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          len(MSG), 0, object())
        # A correct call must still succeed afterwards.
        msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgBadArgs(self):
        self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
    def _testRecvmsgIntoScatter(self):
        """Client side: send the message the server scatters."""
        self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
    """Sanity checks for socket.CMSG_LEN() and socket.CMSG_SPACE().

    sendmsg() and recvmsg[_into]() share their implementation with these
    macros, so the assumptions verified here are relied upon by the
    ancillary-data tests.
    """

    # Largest value representable in a socklen_t; mirrors the
    # definition used in socketmodule.c.
    try:
        import _testcapi
    except ImportError:
        socklen_t_limit = 0x7fffffff
    else:
        socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)

    @requireAttrs(socket, "CMSG_LEN")
    def testCMSG_LEN(self):
        """CMSG_LEN() grows linearly with the payload size and raises
        OverflowError for negative or over-limit sizes."""
        overhead = socket.CMSG_LEN(0)
        toobig = self.socklen_t_limit - overhead + 1
        # Probe the small sizes plus the values just below the limit.
        sizes = [*range(257), *range(toobig - 257, toobig)]
        # struct cmsghdr has at least three members, two of which are ints.
        self.assertGreater(overhead, array.array("i").itemsize * 2)
        for size in sizes:
            total = socket.CMSG_LEN(size)
            # recvmsg() recovers the data size by subtracting the overhead.
            self.assertEqual(total - overhead, size)
            self.assertLessEqual(total, self.socklen_t_limit)
        # sendmsg() shares this code and must reject out-of-range sizes.
        for bad in (-1, toobig, sys.maxsize):
            self.assertRaises(OverflowError, socket.CMSG_LEN, bad)

    @requireAttrs(socket, "CMSG_SPACE")
    def testCMSG_SPACE(self):
        """CMSG_SPACE() is monotonic, never less than CMSG_LEN(), and
        raises OverflowError for negative or over-limit sizes."""
        toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
        sizes = [*range(257), *range(toobig - 257, toobig)]
        prev = socket.CMSG_SPACE(0)
        # struct cmsghdr has at least three members, two of which are ints.
        self.assertGreater(prev, array.array("i").itemsize * 2)
        for size in sizes:
            space = socket.CMSG_SPACE(size)
            self.assertGreaterEqual(space, prev)
            self.assertGreaterEqual(space, socket.CMSG_LEN(size))
            self.assertGreaterEqual(space, size + socket.CMSG_LEN(0))
            self.assertLessEqual(space, self.socklen_t_limit)
            prev = space
        # sendmsg() shares this code and must reject out-of-range sizes.
        for bad in (-1, toobig, sys.maxsize):
            self.assertRaises(OverflowError, socket.CMSG_SPACE, bad)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
    """Tests for passing open file descriptors between the client and
    server threads with SCM_RIGHTS ancillary data over AF_UNIX sockets."""
    # Tests for file descriptor passing on Unix-domain sockets.

    # Invalid file descriptor value that's unlikely to evaluate to a
    # real FD even if one of its bytes is replaced with a different
    # value (which shouldn't actually happen).
    badfd = -0x5555

    def newFDs(self, n):
        # Return a list of n file descriptors for newly-created files
        # containing their list indices as ASCII numbers.
        fds = []
        for i in range(n):
            fd, path = tempfile.mkstemp()
            self.addCleanup(os.unlink, path)
            self.addCleanup(os.close, fd)
            os.write(fd, str(i).encode())
            fds.append(fd)
        return fds

    def checkFDs(self, fds):
        # Check that the file descriptors in the given list contain
        # their correct list indices as ASCII numbers.
        for n, fd in enumerate(fds):
            os.lseek(fd, 0, os.SEEK_SET)
            self.assertEqual(os.read(fd, 1024), str(n).encode())

    def registerRecvmsgResult(self, result):
        """Arrange for any FDs carried in the given recvmsg() result to
        be closed during test cleanup."""
        self.addCleanup(self.closeRecvmsgFDs, result)

    def closeRecvmsgFDs(self, recvmsg_result):
        # Close all file descriptors specified in the ancillary data
        # of the given return value from recvmsg() or recvmsg_into().
        for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
            if (cmsg_level == socket.SOL_SOCKET and
                    cmsg_type == socket.SCM_RIGHTS):
                fds = array.array("i")
                # Drop any trailing partial int before decoding.
                fds.frombytes(cmsg_data[:
                        len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
                for fd in fds:
                    os.close(fd)

    def createAndSendFDs(self, n):
        # Send n new file descriptors created by newFDs() to the
        # server, with the constant MSG as the non-ancillary data.
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.SOL_SOCKET,
                                   socket.SCM_RIGHTS,
                                   array.array("i", self.newFDs(n)))]),
            len(MSG))

    def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
        # Check that constant MSG was received with numfds file
        # descriptors in a maximum of maxcmsgs control messages (which
        # must contain only complete integers). By default, check
        # that MSG_CTRUNC is unset, but ignore any flags in
        # ignoreflags.
        msg, ancdata, flags, addr = result
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)
        self.assertIsInstance(ancdata, list)
        self.assertLessEqual(len(ancdata), maxcmsgs)
        fds = array.array("i")
        for item in ancdata:
            self.assertIsInstance(item, tuple)
            cmsg_level, cmsg_type, cmsg_data = item
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            self.assertIsInstance(cmsg_data, bytes)
            self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
            fds.frombytes(cmsg_data)
        self.assertEqual(len(fds), numfds)
        self.checkFDs(fds)

    def testFDPassSimple(self):
        # Pass a single FD (array read from bytes object).
        self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))

    def _testFDPassSimple(self):
        """Client: send one FD encoded as raw bytes of an int array."""
        self.assertEqual(
            self.sendmsgToServer(
                [MSG],
                [(socket.SOL_SOCKET,
                  socket.SCM_RIGHTS,
                  array.array("i", self.newFDs(1)).tobytes())]),
            len(MSG))

    def testMultipleFDPass(self):
        # Pass multiple FDs in a single array.
        self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))

    def _testMultipleFDPass(self):
        """Client: send four FDs in one control message."""
        self.createAndSendFDs(4)

    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassCMSG_SPACE(self):
        # Test using CMSG_SPACE() to calculate ancillary buffer size.
        self.checkRecvmsgFDs(
            4, self.doRecvmsg(self.serv_sock, len(MSG),
                              socket.CMSG_SPACE(4 * SIZEOF_INT)))

    @testFDPassCMSG_SPACE.client_skip
    def _testFDPassCMSG_SPACE(self):
        """Client: send four FDs in one control message."""
        self.createAndSendFDs(4)

    def testFDPassCMSG_LEN(self):
        # Test using CMSG_LEN() to calculate ancillary buffer size.
        self.checkRecvmsgFDs(1,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_LEN(4 * SIZEOF_INT)),
                             # RFC 3542 says implementations may set
                             # MSG_CTRUNC if there isn't enough space
                             # for trailing padding.
                             ignoreflags=socket.MSG_CTRUNC)

    def _testFDPassCMSG_LEN(self):
        """Client: send one FD."""
        self.createAndSendFDs(1)

    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparate(self):
        # Pass two FDs in two separate arrays. Arrays may be combined
        # into a single control message by the OS.
        self.checkRecvmsgFDs(2,
                             self.doRecvmsg(self.serv_sock, len(MSG), 10240),
                             maxcmsgs=2)

    @testFDPassSeparate.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
    def _testFDPassSeparate(self):
        """Client: send two FDs, one per control message."""
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))

    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparateMinSpace(self):
        # Pass two FDs in two separate arrays, receiving them into the
        # minimum space for two arrays.
        num_fds = 2
        self.checkRecvmsgFDs(num_fds,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_SPACE(SIZEOF_INT) +
                                            socket.CMSG_LEN(SIZEOF_INT * num_fds)),
                             maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)

    @testFDPassSeparateMinSpace.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
    def _testFDPassSeparateMinSpace(self):
        """Client: send two FDs, one per control message."""
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))

    def sendAncillaryIfPossible(self, msg, ancdata):
        # Try to send msg and ancdata to server, but if the system
        # call fails, just send msg with no ancillary data.
        try:
            nbytes = self.sendmsgToServer([msg], ancdata)
        except OSError as e:
            # Check that it was the system call that failed
            self.assertIsInstance(e.errno, int)
            nbytes = self.sendmsgToServer([msg])
        self.assertEqual(nbytes, len(msg))

    @unittest.skipIf(sys.platform == "darwin", "see issue #24725")
    def testFDPassEmpty(self):
        # Try to pass an empty FD array. Can receive either no array
        # or an empty array.
        self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240),
                             ignoreflags=socket.MSG_CTRUNC)

    def _testFDPassEmpty(self):
        """Client: try to send an empty SCM_RIGHTS payload."""
        self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
                                            socket.SCM_RIGHTS,
                                            b"")])

    def testFDPassPartialInt(self):
        # Try to pass a truncated FD array.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 1)
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            self.assertLess(len(cmsg_data), SIZEOF_INT)

    def _testFDPassPartialInt(self):
        """Client: send an int array truncated by one byte."""
        self.sendAncillaryIfPossible(
            MSG,
            [(socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [self.badfd]).tobytes()[:-1])])

    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassPartialIntInMiddle(self):
        # Try to pass two FD arrays, the first of which is truncated.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 2)
        fds = array.array("i")
        # Arrays may have been combined in a single control message
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            fds.frombytes(cmsg_data[:
                    len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        self.assertLessEqual(len(fds), 2)
        self.checkFDs(fds)

    @testFDPassPartialIntInMiddle.client_skip
    def _testFDPassPartialIntInMiddle(self):
        """Client: send a truncated int array followed by a valid one."""
        fd0, fd1 = self.newFDs(2)
        self.sendAncillaryIfPossible(
            MSG,
            [(socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
             (socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [fd1]))])

    def checkTruncatedHeader(self, result, ignoreflags=0):
        # Check that no ancillary data items are returned when data is
        # truncated inside the cmsghdr structure.
        msg, ancdata, flags, addr = result
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

    def testCmsgTruncNoBufSize(self):
        # Check that no ancillary data is received when no buffer size
        # is specified.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
                                  # BSD seems to set MSG_CTRUNC only
                                  # if an item has been partially
                                  # received.
                                  ignoreflags=socket.MSG_CTRUNC)

    def _testCmsgTruncNoBufSize(self):
        """Client: send one FD."""
        self.createAndSendFDs(1)

    def testCmsgTrunc0(self):
        # Check that no ancillary data is received when buffer size is 0.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
                                  ignoreflags=socket.MSG_CTRUNC)

    def _testCmsgTrunc0(self):
        """Client: send one FD."""
        self.createAndSendFDs(1)

    # Check that no ancillary data is returned for various non-zero
    # (but still too small) buffer sizes.

    def testCmsgTrunc1(self):
        """Ancillary buffer of one byte: too small for any cmsg header."""
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))

    def _testCmsgTrunc1(self):
        """Client: send one FD."""
        self.createAndSendFDs(1)

    def testCmsgTrunc2Int(self):
        # The cmsghdr structure has at least three members, two of
        # which are ints, so we still shouldn't see any ancillary
        # data.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                                 SIZEOF_INT * 2))

    def _testCmsgTrunc2Int(self):
        """Client: send one FD."""
        self.createAndSendFDs(1)

    def testCmsgTruncLen0Minus1(self):
        """Ancillary buffer one byte short of an empty cmsg."""
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                                 socket.CMSG_LEN(0) - 1))

    def _testCmsgTruncLen0Minus1(self):
        """Client: send one FD."""
        self.createAndSendFDs(1)

    # The following tests try to truncate the control message in the
    # middle of the FD array.

    def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
        # Check that file descriptor data is truncated to between
        # mindata and maxdata bytes when received with buffer size
        # ancbuf, and that any complete file descriptor numbers are
        # valid.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbuf)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
        if mindata == 0 and ancdata == []:
            return
        self.assertEqual(len(ancdata), 1)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.SOL_SOCKET)
        self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
        self.assertGreaterEqual(len(cmsg_data), mindata)
        self.assertLessEqual(len(cmsg_data), maxdata)
        fds = array.array("i")
        # Drop any trailing partial int before decoding.
        fds.frombytes(cmsg_data[:
                len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        self.checkFDs(fds)

    def testCmsgTruncLen0(self):
        """Room for a cmsg header but zero bytes of FD data."""
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)

    def _testCmsgTruncLen0(self):
        """Client: send one FD."""
        self.createAndSendFDs(1)

    def testCmsgTruncLen0Plus1(self):
        """Room for a cmsg header plus a single byte of FD data."""
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)

    def _testCmsgTruncLen0Plus1(self):
        """Client: send two FDs."""
        self.createAndSendFDs(2)

    def testCmsgTruncLen1(self):
        """Room for exactly one complete FD out of the two sent."""
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
                                 maxdata=SIZEOF_INT)

    def _testCmsgTruncLen1(self):
        """Client: send two FDs."""
        self.createAndSendFDs(2)

    def testCmsgTruncLen2Minus1(self):
        """One byte short of two complete FDs."""
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
                                 maxdata=(2 * SIZEOF_INT) - 1)

    def _testCmsgTruncLen2Minus1(self):
        """Client: send two FDs."""
        self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
    """Exercise sendmsg()/recvmsg[_into]() ancillary-data handling via
    the RFC 3542 IPv6 socket options (hop limit, traffic class)."""
    # Test sendmsg() and recvmsg[_into]() using the ancillary data
    # features of the RFC 3542 Advanced Sockets API for IPv6.
    # Currently we can only handle certain data items (e.g. traffic
    # class, hop limit, MTU discovery and fragmentation settings)
    # without resorting to unportable means such as the struct module,
    # but the tests here are aimed at testing the ancillary data
    # handling in sendmsg() and recvmsg() rather than the IPv6 API
    # itself.

    # Test value to use when setting hop limit of packet
    hop_limit = 2

    # Test value to use when setting traffic class of packet.
    # -1 means "use kernel default".
    traffic_class = -1

    def ancillaryMapping(self, ancdata):
        # Given ancillary data list ancdata, return a mapping from
        # pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
        # Check that no (level, type) pair appears more than once.
        d = {}
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertNotIn((cmsg_level, cmsg_type), d)
            d[(cmsg_level, cmsg_type)] = cmsg_data
        return d

    def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
        # Receive hop limit into ancbufsize bytes of ancillary data
        # space. Check that data is MSG, ancillary data is not
        # truncated (but ignore any flags in ignoreflags), and hop
        # limit is between 0 and maxhop inclusive.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)
        self.assertEqual(len(ancdata), 1)
        self.assertIsInstance(ancdata[0], tuple)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
        self.assertIsInstance(cmsg_data, bytes)
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], maxhop)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testRecvHopLimit(self):
        # Test receiving the packet hop limit as ancillary data.
        self.checkHopLimit(ancbufsize=10240)

    @testRecvHopLimit.client_skip
    def _testRecvHopLimit(self):
        # Need to wait until server has asked to receive ancillary
        # data, as implementations are not required to buffer it
        # otherwise.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testRecvHopLimitCMSG_SPACE(self):
        # Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
        self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))

    @testRecvHopLimitCMSG_SPACE.client_skip
    def _testRecvHopLimitCMSG_SPACE(self):
        """Client: wait for the server's setsockopt, then send MSG."""
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    # Could test receiving into buffer sized using CMSG_LEN, but RFC
    # 3542 says portable applications must provide space for trailing
    # padding. Implementations may set MSG_CTRUNC if there isn't
    # enough space for the padding.

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSetHopLimit(self):
        # Test setting hop limit on outgoing packet and receiving it
        # at the other end.
        self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)

    @testSetHopLimit.client_skip
    def _testSetHopLimit(self):
        """Client: send MSG with an explicit IPV6_HOPLIMIT cmsg."""
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                                   array.array("i", [self.hop_limit]))]),
            len(MSG))

    def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
                                     ignoreflags=0):
        # Receive traffic class and hop limit into ancbufsize bytes of
        # ancillary data space. Check that data is MSG, ancillary
        # data is not truncated (but ignore any flags in ignoreflags),
        # and traffic class and hop limit are in range (hop limit no
        # more than maxhop).
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)
        self.assertEqual(len(ancdata), 2)
        ancmap = self.ancillaryMapping(ancdata)
        tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
        self.assertEqual(len(tcdata), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(tcdata)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)
        hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
        self.assertEqual(len(hldata), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(hldata)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], maxhop)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testRecvTrafficClassAndHopLimit(self):
        # Test receiving traffic class and hop limit as ancillary data.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240)

    @testRecvTrafficClassAndHopLimit.client_skip
    def _testRecvTrafficClassAndHopLimit(self):
        """Client: wait for the server's setsockopt, then send MSG."""
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
        # Test receiving traffic class and hop limit, using
        # CMSG_SPACE() to calculate buffer size.
        self.checkTrafficClassAndHopLimit(
            ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)

    @testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
    def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
        """Client: wait for the server's setsockopt, then send MSG."""
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSetTrafficClassAndHopLimit(self):
        # Test setting traffic class and hop limit on outgoing packet,
        # and receiving them at the other end.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                          maxhop=self.hop_limit)

    @testSetTrafficClassAndHopLimit.client_skip
    def _testSetTrafficClassAndHopLimit(self):
        """Client: send MSG with explicit TCLASS and HOPLIMIT cmsgs."""
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                                   array.array("i", [self.traffic_class])),
                                  (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                                   array.array("i", [self.hop_limit]))]),
            len(MSG))

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testOddCmsgSize(self):
        # Try to send ancillary data with first item one byte too
        # long. Fall back to sending with correct size if this fails,
        # and check that second item was handled correctly.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                          maxhop=self.hop_limit)

    @testOddCmsgSize.client_skip
    def _testOddCmsgSize(self):
        """Client: first attempt carries a TCLASS payload one byte too
        long; resend with correctly-sized items if the OS rejects it."""
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        try:
            nbytes = self.sendmsgToServer(
                [MSG],
                [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                  array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
                 (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                  array.array("i", [self.hop_limit]))])
        except OSError as e:
            # Confirm the failure came from the system call itself.
            self.assertIsInstance(e.errno, int)
            nbytes = self.sendmsgToServer(
                [MSG],
                [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                  array.array("i", [self.traffic_class])),
                 (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                  array.array("i", [self.hop_limit]))])
        self.assertEqual(nbytes, len(MSG))

    # Tests for proper handling of truncated ancillary data

    def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
        # Receive hop limit into ancbufsize bytes of ancillary data
        # space, which should be too small to contain the ancillary
        # data header (if ancbufsize is None, pass no second argument
        # to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
        # (unless included in ignoreflags), and no ancillary data is
        # returned.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.misc_event.set()
        args = () if ancbufsize is None else (ancbufsize,)
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), *args)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testCmsgTruncNoBufSize(self):
        # Check that no ancillary data is received when no ancillary
        # buffer size is provided.
        self.checkHopLimitTruncatedHeader(ancbufsize=None,
                                          # BSD seems to set
                                          # MSG_CTRUNC only if an item
                                          # has been partially
                                          # received.
                                          ignoreflags=socket.MSG_CTRUNC)

    @testCmsgTruncNoBufSize.client_skip
    def _testCmsgTruncNoBufSize(self):
        """Client: wait for the server's setsockopt, then send MSG."""
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc0(self):
        # Check that no ancillary data is received when ancillary
        # buffer size is zero.
        self.checkHopLimitTruncatedHeader(ancbufsize=0,
                                          ignoreflags=socket.MSG_CTRUNC)

    @testSingleCmsgTrunc0.client_skip
    def _testSingleCmsgTrunc0(self):
        """Client: wait for the server's setsockopt, then send MSG."""
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    # Check that no ancillary data is returned for various non-zero
    # (but still too small) buffer sizes.

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc1(self):
        """Ancillary buffer of one byte: too small for any cmsg header."""
        self.checkHopLimitTruncatedHeader(ancbufsize=1)

    @testSingleCmsgTrunc1.client_skip
    def _testSingleCmsgTrunc1(self):
        """Client: wait for the server's setsockopt, then send MSG."""
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc2Int(self):
        """Two ints' worth of space: still smaller than a cmsg header."""
        self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)

    @testSingleCmsgTrunc2Int.client_skip
    def _testSingleCmsgTrunc2Int(self):
        """Client: wait for the server's setsockopt, then send MSG."""
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTruncLen0Minus1(self):
        """One byte short of an empty cmsg."""
        self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)

    @testSingleCmsgTruncLen0Minus1.client_skip
    def _testSingleCmsgTruncLen0Minus1(self):
        """Client: wait for the server's setsockopt, then send MSG."""
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTruncInData(self):
        # Test truncation of a control message inside its associated
        # data. The message may be returned with its data truncated,
        # or not returned at all.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(
            self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 1)
        if ancdata:
            cmsg_level, cmsg_type, cmsg_data = ancdata[0]
            self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
            self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
            self.assertLess(len(cmsg_data), SIZEOF_INT)

    @testSingleCmsgTruncInData.client_skip
    def _testSingleCmsgTruncInData(self):
        """Client: wait for the server's setsockopt, then send MSG."""
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
        # Receive traffic class and hop limit into ancbufsize bytes of
        # ancillary data space, which should be large enough to
        # contain the first item, but too small to contain the header
        # of the second. Check that data is MSG, MSG_CTRUNC is set
        # (unless included in ignoreflags), and only one ancillary
        # data item is returned.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)
        self.assertEqual(len(ancdata), 1)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)

    # Try the above test with various buffer sizes.

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc0(self):
        """Exactly one item's space: the second item must be dropped."""
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
                                        ignoreflags=socket.MSG_CTRUNC)

    @testSecondCmsgTrunc0.client_skip
    def _testSecondCmsgTrunc0(self):
        """Client: wait for the server's setsockopt, then send MSG."""
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc1(self):
        """One spare byte: not enough for the second item's header."""
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)

    @testSecondCmsgTrunc1.client_skip
    def _testSecondCmsgTrunc1(self):
        """Client: wait for the server's setsockopt, then send MSG."""
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc2Int(self):
        """Two spare ints: still not a complete second cmsg header."""
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                        2 * SIZEOF_INT)

    @testSecondCmsgTrunc2Int.client_skip
    def _testSecondCmsgTrunc2Int(self):
        """Client: wait for the server's setsockopt, then send MSG."""
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncLen0Minus1(self):
        """One byte short of an empty second cmsg."""
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                        socket.CMSG_LEN(0) - 1)

    @testSecondCmsgTruncLen0Minus1.client_skip
    def _testSecondCmsgTruncLen0Minus1(self):
        """Client: wait for the server's setsockopt, then send MSG."""
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecomdCmsgTruncInData(self):
        # NOTE(review): "Secomd" is a long-standing typo for "Second";
        # renaming would also require renaming the paired client method
        # below (the client/server pairing is by name), so it is kept.
        # Test truncation of the second of two control messages inside
        # its associated data.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(
            self.serv_sock, len(MSG),
            socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
        cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
        cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        # remove() doubles as a check that each type appears only once.
        cmsg_types.remove(cmsg_type)
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)
        if ancdata:
            cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
            self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
            cmsg_types.remove(cmsg_type)
            self.assertLess(len(cmsg_data), SIZEOF_INT)
        self.assertEqual(ancdata, [])

    @testSecomdCmsgTruncInData.client_skip
    def _testSecomdCmsgTruncInData(self):
        """Client: wait for the server's setsockopt, then send MSG."""
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
                             SendrecvmsgConnectionlessBase,
                             ThreadedSocketTestMixin, UDPTestBase):
    """Base combining the datagram sendmsg/recvmsg mixins with a
    threaded IPv4 UDP socket pair."""
    pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
    """Run the connectionless sendmsg() tests over IPv4 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
    """Run the recvmsg() tests over IPv4 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
    """Run the recvmsg_into() tests over IPv4 UDP."""
    pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
                              SendrecvmsgConnectionlessBase,
                              ThreadedSocketTestMixin, UDP6TestBase):
    """Base combining the datagram sendmsg/recvmsg mixins with a
    threaded IPv6 UDP socket pair."""

    def checkRecvmsgAddress(self, addr1, addr2):
        """Compare the received address with the peer's, ignoring the
        trailing scope-ID field of the IPv6 address tuple."""
        without_scope1 = addr1[:-1]
        without_scope2 = addr2[:-1]
        self.assertEqual(without_scope1, without_scope2)
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
    """Run the connectionless sendmsg() tests over IPv6 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
    """Run the recvmsg() tests over IPv6 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
    """Run the recvmsg_into() tests over IPv6 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
                                      SendrecvmsgUDP6TestBase):
    """Run the RFC 3542 ancillary-data tests with recvmsg() over IPv6 UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
                                          RFC3542AncillaryTest,
                                          SendrecvmsgUDP6TestBase):
    """Run the RFC 3542 ancillary-data tests with recvmsg_into() over
    IPv6 UDP."""
    pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
                             ConnectedStreamTestMixin, TCPTestBase):
    """Base pairing the connected-socket mixins with a TCP stream."""
    pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
    """Run the stream sendmsg() tests over TCP."""
    pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
                     SendrecvmsgTCPTestBase):
    """Run the recvmsg() tests over TCP."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                         SendrecvmsgTCPTestBase):
    """Run the recvmsg_into() tests over TCP."""
    pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
                                    SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, SCTPStreamBase):
    """Base class for sendmsg()/recvmsg() tests over a connected SCTP
    stream pair."""
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
    """Run the stream sendmsg() tests over an SCTP stream socket."""
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgSCTPStreamTestBase):
    """Run the generic and stream recvmsg() tests over SCTP."""

    def testRecvmsgEOF(self):
        # Some kernels sporadically report ENOTCONN on SCTP instead of
        # a clean EOF; treat that as a skip rather than a failure.
        try:
            super().testRecvmsgEOF()
        except OSError as exc:
            if exc.errno != errno.ENOTCONN:
                raise
            self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgSCTPStreamTestBase):
    """Run the generic and stream recvmsg_into() tests over SCTP."""

    def testRecvmsgEOF(self):
        # Some kernels sporadically report ENOTCONN on SCTP instead of
        # a clean EOF; treat that as a skip rather than a failure.
        try:
            super().testRecvmsgEOF()
        except OSError as exc:
            if exc.errno != errno.ENOTCONN:
                raise
            self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, UnixStreamBase):
    """Base class for sendmsg()/recvmsg() tests over a connected Unix
    stream pair."""
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
    """Run the stream sendmsg() tests over an AF_UNIX stream socket."""
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgUnixStreamTestBase):
    """Run the generic and stream recvmsg() tests over AF_UNIX."""
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgUnixStreamTestBase):
    """Run the generic and stream recvmsg_into() tests over AF_UNIX."""
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
    """Run the SCM_RIGHTS file-descriptor-passing tests with recvmsg()."""
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
                                     SendrecvmsgUnixStreamTestBase):
    """Run the SCM_RIGHTS file-descriptor-passing tests with
    recvmsg_into()."""
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set.  These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.

class InterruptedTimeoutBase(unittest.TestCase):
    """Base class for interrupted send/receive tests.

    Installs a SIGALRM handler that raises ZeroDivisionError (via
    ``1 / 0``) so an interrupted call is detected unambiguously, and
    restores the previous handler on teardown.
    """

    def setUp(self):
        super().setUp()
        # The handler raises ZeroDivisionError; tests assert on that
        # exception to prove the call was interrupted by the signal.
        orig_alrm_handler = signal.signal(signal.SIGALRM,
                                          lambda signum, frame: 1 / 0)
        self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)

    # Timeout for socket operations
    timeout = 4.0

    # Provide setAlarm() method to schedule delivery of SIGALRM after
    # given number of seconds, or cancel it if zero, and an
    # appropriate time value to use.  Use setitimer() if available.
    if hasattr(signal, "setitimer"):
        alarm_time = 0.05

        def setAlarm(self, seconds):
            signal.setitimer(signal.ITIMER_REAL, seconds)
    else:
        # Old systems may deliver the alarm up to one second early
        alarm_time = 2

        def setAlarm(self, seconds):
            signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
    """Test interrupting the recv*() methods with signals when a
    timeout is set."""

    def setUp(self):
        super().setUp()
        self.serv.settimeout(self.timeout)

    def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError
        # (propagated out of the SIGALRM handler installed by
        # InterruptedTimeoutBase) when interrupted by a signal.
        try:
            self.setAlarm(self.alarm_time)
            with self.assertRaises(ZeroDivisionError):
                func(*args, **kwargs)
        finally:
            # Always cancel any pending alarm.
            self.setAlarm(0)

    def testInterruptedRecvTimeout(self):
        self.checkInterruptedRecv(self.serv.recv, 1024)

    def testInterruptedRecvIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))

    def testInterruptedRecvfromTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom, 1024)

    def testInterruptedRecvfromIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))

    @requireAttrs(socket.socket, "recvmsg")
    def testInterruptedRecvmsgTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg, 1024)

    @requireAttrs(socket.socket, "recvmsg_into")
    def testInterruptedRecvmsgIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
                                 ThreadSafeCleanupTestCase,
                                 SocketListeningTestMixin, TCPTestBase):
    """Test interrupting the interruptible send*() methods with signals
    when a timeout is set."""

    def setUp(self):
        super().setUp()
        self.serv_conn = self.newSocket()
        self.addCleanup(self.serv_conn.close)
        # Use a thread to complete the connection, but wait for it to
        # terminate before running the test, so that there is only one
        # thread to accept the signal.
        cli_thread = threading.Thread(target=self.doConnect)
        cli_thread.start()
        self.cli_conn, addr = self.serv.accept()
        self.addCleanup(self.cli_conn.close)
        cli_thread.join()
        self.serv_conn.settimeout(self.timeout)

    def doConnect(self):
        self.serv_conn.connect(self.serv_addr)

    def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError (propagated out of the SIGALRM handler
        # installed by InterruptedTimeoutBase) when interrupted by a
        # signal.
        try:
            with self.assertRaises(ZeroDivisionError):
                while True:
                    self.setAlarm(self.alarm_time)
                    func(*args, **kwargs)
        finally:
            # Always cancel any pending alarm.
            self.setAlarm(0)

    # Issue #12958: The following tests have problems on OS X prior to 10.7
    @support.requires_mac_ver(10, 7)
    def testInterruptedSendTimeout(self):
        self.checkInterruptedSend(self.serv_conn.send, b"a"*512)

    @support.requires_mac_ver(10, 7)
    def testInterruptedSendtoTimeout(self):
        # Passing an actual address here as Python's wrapper for
        # sendto() doesn't allow passing a zero-length one; POSIX
        # requires that the address is ignored since the socket is
        # connection-mode, however.
        self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
                                  self.serv_addr)

    @support.requires_mac_ver(10, 7)
    @requireAttrs(socket.socket, "sendmsg")
    def testInterruptedSendmsgTimeout(self):
        self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
    """Closing the accepted connection is seen as EOF by the peer, and
    close() is safe to call repeatedly."""

    def testClose(self):
        conn, addr = self.serv.accept()
        conn.close()

        sock = self.cli
        readable, _, _ = select.select([sock], [], [], 1.0)
        self.assertEqual(readable, [sock])
        # EOF: recv() on the peer returns an empty bytes object.
        self.assertEqual(sock.recv(1), b'')

        # Calling close() many times should be safe.
        conn.close()
        conn.close()

    def _testClose(self):
        self.cli.connect((HOST, self.port))
        time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
    """Basic sanity checks for sockets produced by socket.socketpair()."""

    def __init__(self, methodName='runTest'):
        SocketPairTest.__init__(self, methodName=methodName)

    def _check_defaults(self, sock):
        # socketpair() uses AF_UNIX where available, AF_INET otherwise.
        self.assertIsInstance(sock, socket.socket)
        expected_family = (socket.AF_UNIX if hasattr(socket, 'AF_UNIX')
                           else socket.AF_INET)
        self.assertEqual(sock.family, expected_family)
        self.assertEqual(sock.type, socket.SOCK_STREAM)
        self.assertEqual(sock.proto, 0)

    def _testDefaults(self):
        self._check_defaults(self.cli)

    def testDefaults(self):
        self._check_defaults(self.serv)

    def testRecv(self):
        self.assertEqual(self.serv.recv(1024), MSG)

    def _testRecv(self):
        self.cli.send(MSG)

    def testSend(self):
        self.serv.send(MSG)

    def _testSend(self):
        self.assertEqual(self.cli.recv(1024), MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
    """Tests of non-blocking mode and timeout handling on TCP sockets.

    Each test*() method runs on the server socket while the matching
    _test*() method runs concurrently in the client thread.
    """

    def __init__(self, methodName='runTest'):
        # Event used to synchronise the server and client threads.
        self.event = threading.Event()
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)

    def testSetBlocking(self):
        # Testing whether set blocking works
        self.serv.setblocking(True)
        self.assertIsNone(self.serv.gettimeout())
        self.assertTrue(self.serv.getblocking())
        if fcntl:
            self.assertTrue(_is_fd_in_blocking_mode(self.serv))

        self.serv.setblocking(False)
        self.assertEqual(self.serv.gettimeout(), 0.0)
        self.assertFalse(self.serv.getblocking())
        if fcntl:
            self.assertFalse(_is_fd_in_blocking_mode(self.serv))

        self.serv.settimeout(None)
        self.assertTrue(self.serv.getblocking())
        if fcntl:
            self.assertTrue(_is_fd_in_blocking_mode(self.serv))

        self.serv.settimeout(0)
        self.assertFalse(self.serv.getblocking())
        self.assertEqual(self.serv.gettimeout(), 0)
        if fcntl:
            self.assertFalse(_is_fd_in_blocking_mode(self.serv))

        self.serv.settimeout(10)
        self.assertTrue(self.serv.getblocking())
        self.assertEqual(self.serv.gettimeout(), 10)
        if fcntl:
            # When a Python socket has a non-zero timeout, it's
            # switched internally to a non-blocking mode.
            # Later, sock.sendall(), sock.recv(), and other socket
            # operations use a `select()` call and handle EWOULDBLOCK/EGAIN
            # on all socket operations.  That's how timeouts are
            # enforced.
            self.assertFalse(_is_fd_in_blocking_mode(self.serv))

        self.serv.settimeout(0)
        self.assertFalse(self.serv.getblocking())
        if fcntl:
            self.assertFalse(_is_fd_in_blocking_mode(self.serv))

        start = time.time()
        try:
            self.serv.accept()
        except OSError:
            pass
        end = time.time()
        self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")

    def _testSetBlocking(self):
        pass

    @support.cpython_only
    def testSetBlocking_overflow(self):
        # Issue 15989
        import _testcapi
        if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
            self.skipTest('needs UINT_MAX < ULONG_MAX')

        self.serv.setblocking(False)
        self.assertEqual(self.serv.gettimeout(), 0.0)

        self.serv.setblocking(_testcapi.UINT_MAX + 1)
        self.assertIsNone(self.serv.gettimeout())

    _testSetBlocking_overflow = support.cpython_only(_testSetBlocking)

    @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
                         'test needs socket.SOCK_NONBLOCK')
    @support.requires_linux_version(2, 6, 28)
    def testInitNonBlocking(self):
        # reinit server socket
        self.serv.close()
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
                                                  socket.SOCK_NONBLOCK)
        self.assertFalse(self.serv.getblocking())
        self.assertEqual(self.serv.gettimeout(), 0)
        self.port = support.bind_port(self.serv)
        self.serv.listen()
        # actual testing
        start = time.time()
        try:
            self.serv.accept()
        except OSError:
            pass
        end = time.time()
        self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")

    def _testInitNonBlocking(self):
        pass

    def testInheritFlags(self):
        # Issue #7995: when calling accept() on a listening socket with a
        # timeout, the resulting socket should not be non-blocking.
        self.serv.settimeout(10)
        conn = None
        try:
            conn, addr = self.serv.accept()
            message = conn.recv(len(MSG))
        finally:
            # conn stays None if accept() itself raised (e.g. timed
            # out); guard so the original exception is not masked by a
            # NameError from the cleanup.
            if conn is not None:
                conn.close()
            self.serv.settimeout(None)

    def _testInheritFlags(self):
        time.sleep(0.1)
        self.cli.connect((HOST, self.port))
        time.sleep(0.5)
        self.cli.send(MSG)

    def testAccept(self):
        # Testing non-blocking accept
        self.serv.setblocking(False)

        # connect() didn't start: non-blocking accept() fails
        with self.assertRaises(BlockingIOError):
            conn, addr = self.serv.accept()
        self.event.set()

        read, write, err = select.select([self.serv], [], [], MAIN_TIMEOUT)
        if self.serv not in read:
            self.fail("Error trying to do accept after select.")

        # connect() completed: non-blocking accept() doesn't block
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        self.assertIsNone(conn.gettimeout())

    def _testAccept(self):
        # don't connect before event is set to check
        # that non-blocking accept() raises BlockingIOError
        self.event.wait()

        self.cli.connect((HOST, self.port))

    def testConnect(self):
        # Testing non-blocking connect
        conn, addr = self.serv.accept()
        conn.close()

    def _testConnect(self):
        self.cli.settimeout(10)
        self.cli.connect((HOST, self.port))

    def testRecv(self):
        # Testing non-blocking recv
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        conn.setblocking(False)

        # the server didn't send data yet: non-blocking recv() fails
        with self.assertRaises(BlockingIOError):
            msg = conn.recv(len(MSG))
        self.event.set()

        read, write, err = select.select([conn], [], [], MAIN_TIMEOUT)
        if conn not in read:
            self.fail("Error during select call to non-blocking socket.")

        # the server sent data yet: non-blocking recv() doesn't block
        msg = conn.recv(len(MSG))
        self.assertEqual(msg, MSG)

    def _testRecv(self):
        self.cli.connect((HOST, self.port))

        # don't send anything before event is set to check
        # that non-blocking recv() raises BlockingIOError
        self.event.wait()

        # send data: recv() will no longer block
        self.cli.sendall(MSG)
class FileObjectClassTestCase(SocketConnectedTest):
    """Unit tests for the object returned by socket.makefile()

    self.read_file is the io object returned by makefile() on
    the client connection. You can read from this file to
    get output from the server.

    self.write_file is the io object returned by makefile() on the
    server connection. You can write to this file to send output
    to the client.
    """

    bufsize = -1 # Use default buffer size
    encoding = 'utf-8'
    errors = 'strict'
    newline = None

    # Mode and payload for each side; text-mode subclasses override these.
    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'wb'
    write_msg = MSG

    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def setUp(self):
        # Four events used to synchronise the server and client threads.
        self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
            threading.Event() for i in range(4)]
        SocketConnectedTest.setUp(self)
        self.read_file = self.cli_conn.makefile(
            self.read_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)

    def tearDown(self):
        self.serv_finished.set()
        self.read_file.close()
        self.assertTrue(self.read_file.closed)
        self.read_file = None
        SocketConnectedTest.tearDown(self)

    def clientSetUp(self):
        SocketConnectedTest.clientSetUp(self)
        self.write_file = self.serv_conn.makefile(
            self.write_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)

    def clientTearDown(self):
        self.cli_finished.set()
        self.write_file.close()
        self.assertTrue(self.write_file.closed)
        self.write_file = None
        SocketConnectedTest.clientTearDown(self)

    def testReadAfterTimeout(self):
        # Issue #7322: A file object must disallow further reads
        # after a timeout has occurred.
        self.cli_conn.settimeout(1)
        self.read_file.read(3)
        # First read raises a timeout
        self.assertRaises(socket.timeout, self.read_file.read, 1)
        # Second read is disallowed
        with self.assertRaises(OSError) as ctx:
            self.read_file.read(1)
        self.assertIn("cannot read from timed out object", str(ctx.exception))

    def _testReadAfterTimeout(self):
        # Send only 3 of the message's bytes so the server's second
        # read has nothing to deliver and times out.
        self.write_file.write(self.write_msg[0:3])
        self.write_file.flush()
        self.serv_finished.wait()

    def testSmallRead(self):
        # Performing small file read test
        first_seg = self.read_file.read(len(self.read_msg)-3)
        second_seg = self.read_file.read(3)
        msg = first_seg + second_seg
        self.assertEqual(msg, self.read_msg)

    def _testSmallRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testFullRead(self):
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)

    def _testFullRead(self):
        self.write_file.write(self.write_msg)
        # Closing the write side delivers EOF to the reader.
        self.write_file.close()

    def testUnbufferedRead(self):
        # Performing unbuffered file read test
        buf = type(self.read_msg)()
        while 1:
            char = self.read_file.read(1)
            if not char:
                break
            buf += char
        self.assertEqual(buf, self.read_msg)

    def _testUnbufferedRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testReadline(self):
        # Performing file readline test
        line = self.read_file.readline()
        self.assertEqual(line, self.read_msg)

    def _testReadline(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testCloseAfterMakefile(self):
        # The file returned by makefile should keep the socket open.
        self.cli_conn.close()
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)

    def _testCloseAfterMakefile(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testMakefileAfterMakefileClose(self):
        # Closing the makefile() wrapper must leave the underlying
        # socket usable.
        self.read_file.close()
        msg = self.cli_conn.recv(len(MSG))
        if isinstance(self.read_msg, str):
            msg = msg.decode()
        self.assertEqual(msg, self.read_msg)

    def _testMakefileAfterMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testClosedAttr(self):
        self.assertTrue(not self.read_file.closed)

    def _testClosedAttr(self):
        self.assertTrue(not self.write_file.closed)

    def testAttributes(self):
        self.assertEqual(self.read_file.mode, self.read_mode)
        self.assertEqual(self.read_file.name, self.cli_conn.fileno())

    def _testAttributes(self):
        self.assertEqual(self.write_file.mode, self.write_mode)
        self.assertEqual(self.write_file.name, self.serv_conn.fileno())

    def testRealClose(self):
        self.read_file.close()
        self.assertRaises(ValueError, self.read_file.fileno)
        self.cli_conn.close()
        self.assertRaises(OSError, self.cli_conn.getsockname)

    def _testRealClose(self):
        pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):

    """Repeat the tests from FileObjectClassTestCase with bufsize==0.

    In this case (and in this case only), it should be possible to
    create a file object, read a line from it, create another file
    object, read another line from it, without loss of data in the
    first file object's buffer. Note that http.client relies on this
    when reading multiple requests from the same socket."""

    bufsize = 0 # Use unbuffered mode

    def testUnbufferedReadline(self):
        # Read a line, create a new file object, read another line with it
        line = self.read_file.readline() # first line
        self.assertEqual(line, b"A. " + self.write_msg) # first line
        self.read_file = self.cli_conn.makefile('rb', 0)
        line = self.read_file.readline() # second line
        self.assertEqual(line, b"B. " + self.write_msg) # second line

    def _testUnbufferedReadline(self):
        self.write_file.write(b"A. " + self.write_msg)
        self.write_file.write(b"B. " + self.write_msg)
        self.write_file.flush()

    def testMakefileClose(self):
        # The file returned by makefile should keep the socket open...
        self.cli_conn.close()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, self.read_msg)
        # ...until the file is itself closed
        self.read_file.close()
        self.assertRaises(OSError, self.cli_conn.recv, 1024)

    def _testMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testMakefileCloseSocketDestroy(self):
        # Closing the makefile() wrapper should drop exactly one
        # reference to the socket object.
        refcount_before = sys.getrefcount(self.cli_conn)
        self.read_file.close()
        refcount_after = sys.getrefcount(self.cli_conn)
        self.assertEqual(refcount_before - 1, refcount_after)

    def _testMakefileCloseSocketDestroy(self):
        pass

    # Non-blocking ops
    # NOTE: to set `read_file` as non-blocking, we must call
    # `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).

    def testSmallReadNonBlocking(self):
        self.cli_conn.setblocking(False)
        # No data sent yet: non-blocking reads return None, not data.
        self.assertEqual(self.read_file.readinto(bytearray(10)), None)
        self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
        self.evt1.set()
        self.evt2.wait(1.0)
        first_seg = self.read_file.read(len(self.read_msg) - 3)
        if first_seg is None:
            # Data not arrived (can happen under Windows), wait a bit
            time.sleep(0.5)
            first_seg = self.read_file.read(len(self.read_msg) - 3)
        buf = bytearray(10)
        n = self.read_file.readinto(buf)
        self.assertEqual(n, 3)
        msg = first_seg + buf[:n]
        self.assertEqual(msg, self.read_msg)
        # Message fully consumed: further non-blocking reads return None.
        self.assertEqual(self.read_file.readinto(bytearray(16)), None)
        self.assertEqual(self.read_file.read(1), None)

    def _testSmallReadNonBlocking(self):
        self.evt1.wait(1.0)
        self.write_file.write(self.write_msg)
        self.write_file.flush()
        self.evt2.set()
        # Avoid closing the socket before the server test has finished,
        # otherwise system recv() will return 0 instead of EWOULDBLOCK.
        self.serv_finished.wait(5.0)

    def testWriteNonBlocking(self):
        self.cli_finished.wait(5.0)
        # The client thread can't skip directly - the SkipTest exception
        # would appear as a failure.
        if self.serv_skipped:
            self.skipTest(self.serv_skipped)

    def _testWriteNonBlocking(self):
        self.serv_skipped = None
        self.serv_conn.setblocking(False)
        # Try to saturate the socket buffer pipe with repeated large writes.
        BIG = b"x" * support.SOCK_MAX_SIZE
        LIMIT = 10
        # The first write() succeeds since a chunk of data can be buffered
        n = self.write_file.write(BIG)
        self.assertGreater(n, 0)
        for i in range(LIMIT):
            n = self.write_file.write(BIG)
            if n is None:
                # Succeeded
                break
            self.assertGreater(n, 0)
        else:
            # Let us know that this test didn't manage to establish
            # the expected conditions. This is not a failure in itself but,
            # if it happens repeatedly, the test should be fixed.
            self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    """Repeat the file-object tests with line buffering."""

    bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    """Repeat the file-object tests with a tiny (2-byte) buffer."""

    bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""
    # Read side is text; write side stays binary.
    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'wb'
    write_msg = MSG
    newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""
    # Write side is text; read side stays binary.
    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
    newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""
    # Both sides are text.
    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
    newline = ''
class NetworkConnectionTest(object):
    """Mixin that establishes the client side via
    socket.create_connection() instead of a plain connect()."""

    def clientSetUp(self):
        # We're inherited below by BasicTCPTest2, which also inherits
        # BasicTCPTest, which defines self.port referenced below.
        self.cli = socket.create_connection((HOST, self.port))
        self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
    """Re-run BasicTCPTest with the client side created via
    socket.create_connection(), to check that NetworkConnection does not
    break existing TCP functionality.
    """
class NetworkConnectionNoServer(unittest.TestCase):
    """Behaviour of connect()/create_connection() when nothing is
    listening at the target address."""

    class MockSocket(socket.socket):
        # A socket whose connect() always times out.
        def connect(self, *args):
            raise socket.timeout('timed out')

    @contextlib.contextmanager
    def mocked_socket_module(self):
        """Temporarily replace socket.socket with MockSocket."""
        real_socket = socket.socket
        socket.socket = self.MockSocket
        try:
            yield
        finally:
            socket.socket = real_socket

    def test_connect(self):
        port = support.find_unused_port()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        with self.assertRaises(OSError) as ctx:
            sock.connect((HOST, port))
        self.assertEqual(ctx.exception.errno, errno.ECONNREFUSED)

    def test_create_connection(self):
        # Issue #9792: errors raised by create_connection() should have
        # a proper errno attribute.
        port = support.find_unused_port()
        with self.assertRaises(OSError) as ctx:
            socket.create_connection((HOST, port))

        # Issue #16257: getaddrinfo('localhost') may return both an
        # IPv4 and an IPv6 address.  create_connection() tries each in
        # turn and propagates the last exception, which on some
        # platforms (e.g. Solaris) is ENETUNREACH rather than
        # ECONNREFUSED, so compare against the platform-specific set of
        # acceptable errnos.
        expected_errnos = support.get_socket_conn_refused_errs()
        self.assertIn(ctx.exception.errno, expected_errnos)

    def test_create_connection_timeout(self):
        # Issue #9792: create_connection() should not recast timeout errors
        # as generic socket errors.
        with self.mocked_socket_module():
            with self.assertRaises(socket.timeout):
                socket.create_connection((HOST, 1234))
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
    """Check attributes (family, source address, timeout) of sockets
    returned by socket.create_connection().

    The server side of each test is _justAccept(); every test* name is
    simply aliased to it, while the _test*() methods run in the client
    thread and perform the actual assertions.
    """

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.source_port = support.find_unused_port()

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)

    def _justAccept(self):
        # Shared server side: accept one connection and close it.
        conn, addr = self.serv.accept()
        conn.close()

    testFamily = _justAccept
    def _testFamily(self):
        # 2 == AF_INET.
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.family, 2)

    testSourceAddress = _justAccept
    def _testSourceAddress(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30,
                source_address=('', self.source_port))
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.getsockname()[1], self.source_port)
        # The port number being used is sufficient to show that the bind()
        # call happened.

    testTimeoutDefault = _justAccept
    def _testTimeoutDefault(self):
        # passing no explicit timeout uses socket's global default
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(42)
        try:
            self.cli = socket.create_connection((HOST, self.port))
            self.addCleanup(self.cli.close)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), 42)

    testTimeoutNone = _justAccept
    def _testTimeoutNone(self):
        # None timeout means the same as sock.settimeout(None)
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            self.cli = socket.create_connection((HOST, self.port), timeout=None)
            self.addCleanup(self.cli.close)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), None)

    testTimeoutValueNamed = _justAccept
    def _testTimeoutValueNamed(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        self.assertEqual(self.cli.gettimeout(), 30)

    testTimeoutValueNonamed = _justAccept
    def _testTimeoutValueNonamed(self):
        # Same as above, but passing the timeout positionally.
        self.cli = socket.create_connection((HOST, self.port), 30)
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
    """Timeout behaviour of sockets returned by create_connection()."""

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)

    def testInsideTimeout(self):
        # Server side (shared by both tests below): accept, then reply
        # only after a 3-second delay.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        time.sleep(3)
        conn.send(b"done!")
    testOutsideTimeout = testInsideTimeout

    def _testInsideTimeout(self):
        # No timeout: the delayed reply is eventually received.
        self.cli = sock = socket.create_connection((HOST, self.port))
        data = sock.recv(5)
        self.assertEqual(data, b"done!")

    def _testOutsideTimeout(self):
        # 1-second timeout: the 3-second delayed reply must time out.
        self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
        self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
    """Timeout behaviour of a blocking TCP accept()."""

    def testTCPTimeout(self):
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.accept()
        self.assertRaises(socket.timeout, raise_timeout,
                          "Error generating a timeout exception (TCP)")

    def testTimeoutZero(self):
        # A zero timeout must fail immediately with a plain OSError,
        # not with socket.timeout.
        ok = False
        try:
            self.serv.settimeout(0.0)
            foo = self.serv.accept()
        except socket.timeout:
            self.fail("caught timeout instead of error (TCP)")
        except OSError:
            ok = True
        except:
            self.fail("caught unexpected exception (TCP)")
        if not ok:
            self.fail("accept() returned success when we did not expect it")

    @unittest.skipUnless(hasattr(signal, 'alarm'),
                         'test needs signal.alarm()')
    def testInterruptedTimeout(self):
        # XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
        # the bug should have existed on all platforms.
        self.serv.settimeout(5.0)   # must be longer than alarm

        class Alarm(Exception):
            pass

        def alarm_handler(signal, frame):
            raise Alarm

        old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
        try:
            try:
                # The alarm must fire while accept() is still waiting,
                # and the Alarm exception (not socket.timeout) must
                # propagate out of the interrupted accept().
                signal.alarm(2)    # POSIX allows alarm to be up to 1 second early
                foo = self.serv.accept()
            except socket.timeout:
                self.fail("caught timeout instead of Alarm")
            except Alarm:
                pass
            except:
                self.fail("caught other exception instead of Alarm:"
                          " %s(%s):\n%s" %
                          (sys.exc_info()[:2] + (traceback.format_exc(),)))
            else:
                self.fail("nothing caught")
            finally:
                signal.alarm(0)         # shut off alarm
        except Alarm:
            self.fail("got Alarm in wrong place")
        finally:
            # no alarm can be pending.  Safe to restore old handler.
            signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
    """Timeout behaviour of a blocking UDP recv()."""

    def testUDPTimeout(self):
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.recv(1024)
        self.assertRaises(socket.timeout, raise_timeout,
                          "Error generating a timeout exception (UDP)")

    def testTimeoutZero(self):
        # A zero timeout must fail immediately with a plain OSError,
        # not with socket.timeout.
        ok = False
        try:
            self.serv.settimeout(0.0)
            foo = self.serv.recv(1024)
        except socket.timeout:
            self.fail("caught timeout instead of error (UDP)")
        except OSError:
            ok = True
        except:
            self.fail("caught unexpected exception (UDP)")
        if not ok:
            self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
    """Sanity checks for the socket exception hierarchy and error raising."""

    def testExceptionTree(self):
        # All socket-specific exceptions derive from OSError, which in
        # turn derives from Exception.
        self.assertTrue(issubclass(OSError, Exception))
        for exc in (socket.herror, socket.gaierror, socket.timeout):
            self.assertTrue(issubclass(exc, OSError))

    def test_setblocking_invalidfd(self):
        # Regression test for issue #28471
        base = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        dup = socket.socket(
            socket.AF_INET, socket.SOCK_STREAM, 0, base.fileno())
        base.close()
        self.addCleanup(dup.detach)

        # The duplicated socket's fd was closed with base, so the
        # operation must fail.
        with self.assertRaises(OSError):
            dup.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
    """Tests for Linux's abstract AF_UNIX namespace: addresses whose
    first byte is NUL and which do not exist on the filesystem."""

    # Total address length limit: binding succeeds at this length and
    # fails one byte over it (see testMaxName / testNameOverflow).
    UNIX_PATH_MAX = 108

    def testLinuxAbstractNamespace(self):
        address = b"\x00python-test-hello\x00\xff"
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
            s1.bind(address)
            s1.listen()
            with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
                s2.connect(s1.getsockname())
                with s1.accept()[0] as s3:
                    self.assertEqual(s1.getsockname(), address)
                    self.assertEqual(s2.getpeername(), address)

    def testMaxName(self):
        address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.bind(address)
            self.assertEqual(s.getsockname(), address)

    def testNameOverflow(self):
        address = "\x00" + "h" * self.UNIX_PATH_MAX
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            self.assertRaises(OSError, s.bind, address)

    def testStrName(self):
        # Check that an abstract name can be passed as a string.
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            s.bind("\x00python\x00test\x00")
            self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
        finally:
            s.close()

    def testBytearrayName(self):
        # Check that an abstract name can be passed as a bytearray.
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.bind(bytearray(b"\x00python\x00test\x00"))
            self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
    """Binding AF_UNIX sockets to filesystem pathnames in various
    encodings (str, bytes, surrogateescape, unencodable)."""

    def setUp(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

    def tearDown(self):
        self.sock.close()

    def encoded(self, path):
        # Return the given path encoded in the file system encoding,
        # or skip the test if this is not possible.
        try:
            return os.fsencode(path)
        except UnicodeEncodeError:
            self.skipTest(
                "Pathname {0!a} cannot be represented in file "
                "system encoding {1!r}".format(
                    path, sys.getfilesystemencoding()))

    def bind(self, sock, path):
        # Bind the socket, skipping the test when the platform rejects
        # the path as too long for sun_path.
        try:
            support.bind_unix_socket(sock, path)
        except OSError as e:
            if str(e) == "AF_UNIX path too long":
                self.skipTest(
                    "Pathname {0!a} is too long to serve as an AF_UNIX path"
                    .format(path))
            else:
                raise

    def testUnbound(self):
        # Issue #30205 (note getsockname() can return None on OS X)
        self.assertIn(self.sock.getsockname(), ('', None))

    def testStrAddr(self):
        # Test binding to and retrieving a normal string pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testBytesAddr(self):
        # Test binding to a bytes pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, self.encoded(path))
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testSurrogateescapeBind(self):
        # Test binding to a valid non-ASCII pathname, with the
        # non-ASCII bytes supplied using surrogateescape encoding.
        path = os.path.abspath(support.TESTFN_UNICODE)
        b = self.encoded(path)
        self.bind(self.sock, b.decode("ascii", "surrogateescape"))
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testUnencodableAddr(self):
        # Test binding to a pathname that cannot be encoded in the
        # file system encoding.
        if support.TESTFN_UNENCODABLE is None:
            self.skipTest("No unencodable filename available")
        path = os.path.abspath(support.TESTFN_UNENCODABLE)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
    """
    Test the buffer versions of socket.recv() and socket.send().

    Each test<Name> method runs on the client side while its paired
    _test<Name> method runs concurrently on the server side and supplies
    the data (framework convention of SocketConnectedTest).
    """
    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecvIntoArray(self):
        # recv_into() an array buffer sized exactly to the message.
        buf = array.array("B", [0] * len(MSG))
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))

        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvIntoArray(self):
        buf = bytes(MSG)
        self.serv_conn.send(buf)

    def testRecvIntoBytearray(self):
        # recv_into() an oversized bytearray; only the prefix is written.
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))

        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    _testRecvIntoBytearray = _testRecvIntoArray

    def testRecvIntoMemoryview(self):
        # recv_into() through a memoryview wrapper over a bytearray.
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))

        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    _testRecvIntoMemoryview = _testRecvIntoArray

    def testRecvFromIntoArray(self):
        buf = array.array("B", [0] * len(MSG))
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))

        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvFromIntoArray(self):
        buf = bytes(MSG)
        self.serv_conn.send(buf)

    def testRecvFromIntoBytearray(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))

        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    _testRecvFromIntoBytearray = _testRecvFromIntoArray

    def testRecvFromIntoMemoryview(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))

        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    _testRecvFromIntoMemoryview = _testRecvFromIntoArray

    def testRecvFromIntoSmallBuffer(self):
        # See issue #20246: requesting more bytes than the buffer holds
        # must raise ValueError instead of overflowing the buffer.
        buf = bytearray(8)
        self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)

    def _testRecvFromIntoSmallBuffer(self):
        self.serv_conn.send(MSG)

    def testRecvFromIntoEmptyBuffer(self):
        # Zero-length buffers are accepted (both implicit and explicit size).
        buf = bytearray()
        self.cli_conn.recvfrom_into(buf)
        self.cli_conn.recvfrom_into(buf, 0)

    _testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
# TIPC name-sequence parameters shared by the TIPC tests below: the service
# type and the lower/upper bounds of the published name range.
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
    """Check if the TIPC module is loaded

    The TIPC module is not loaded automatically on Ubuntu and probably
    other Linux distros.
    """
    if not hasattr(socket, "AF_TIPC"):
        return False
    try:
        modules = open("/proc/modules")
    except (FileNotFoundError, IsADirectoryError, PermissionError):
        # It's ok if the file does not exist, is a directory or if we
        # have not the permission to read it.
        return False
    # Loaded modules are listed one per line, name first.
    with modules:
        return any(line.startswith("tipc ") for line in modules)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
    """Datagram (SOCK_RDM) round-trip over a TIPC name sequence."""

    def testRDM(self):
        srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        self.addCleanup(srv.close)
        self.addCleanup(cli.close)

        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                   TIPC_LOWER, TIPC_UPPER)
        srv.bind(srvaddr)

        # Address a single name in the middle of the published range.
        sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                    TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        cli.sendto(MSG, sendaddr)

        msg, recvaddr = srv.recvfrom(1024)

        self.assertEqual(cli.getsockname(), recvaddr)
        self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
    """Stream (SOCK_STREAM) TIPC test using the ThreadableTest
    client/server rendezvous: setUp runs the server side, clientSetUp
    runs in a second thread."""

    def __init__(self, methodName = 'runTest'):
        unittest.TestCase.__init__(self, methodName = methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.srv.close)
        self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                   TIPC_LOWER, TIPC_UPPER)
        self.srv.bind(srvaddr)
        self.srv.listen()
        # Signal the client thread that the server is ready, then block
        # in accept() waiting for its connection.
        self.serverExplicitReady()
        self.conn, self.connaddr = self.srv.accept()
        self.addCleanup(self.conn.close)

    def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
        # accept() call; sleep a little while to avoid it, otherwise
        # we could get an exception
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.cli.close)
        addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        self.cli.connect(addr)
        self.cliaddr = self.cli.getsockname()

    def testStream(self):
        # Server side: receive the message sent by _testStream.
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)
        self.assertEqual(self.cliaddr, self.connaddr)

    def _testStream(self):
        # Client side: send and close.
        self.cli.send(MSG)
        self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
    """Context-manager behavior of socket objects and create_connection():
    the socket must be closed on block exit, even after errors or an
    explicit close inside the block."""

    def _testSocketClass(self):
        # base test
        with socket.socket() as sock:
            self.assertFalse(sock._closed)
        self.assertTrue(sock._closed)
        # close inside with block
        with socket.socket() as sock:
            sock.close()
        self.assertTrue(sock._closed)
        # exception inside with block
        with socket.socket() as sock:
            # sendall on an unconnected socket raises OSError; __exit__
            # must still close the socket.
            self.assertRaises(OSError, sock.sendall, b'foo')
        self.assertTrue(sock._closed)

    def testCreateConnectionBase(self):
        # Server side: echo whatever the client sends.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)

    def _testCreateConnectionBase(self):
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            self.assertFalse(sock._closed)
            sock.sendall(b'foo')
            self.assertEqual(sock.recv(1024), b'foo')
        self.assertTrue(sock._closed)

    def testCreateConnectionClose(self):
        # Server side: echo once (client closes early, so this may no-op).
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)

    def _testCreateConnectionClose(self):
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            sock.close()
        self.assertTrue(sock._closed)
        self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
    """File-descriptor inheritability of sockets: new sockets default to
    non-inheritable (PEP 446), and get/set_inheritable must agree with the
    underlying FD_CLOEXEC flag."""

    @unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
                         "SOCK_CLOEXEC not defined")
    @support.requires_linux_version(2, 6, 28)
    def test_SOCK_CLOEXEC(self):
        with socket.socket(socket.AF_INET,
                           socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
            # The CLOEXEC bit is stripped from the reported type.
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertFalse(s.get_inheritable())

    def test_default_inheritable(self):
        sock = socket.socket()
        with sock:
            self.assertEqual(sock.get_inheritable(), False)

    def test_dup(self):
        # dup() must preserve non-inheritability.
        sock = socket.socket()
        with sock:
            newsock = sock.dup()
            sock.close()
            with newsock:
                self.assertEqual(newsock.get_inheritable(), False)

    def test_set_inheritable(self):
        sock = socket.socket()
        with sock:
            sock.set_inheritable(True)
            self.assertEqual(sock.get_inheritable(), True)

            sock.set_inheritable(False)
            self.assertEqual(sock.get_inheritable(), False)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_get_inheritable_cloexec(self):
        sock = socket.socket()
        with sock:
            fd = sock.fileno()
            self.assertEqual(sock.get_inheritable(), False)

            # clear FD_CLOEXEC flag
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            flags &= ~fcntl.FD_CLOEXEC
            fcntl.fcntl(fd, fcntl.F_SETFD, flags)

            # get_inheritable() must reflect the change made behind its back.
            self.assertEqual(sock.get_inheritable(), True)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_set_inheritable_cloexec(self):
        sock = socket.socket()
        with sock:
            fd = sock.fileno()
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             fcntl.FD_CLOEXEC)

            # set_inheritable(True) must clear FD_CLOEXEC on the fd.
            sock.set_inheritable(True)
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             0)

    def test_socketpair(self):
        s1, s2 = socket.socketpair()
        self.addCleanup(s1.close)
        self.addCleanup(s2.close)
        self.assertEqual(s1.get_inheritable(), False)
        self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
                     "SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
    """Interaction of SOCK_NONBLOCK, setblocking(), settimeout() and
    setdefaulttimeout() with the socket's reported type, timeout, and the
    O_NONBLOCK flag on the underlying fd."""

    def checkNonblock(self, s, nonblock=True, timeout=0.0):
        # Assert that socket *s* is in the expected (non-)blocking state,
        # checking both the Python-level view and the fd's O_NONBLOCK flag.
        if nonblock:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertEqual(s.gettimeout(), timeout)
            self.assertTrue(
                fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
            if timeout == 0:
                # timeout == 0: means that getblocking() must be False.
                self.assertFalse(s.getblocking())
            else:
                # If timeout > 0, the socket will be in a "blocking" mode
                # from the standpoint of the Python API.  For Python socket
                # object, "blocking" means that operations like 'sock.recv()'
                # will block.  Internally, file descriptors for
                # "blocking" Python sockets *with timeouts* are in a
                # *non-blocking* mode, and 'sock.recv()' uses 'select()'
                # and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
                self.assertTrue(s.getblocking())
        else:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertEqual(s.gettimeout(), None)
            self.assertFalse(
                fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
            self.assertTrue(s.getblocking())

    @support.requires_linux_version(2, 6, 28)
    def test_SOCK_NONBLOCK(self):
        # a lot of it seems silly and redundant, but I wanted to test that
        # changing back and forth worked ok
        with socket.socket(socket.AF_INET,
                           socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
            self.checkNonblock(s)
            s.setblocking(1)
            self.checkNonblock(s, nonblock=False)
            s.setblocking(0)
            self.checkNonblock(s)
            s.settimeout(None)
            self.checkNonblock(s, nonblock=False)
            s.settimeout(2.0)
            self.checkNonblock(s, timeout=2.0)
            s.setblocking(1)
            self.checkNonblock(s, nonblock=False)
        # defaulttimeout
        t = socket.getdefaulttimeout()
        socket.setdefaulttimeout(0.0)
        with socket.socket() as s:
            self.checkNonblock(s)
        socket.setdefaulttimeout(None)
        with socket.socket() as s:
            self.checkNonblock(s, False)
        socket.setdefaulttimeout(2.0)
        with socket.socket() as s:
            self.checkNonblock(s, timeout=2.0)
        socket.setdefaulttimeout(None)
        with socket.socket() as s:
            self.checkNonblock(s, False)
        # Restore the process-wide default timeout.
        socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
    """Windows-only tests for socket.share() / socket.fromshare(), which
    transfer a socket to another process via WSADuplicateSocket data."""

    # This must be classmethod and not staticmethod or multiprocessing
    # won't be able to bootstrap it.
    @classmethod
    def remoteProcessServer(cls, q):
        # Recreate socket from shared data
        sdata = q.get()
        message = q.get()

        s = socket.fromshare(sdata)
        s2, c = s.accept()

        # Send the message
        s2.sendall(message)
        s2.close()
        s.close()

    def testShare(self):
        # Transfer the listening server socket to another process
        # and service it from there.

        # Create process:
        q = multiprocessing.Queue()
        p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
        p.start()

        # Get the shared socket data
        data = self.serv.share(p.pid)

        # Pass the shared socket to the other process
        addr = self.serv.getsockname()
        self.serv.close()
        q.put(data)

        # The data that the server will send us
        message = b"slapmahfro"
        q.put(message)

        # Connect
        s = socket.create_connection(addr)

        #  listen for the data
        m = []
        while True:
            data = s.recv(100)
            if not data:
                break
            m.append(data)
        s.close()
        received = b"".join(m)
        self.assertEqual(received, message)
        p.join()

    def testShareLength(self):
        # Truncated or padded share data must be rejected.
        data = self.serv.share(os.getpid())
        self.assertRaises(ValueError, socket.fromshare, data[:-1])
        self.assertRaises(ValueError, socket.fromshare, data+b"foo")

    def compareSockets(self, org, other):
        # socket sharing is expected to work only for blocking socket
        # since the internal python timeout value isn't transferred.
        self.assertEqual(org.gettimeout(), None)
        self.assertEqual(org.gettimeout(), other.gettimeout())

        self.assertEqual(org.family, other.family)
        self.assertEqual(org.type, other.type)
        # If the user specified "0" for proto, then
        # internally windows will have picked the correct value.
        # Python introspection on the socket however will still return
        # 0.  For the shared socket, the python value is recreated
        # from the actual value, so it may not compare correctly.
        if org.proto != 0:
            self.assertEqual(org.proto, other.proto)

    def testShareLocal(self):
        # Sharing with our own pid must produce an equivalent socket.
        data = self.serv.share(os.getpid())
        s = socket.fromshare(data)
        try:
            self.compareSockets(self.serv, s)
        finally:
            s.close()

    def testTypes(self):
        # Exercise share()/fromshare() across supported family/type combos.
        families = [socket.AF_INET, socket.AF_INET6]
        types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
        for f in families:
            for t in types:
                try:
                    source = socket.socket(f, t)
                except OSError:
                    continue # This combination is not supported
                try:
                    data = source.share(os.getpid())
                    shared = socket.fromshare(data)
                    try:
                        self.compareSockets(source, shared)
                    finally:
                        shared.close()
                finally:
                    source.close()
class SendfileUsingSendTest(ThreadedTCPSocketTest):
    """
    Test the send() implementation of socket.sendfile().

    Each test<Name> method is the server side (accepts and receives),
    while the paired _test<Name> method is the client side driving
    sendfile().  SendfileUsingSendfileTest subclasses this and swaps in
    the os.sendfile()-based implementation via meth_from_sock().
    """
    FILESIZE = (10 * 1024 * 1024)  # 10 MiB
    BUFSIZE = 8192
    FILEDATA = b""
    TIMEOUT = 2

    @classmethod
    def setUpClass(cls):
        def chunks(total, step):
            assert total >= step
            while total > step:
                yield step
                total -= step
            if total:
                yield total

        # Write FILESIZE bytes of random-letter data to TESTFN, then read
        # it back so tests can compare against cls.FILEDATA.
        # NOTE(review): the write loop emits the full BUFSIZE chunk for
        # every csize; this is only correct because FILESIZE is an exact
        # multiple of BUFSIZE (verified by the assert below).
        chunk = b"".join([random.choice(string.ascii_letters).encode()
                          for i in range(cls.BUFSIZE)])
        with open(support.TESTFN, 'wb') as f:
            for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
                f.write(chunk)
        with open(support.TESTFN, 'rb') as f:
            cls.FILEDATA = f.read()
            assert len(cls.FILEDATA) == cls.FILESIZE

    @classmethod
    def tearDownClass(cls):
        support.unlink(support.TESTFN)

    def accept_conn(self):
        # Accept one client connection with the per-test timeout applied.
        self.serv.settimeout(MAIN_TIMEOUT)
        conn, addr = self.serv.accept()
        conn.settimeout(self.TIMEOUT)
        self.addCleanup(conn.close)
        return conn

    def recv_data(self, conn):
        # Drain the connection until EOF and return everything received.
        received = []
        while True:
            chunk = conn.recv(self.BUFSIZE)
            if not chunk:
                break
            received.append(chunk)
        return b''.join(received)

    def meth_from_sock(self, sock):
        # Depending on the mixin class being run return either send()
        # or sendfile() method implementation.
        return getattr(sock, "_sendfile_use_send")

    # regular file

    def _testRegularFile(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, self.FILESIZE)
            self.assertEqual(file.tell(), self.FILESIZE)

    def testRegularFile(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # non regular file

    def _testNonRegularFile(self):
        # A BytesIO has no fileno(), so the sendfile() fast path must
        # raise _GiveupOnSendfile and sendfile() falls back to send().
        address = self.serv.getsockname()
        file = io.BytesIO(self.FILEDATA)
        with socket.create_connection(address) as sock, file as file:
            sent = sock.sendfile(file)
            self.assertEqual(sent, self.FILESIZE)
            self.assertEqual(file.tell(), self.FILESIZE)
            self.assertRaises(socket._GiveupOnSendfile,
                              sock._sendfile_use_sendfile, file)

    def testNonRegularFile(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # empty file

    def _testEmptyFileSend(self):
        address = self.serv.getsockname()
        filename = support.TESTFN + "2"
        with open(filename, 'wb'):
            self.addCleanup(support.unlink, filename)
        file = open(filename, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, 0)
            self.assertEqual(file.tell(), 0)

    def testEmptyFileSend(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(data, b"")

    # offset

    def _testOffset(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file, offset=5000)
            self.assertEqual(sent, self.FILESIZE - 5000)
            self.assertEqual(file.tell(), self.FILESIZE)

    def testOffset(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE - 5000)
        self.assertEqual(data, self.FILEDATA[5000:])

    # count

    def _testCount(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 5000007
            meth = self.meth_from_sock(sock)
            sent = meth(file, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count)

    def testCount(self):
        count = 5000007
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[:count])

    # count small

    def _testCountSmall(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 1
            meth = self.meth_from_sock(sock)
            sent = meth(file, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count)

    def testCountSmall(self):
        count = 1
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[:count])

    # count + offset

    def _testCountWithOffset(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 100007
            meth = self.meth_from_sock(sock)
            sent = meth(file, offset=2007, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count + 2007)

    def testCountWithOffset(self):
        count = 100007
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[2007:count+2007])

    # non blocking sockets are not supposed to work

    def _testNonBlocking(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            sock.setblocking(False)
            meth = self.meth_from_sock(sock)
            self.assertRaises(ValueError, meth, file)
            self.assertRaises(ValueError, sock.sendfile, file)

    def testNonBlocking(self):
        conn = self.accept_conn()
        if conn.recv(8192):
            self.fail('was not supposed to receive any data')

    # timeout (non-triggered)

    def _testWithTimeout(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, self.FILESIZE)

    def testWithTimeout(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # timeout (triggered)

    def _testWithTimeoutTriggeredSend(self):
        # A very small timeout with a server that barely reads should
        # trigger socket.timeout during the transfer.
        address = self.serv.getsockname()
        with open(support.TESTFN, 'rb') as file:
            with socket.create_connection(address) as sock:
                sock.settimeout(0.01)
                meth = self.meth_from_sock(sock)
                self.assertRaises(socket.timeout, meth, file)

    def testWithTimeoutTriggeredSend(self):
        conn = self.accept_conn()
        conn.recv(88192)

    # errors

    def _test_errors(self):
        pass

    def test_errors(self):
        # sendfile() rejects datagram sockets, text-mode files and
        # non-positive / non-integer counts.
        with open(support.TESTFN, 'rb') as file:
            with socket.socket(type=socket.SOCK_DGRAM) as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(
                    ValueError, "SOCK_STREAM", meth, file)
        with open(support.TESTFN, 'rt') as file:
            with socket.socket() as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(
                    ValueError, "binary mode", meth, file)
        with open(support.TESTFN, 'rb') as file:
            with socket.socket() as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(TypeError, "positive integer",
                                       meth, file, count='2')
                self.assertRaisesRegex(TypeError, "positive integer",
                                       meth, file, count=0.1)
                self.assertRaisesRegex(ValueError, "positive integer",
                                       meth, file, count=0)
                self.assertRaisesRegex(ValueError, "positive integer",
                                       meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
                     'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
    """
    Test the sendfile() implementation of socket.sendfile().

    Inherits every test from SendfileUsingSendTest but drives them
    through the os.sendfile()-based code path instead of send().
    """
    def meth_from_sock(self, sock):
        # Select the zero-copy sendfile() implementation on the socket.
        return sock._sendfile_use_sendfile
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
    # tests for AF_ALG, the Linux kernel crypto API socket interface.

    def create_alg(self, typ, name):
        # Return an AF_ALG socket bound to (typ, name), or skip the test
        # if the kernel does not provide that algorithm.
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        try:
            sock.bind((typ, name))
        except FileNotFoundError as e:
            # type / algorithm is not available
            sock.close()
            raise unittest.SkipTest(str(e), typ, name)
        else:
            return sock

    # bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
    # at least on ppc64le architecture
    @support.requires_linux_version(4, 5)
    def test_sha256(self):
        # NIST test vector: SHA-256("abc").
        expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
                                 "177a9cb410ff61f20015ad")
        with self.create_alg('hash', 'sha256') as algo:
            op, _ = algo.accept()
            with op:
                op.sendall(b"abc")
                self.assertEqual(op.recv(512), expected)

            op, _ = algo.accept()
            with op:
                # Incremental hashing: MSG_MORE keeps the digest open.
                op.send(b'a', socket.MSG_MORE)
                op.send(b'b', socket.MSG_MORE)
                op.send(b'c', socket.MSG_MORE)
                op.send(b'')
                self.assertEqual(op.recv(512), expected)

    def test_hmac_sha1(self):
        # RFC 2202 test vector 2: HMAC-SHA1 with key "Jefe".
        expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
        with self.create_alg('hash', 'hmac(sha1)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
            op, _ = algo.accept()
            with op:
                op.sendall(b"what do ya want for nothing?")
                self.assertEqual(op.recv(512), expected)

    # Although it should work with 3.19 and newer the test blocks on
    # Ubuntu 15.10 with Kernel 4.2.0-19.
    @support.requires_linux_version(4, 3)
    def test_aes_cbc(self):
        # RFC 3602 AES-128-CBC test vector.
        key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
        iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
        msg = b"Single block msg"
        ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
        msglen = len(msg)
        with self.create_alg('skcipher', 'cbc(aes)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 flags=socket.MSG_MORE)
                op.sendall(msg)
                self.assertEqual(op.recv(msglen), ciphertext)

            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg([ciphertext],
                                 op=socket.ALG_OP_DECRYPT, iv=iv)
                self.assertEqual(op.recv(msglen), msg)

            # long message
            multiplier = 1024
            longmsg = [msg] * multiplier
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(longmsg,
                                 op=socket.ALG_OP_ENCRYPT, iv=iv)
                enc = op.recv(msglen * multiplier)
            self.assertEqual(len(enc), msglen * multiplier)
            # CBC: only the first block matches the single-block vector.
            self.assertEqual(enc[:msglen], ciphertext)

            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg([enc],
                                 op=socket.ALG_OP_DECRYPT, iv=iv)
                dec = op.recv(msglen * multiplier)
            self.assertEqual(len(dec), msglen * multiplier)
            self.assertEqual(dec, msg * multiplier)

    @support.requires_linux_version(4, 9)  # see issue29324
    def test_aead_aes_gcm(self):
        # AES-GCM AEAD: exercise the three ways of passing associated
        # data + plaintext, then a decrypt-and-verify round trip.
        key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
        iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
        plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
        assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
        expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
        expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')

        taglen = len(expected_tag)
        assoclen = len(assoc)

        with self.create_alg('aead', 'gcm(aes)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
                            None, taglen)

            # send assoc, plain and tag buffer in separate steps
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen, flags=socket.MSG_MORE)
                op.sendall(assoc, socket.MSG_MORE)
                op.sendall(plain)
                res = op.recv(assoclen + len(plain) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # now with msg
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(assoclen + len(plain) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # create anc data manually
            pack_uint32 = struct.Struct('I').pack
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                op.sendmsg(
                    [msg],
                    ([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
                     [socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
                     [socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
                     )
                )
                res = op.recv(len(msg) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # decrypt and verify
            op, _ = algo.accept()
            with op:
                msg = assoc + expected_ct + expected_tag
                op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(len(msg) - taglen)
                self.assertEqual(plain, res[assoclen:])

    @support.requires_linux_version(4, 3)  # see test_aes_cbc
    def test_drbg_pr_sha256(self):
        # deterministic random bit generator, prediction resistance, sha256
        with self.create_alg('rng', 'drbg_pr_sha256') as algo:
            extra_seed = os.urandom(32)
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
            op, _ = algo.accept()
            with op:
                rn = op.recv(32)
                self.assertEqual(len(rn), 32)

    def test_sendmsg_afalg_args(self):
        # Invalid argument combinations for sendmsg_afalg().
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        with sock:
            with self.assertRaises(TypeError):
                sock.sendmsg_afalg()

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=None)

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(1)

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)

    def test_length_restriction(self):
        # bpo-35050, off-by-one error in length check
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        self.addCleanup(sock.close)

        # salg_type[14]
        with self.assertRaises(FileNotFoundError):
            sock.bind(("t" * 13, "name"))
        with self.assertRaisesRegex(ValueError, "type too long"):
            sock.bind(("t" * 14, "name"))

        # salg_name[64]
        with self.assertRaises(FileNotFoundError):
            sock.bind(("type", "n" * 63))
        with self.assertRaisesRegex(ValueError, "name too long"):
            sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
    """Guard against new TCP_* socket constants appearing on Windows
    without being reviewed (see bpo-32394)."""

    # TCP options already vetted for Windows support.
    knownTCPFlags = {
        # available since long time ago
        'TCP_MAXSEG',
        'TCP_NODELAY',
        # available starting with Windows 10 1607
        'TCP_FASTOPEN',
        # available starting with Windows 10 1703
        'TCP_KEEPCNT',
        # available starting with Windows 10 1709
        'TCP_KEEPIDLE',
        'TCP_KEEPINTVL'
    }

    def test_new_tcp_flags(self):
        # Any TCP_* name exported by the socket module must already be in
        # the vetted set above.
        unknown = [name for name in dir(socket)
                   if name.startswith('TCP') and name not in self.knownTCPFlags]

        self.assertEqual([], unknown,
                         "New TCP flags were discovered. See bpo-32394 for more information")
def test_main():
    # Collect every test class defined in this module (platform-specific
    # ones carry their own skip decorators) and run them with threading
    # bookkeeping so leaked threads are reported.
    tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
             TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]

    tests.extend([
        NonBlockingTCPTests,
        FileObjectClassTestCase,
        UnbufferedFileObjectClassTestCase,
        LineBufferedFileObjectClassTestCase,
        SmallBufferedFileObjectClassTestCase,
        UnicodeReadFileObjectClassTestCase,
        UnicodeWriteFileObjectClassTestCase,
        UnicodeReadWriteFileObjectClassTestCase,
        NetworkConnectionNoServer,
        NetworkConnectionAttributesTest,
        NetworkConnectionBehaviourTest,
        ContextManagersTest,
        InheritanceTest,
        NonblockConstantTest
    ])
    tests.append(BasicSocketPairTest)
    tests.append(TestUnixDomain)
    tests.append(TestLinuxAbstractNamespace)
    tests.extend([TIPCTest, TIPCThreadableTest])
    tests.extend([BasicCANTest, CANTest])
    tests.extend([BasicRDSTest, RDSTest])
    tests.append(LinuxKernelCryptoAPI)
    tests.extend([
        BasicVSOCKTest,
        ThreadedVSOCKSocketStreamTest,
    ])
    tests.extend([
        CmsgMacroTests,
        SendmsgUDPTest,
        RecvmsgUDPTest,
        RecvmsgIntoUDPTest,
        SendmsgUDP6Test,
        RecvmsgUDP6Test,
        RecvmsgRFC3542AncillaryUDP6Test,
        RecvmsgIntoRFC3542AncillaryUDP6Test,
        RecvmsgIntoUDP6Test,
        SendmsgTCPTest,
        RecvmsgTCPTest,
        RecvmsgIntoTCPTest,
        SendmsgSCTPStreamTest,
        RecvmsgSCTPStreamTest,
        RecvmsgIntoSCTPStreamTest,
        SendmsgUnixStreamTest,
        RecvmsgUnixStreamTest,
        RecvmsgIntoUnixStreamTest,
        RecvmsgSCMRightsStreamTest,
        RecvmsgIntoSCMRightsStreamTest,
        # These are slow when setitimer() is not available
        InterruptedRecvTimeoutTest,
        InterruptedSendTimeoutTest,
        TestSocketSharing,
        SendfileUsingSendTest,
        SendfileUsingSendfileTest,
    ])
    tests.append(TestMSWindowsTCPFlags)

    thread_info = support.threading_setup()
    support.run_unittest(*tests)
    support.threading_cleanup(*thread_info)

if __name__ == "__main__":
    test_main()
|
timeout.py | #!/usr/bin/env python3
from threading import Thread
import functools
def timeout(seconds_before_timeout):
    """Decorator factory: run the wrapped function in a daemon thread and
    wait at most *seconds_before_timeout* seconds for it to finish.

    If the function raises, the exception is re-raised in the caller.  If
    the deadline expires before a result is stored, the initial
    placeholder '' is returned and the (daemon) thread keeps running in
    the background.
    """
    def deco(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Single-slot container shared with the worker thread; '' is
            # the sentinel returned on timeout.
            outcome = ['']

            def worker():
                try:
                    outcome[0] = func(*args, **kwargs)
                except Exception as exc:
                    outcome[0] = exc

            thread = Thread(target=worker)
            thread.daemon = True
            try:
                thread.start()
                thread.join(seconds_before_timeout)
            except Exception as exc:
                print('error starting thread')
                raise exc

            result = outcome[0]
            if isinstance(result, BaseException):
                raise result
            return result
        return wrapper
    return deco
|
panoramaDataExtraction.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os,subprocess,psutil,re,sys,shutil,csv,time,datetime,glob
import urllib,requests
import html2text
import unicodedata
from bs4 import BeautifulSoup
from Bio import SeqIO
import xmltodict
from xml.dom import minidom
from xml.parsers.expat import ExpatError
from collections import OrderedDict
import labkey
from urlparse import unquote
from xml.etree import cElementTree as ET
import itertools
import multiprocessing
import pandas as pd
import ctypes
class ExtractPanoramaData():
def __init__(self):
    """Set up working directories, report file names and the scraping
    parameters for a Panorama Public extraction run."""
    self.curr_dir=os.getcwd()
    # Scratch directory (relative to cwd) for per-process CSV fragments.
    self.tempdir='tempPanorama'
    if not os.path.exists(self.tempdir):
        os.makedirs(self.tempdir)
    self.finalreportfilename='final_report_panorama_data'
    # Panorama Public experiment-list URL; a numeric paging offset is
    # appended per request (see listOfTargetProjects).
    self.panoramaurl='https://panoramaweb.org/project/Panorama%20Public/begin.view?Targeted%20MS%20Experiment%20List.Organism~isnonblank=&Targeted%20MS%20Experiment%20List.sort=Organism&Targeted MS Experiment List.offset='
    # Accumulates [index, experiment URL, instrument] rows.
    self.targetexpdetailsinfo=[]
    # Paging offsets to fetch (100 rows per page, presumably -- TODO confirm).
    self.listOfTargetProjects=[0,100,200,300,400,500]
    # Seconds to wait before retrying a failed HTTP fetch.
    self.RETRY_TIME = 20.0
    self.numberOfprocess=4
    self.processes=[]
    self.tempCSVfilepath=os.path.join(self.curr_dir,self.tempdir)
def homepagedata(self):
    """Scrape the paged Panorama Public experiment listing and fill
    self.targetexpdetailsinfo with [count, experiment URL, instrument]
    rows.  Retries each page forever on IOError.

    NOTE(review): uses urllib.urlopen, which only exists on Python 2 --
    this module appears to be Python-2-only code.
    """
    countProject=0
    for projRange in self.listOfTargetProjects:
        print('Project Range:',projRange)
        while True:
            try:
                panoramaurlhome=urllib.urlopen(self.panoramaurl+str(projRange)).read()
                phsoup = BeautifulSoup(panoramaurlhome,"lxml")
                phsoup.prettify()
                phtables = phsoup.find_all('table')
                # LabKey renders the experiment grid as a table whose id
                # starts with 'lk-region-'; data rows carry the
                # 'labkey-row'/'labkey-alternate-row' classes.
                for phtable in phtables:
                    try:
                        phtable_id = phtable['id']
                        if phtable_id.startswith('lk-region-'):
                            phtr_tags = phtable.find_all('tr')
                            for phtr in phtr_tags:
                                try:
                                    phtr_name = phtr['class']
                                    if phtr_name[0].startswith('labkey-alternate-row') or phtr_name[0].startswith('labkey-row'):
                                        phtd_tags = phtr.find_all('td')
                                        countTD=0
                                        tdinsntrument='NA'
                                        tempphurl='NA'
                                        for phtd in phtd_tags:
                                            # Column 4 holds the instrument name
                                            # (presumably -- TODO confirm against the live page).
                                            if countTD==4:
                                                tdinsntrument=phtd.string
                                            phhrf=phtd.find_all('a', href=True)
                                            phtext = phtd.string
                                            try:
                                                # Keep only links into the Panorama Public project.
                                                if len(phhrf[0]) >0:
                                                    if str(phhrf[0]).startswith('<a href="/Panorama%20Public/'):
                                                        tempphurl="https://panoramaweb.org"+str(phhrf[0]['href'])
                                            except IndexError:
                                                pass
                                            countTD+=1
                                        if 'NA' !=tempphurl:
                                            countProject+=1
                                            self.targetexpdetailsinfo.append([countProject,tempphurl,tdinsntrument])
                                except KeyError:
                                    # Row without a class attribute (e.g. header row).
                                    pass
                    except KeyError:
                        # Table without an id attribute.
                        pass
                # Page fetched and parsed successfully: stop retrying.
                break
            except IOError:
                time.sleep(self.RETRY_TIME)
                print('Hey, I am trying again until succeeds to get data from Panorama!',str(datetime.datetime.now()))
                pass
def panoramamsrunlist(self,inputs):
if os.path.exists(self.tempdir):
os.chdir(self.tempdir)
tempfinalreportfile=open(self.finalreportfilename+'_'+str(inputs[0])+'.csv','w')
tempfinalreportfile.write('UniprotID'+'\t'+'PeptideSequence'+'\t'+'PeptideModifiedSequence'+'\t'+'URLaddress'+'\t'+'Transitions'+'\n')
for hinfo in inputs[1]:
countProt=0
listofinstruments=[]
for i in hinfo[2].split(','):
if 'and' not in i:
listofinstruments.append(i.encode('utf-8').strip())
else:
for x in i.split('and'):
listofinstruments.append(x.encode('utf-8').strip())
reload(sys)
sys.setdefaultencoding("utf-8")
urladdressexp=hinfo[1]
msRundecodeURL=unquote(unquote(urladdressexp))
print(hinfo[0],urladdressexp,str(datetime.datetime.now()))
#msRundecodeURLName=(msRundecodeURL.split('project/')[1]).split('/begin.view?')[0]
msRundecodeURLName=(msRundecodeURL.split('/project-begin.view?')[0]).split('panoramaweb.org/')[1]
msRunserver_context = labkey.utils.create_server_context('panoramaweb.org', msRundecodeURLName, use_ssl=True)
while True:
try:
targetmsmy_results = labkey.query.select_rows(
server_context=msRunserver_context,
schema_name='targetedms',
query_name='Transition'
)
targetmsurldic={}
targetpepmsdic={}
for targetmsitem in targetmsmy_results.values()[0]:
tempdic={}
targetmsID=""
targetmsmodpepseq=""
targetmsurl=""
targetmslabel=""
targetmsQ1Charge=""
targetmsQ3Charge=""
targetmsQ1Mz=""
targetmsQ3Mz=""
targetmsFrag=""
for targetmskey in targetmsitem.keys():
if 'PeptideId/PeptideGroupId/Label'.lower() in targetmskey.lower() and '_labkeyurl_PrecursorId/PeptideId/PeptideGroupId/Label'.lower() not in targetmskey.lower():
targetmsID=str(targetmsitem[targetmskey]).strip()
if 'PeptideId/ModifiedPeptideDisplayColumn'.lower() in targetmskey.lower(): targetmsmodpepseq=str(targetmsitem[targetmskey]).strip()
if '_labkeyurl_PrecursorId/PeptideId/PeptideGroupId/Label'.lower() in targetmskey.lower(): targetmsurl=str(targetmsitem[targetmskey]).strip()
if 'PrecursorId/IsotopeLabelId/Name'.lower() in targetmskey.lower(): targetmslabel=str(targetmsitem[targetmskey]).strip()
if 'PrecursorId/Charge'.lower() in targetmskey.lower(): targetmsQ1Charge=str(targetmsitem[targetmskey]).strip()
if 'Charge'.lower() in targetmskey.lower() and 'PrecursorId/Charge'.lower() not in targetmskey.lower():
targetmsQ3Charge=str(targetmsitem[targetmskey]).strip()
if 'PrecursorId/Mz'.lower() in targetmskey.lower(): targetmsQ1Mz=str(targetmsitem[targetmskey]).strip()
if 'Mz'.lower() in targetmskey.lower() and 'PrecursorId/Mz'.lower() not in targetmskey.lower():
targetmsQ3Mz=str(targetmsitem[targetmskey]).strip()
if 'Fragment'.lower() in targetmskey.lower(): targetmsFrag=str(targetmsitem[targetmskey]).strip()
tempdic["targetmsID"]=targetmsID
tempdic["targetmsmodpepseq"]=targetmsmodpepseq
tempdic["targetmslabel"]=targetmslabel
tempdic["targetmsFrag"]=targetmsFrag
tempdic["targetmsQ1Charge"]=targetmsQ1Charge
tempdic["targetmsQ1Mz"]=targetmsQ1Mz
tempdic["targetmsQ3Charge"]=targetmsQ3Charge
tempdic["targetmsQ3Mz"]=targetmsQ3Mz
tempmspepID=targetmsID+"@"+targetmsmodpepseq
if len(tempmspepID.strip()) >0 and len(targetmslabel.strip()) >0:
if targetpepmsdic.has_key(tempmspepID):
if tempdic not in targetpepmsdic[tempmspepID]:
targetpepmsdic[tempmspepID].append(tempdic)
else:
targetpepmsdic[tempmspepID]=[tempdic]
targetmsurldic[targetmsID]=targetmsurl
for protkey in targetmsurldic.keys():
countProt+=1
if 'showProtein.view?id' in targetmsurldic[protkey]:
uniprotidexist=False
while True:
try:
if 'uniprot.org/uniprot/' in urllib.urlopen("https://panoramaweb.org"+str(targetmsurldic[protkey])).read():
uniprotidexist=True
break
except IOError:
time.sleep(self.RETRY_TIME)
print('Hey, I am trying again until succeeds to get data from Panorama!',str(datetime.datetime.now()))
pass
if uniprotidexist:
uniprotlinklist=[]
while True:
try:
proturldata=urllib.urlopen("https://panoramaweb.org"+str(targetmsurldic[protkey]))
for puline in proturldata:
pudata=puline.strip()
if "href=" in pudata and \
"https://www.uniprot.org/uniprot/" in pudata and \
"target=" in pudata and \
"protWindow" in pudata and \
'<a' in pudata:
subunid=(((pudata.split('href='))[-1]).split('>'))[0].split()[0]
uniprotlinklist.append(subunid)
if "href=" in pudata and \
"http://www.uniprot.org/uniprot/" in pudata and \
"target=" in pudata and \
"protWindow" in pudata and \
'<a' in pudata:
subunid=(((pudata.split('href='))[-1]).split('>'))[0].split()[0]
uniprotlinklist.append(subunid)
proturldata.close()
break
except IOError:
time.sleep(self.RETRY_TIME)
print('Hey, I am trying again until succeeds to get data from Panorama!',str(datetime.datetime.now()))
pass
if len(uniprotlinklist)>0:
if countProt %1000 ==0:
print(str(countProt), "th protein job done",str(datetime.datetime.now()))
for uniprotlink in uniprotlinklist:
uniprotlink=uniprotlink.replace('http','https')
uniprotlink = uniprotlink.replace("'","")
uniprotlink = uniprotlink.replace('"','')
try:
r = requests.get(uniprotlink)
AC='NA'
AC=str(r.url).split('/')[-1].strip()
if AC != 'NA':
for pepkey in targetpepmsdic.keys():
if protkey in pepkey.split('@')[0]:
modpepseq='NA'
pepseq=filter(str.isalpha, str(pepkey.split('@')[1]))
protURL="https://panoramaweb.org"+str(targetmsurldic[protkey])
transList=[]
for peplabitem in targetpepmsdic[pepkey]:
for insitem in listofinstruments:
transList.append(insitem+"|"+peplabitem["targetmslabel"]+"|"+peplabitem["targetmsQ1Mz"]+"|"+peplabitem["targetmsQ1Charge"]+peplabitem["targetmsFrag"]+"|"+peplabitem["targetmsQ3Mz"]+"|"+peplabitem["targetmsQ3Charge"])
transData=",".join(list(set(transList)))
transData="Instrument|Label|Q1 m/z|Q1 Z|Fragment|Q3 m/z|Q3 Z,"+transData
if pepseq !=str(pepkey.split('@')[1]):
modpepseq=str(pepkey.split('@')[1]).strip()
rowdata=AC+'\t'+str(pepseq)+'\t'+str(modpepseq)+'\t'+str(protURL)+'\t'+"'"+transData+"'"
tempfinalreportfile.write(rowdata+'\n')
except requests.exceptions.ConnectionError:
pass
except requests.exceptions.ChunkedEncodingError:
pass
break
except labkey.exceptions.ServerContextError:
time.sleep(self.RETRY_TIME)
print('Hey, I am trying again until succeeds to get data from Panorama by fixing labkey!',str(datetime.datetime.now()))
pass
tempfinalreportfile.close()
os.chdir(self.curr_dir)
def multiProcssJobPanoramaWeb(self):
if os.path.exists(self.tempdir):
self.homepagedata()
print('Number of projects:',len(self.targetexpdetailsinfo))
bins=int(len(self.targetexpdetailsinfo)/self.numberOfprocess)
counter=0
start=time.time()
for i in range(0,len(self.targetexpdetailsinfo),bins):
counter+=1
if counter==self.numberOfprocess:
print(i)
p=multiprocessing.Process(target=self.panoramamsrunlist,args=[[counter,self.targetexpdetailsinfo[i:]]])
p.start()
self.processes.append(p)
break
elif counter < self.numberOfprocess:
p=multiprocessing.Process(target=self.panoramamsrunlist,args=[[counter,self.targetexpdetailsinfo[i:i+bins]]])
p.start()
self.processes.append(p)
print(i,i+bins)
for process in self.processes:
process.join()
finish=time.time()
print('Finished in '+str(finish-start)+' seconds')
panoramaCSV_files = glob.glob(self.tempCSVfilepath+"/*.csv")
#increase the field size of CSV
csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))
df = pd.concat((pd.read_csv(f, header = 0, sep='\t',keep_default_na=False) for f in panoramaCSV_files))
df_deduplicated = df.drop_duplicates()
df_deduplicated.to_csv(self.finalreportfilename+".csv",sep='\t', encoding='utf-8',index=False)
movefilepath=os.path.join(self.curr_dir,self.tempdir ,self.finalreportfilename+".csv")
filepath = os.path.join(self.curr_dir, self.finalreportfilename+".csv")
os.chdir(self.curr_dir)
shutil.move(movefilepath,filepath)
shutil.rmtree(self.tempdir) |
wrappers.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# #########################################################################
# Copyright (c) 2015-2019, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2015-2019. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
"""
Module for reconstruction software wrappers.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
from tomopy.util import mproc
import numpy as np
import copy
import threading
logger = logging.getLogger(__name__)
__author__ = "Daniel M. Pelt"
__copyright__ = "Copyright (c) 2015, UChicago Argonne, LLC."
__docformat__ = 'restructuredtext en'
__all__ = ['astra', 'ufo_fbp', 'ufo_dfi', 'lprec']
# Per-backend default reconstruction options. Keys missing from the caller's
# 'options' dict are filled in from here before the backend runs.
default_options = {
    'astra': {
        'proj_type': 'linear',
        'num_iter': 1,
        'gpu_list': None,
    },
    'lprec': {
        'lpmethod': 'fbp',
        'interp_type': 'cubic',
        'filter_name': 'None',
        'num_iter': 1,
        'reg_par': 1,
        'gpu_list': [0],
    }
}
# Options with no sensible default; their absence is a hard error.
needed_options = {
    'astra': ['method']
}
def astra(tomo, center, recon, theta, **kwargs):
    """
    Reconstruct object using the ASTRA toolbox

    Extra options
    ----------
    method : str
        ASTRA reconstruction method to use.
    num_iter : int, optional
        Number of algorithm iterations performed.
    proj_type : str, optional
        ASTRA projector type to use (see ASTRA docs for more information):
            - 'cuda' (for GPU algorithms)
            - 'line', 'linear', or 'strip' (for CPU algorithms)
    gpu_list : list, optional
        List of GPU indices to use
    extra_options : dict, optional
        Extra options for the ASTRA config (i.e. those in cfg['option'])

    Example
    -------
    >>> import tomopy
    >>> obj = tomopy.shepp3d() # Generate an object.
    >>> ang = tomopy.angles(180) # Generate uniformly spaced tilt angles.
    >>> sim = tomopy.project(obj, ang) # Calculate projections.
    >>>
    >>> # Reconstruct object:
    >>> rec = tomopy.recon(sim, ang, algorithm=tomopy.astra,
    >>>       options={'method':'SART', 'num_iter':10*180,
    >>>       'proj_type':'linear',
    >>>       'extra_options':{'MinConstraint':0}})
    >>>
    >>> # Show 64th slice of the reconstructed object.
    >>> import pylab
    >>> pylab.imshow(rec[64], cmap='gray')
    >>> pylab.show()
    """
    # Lazy import ASTRA
    import astra as astra_mod

    # Unpack arguments. Results are written into `recon` in place; there is no
    # return value. kwargs must carry 'num_gridx', 'num_gridy' and 'options'.
    nslices = tomo.shape[0]
    num_gridx = kwargs['num_gridx']
    num_gridy = kwargs['num_gridy']
    opts = kwargs['options']

    # Check options: required ones must be present, missing optional ones are
    # filled from default_options (note: this mutates the caller's dict).
    for o in needed_options['astra']:
        if o not in opts:
            logger.error("Option %s needed for ASTRA reconstruction." % (o,))
            raise ValueError()
    for o in default_options['astra']:
        if o not in opts:
            opts[o] = default_options['astra'][o]

    niter = opts['num_iter']
    proj_type = opts['proj_type']

    # Create ASTRA geometries (2D volume; slices are reconstructed one by one).
    vol_geom = astra_mod.create_vol_geom((num_gridx, num_gridy))

    # Number of GPUs to use
    if proj_type == 'cuda':
        if opts['gpu_list'] is not None:
            import concurrent.futures as cf
            gpu_list = opts['gpu_list']
            ngpu = len(gpu_list)
            # Partition the slice range so each GPU gets a contiguous chunk.
            _, slcs = mproc.get_ncore_slices(nslices, ngpu)
            # execute recon on a thread per GPU
            with cf.ThreadPoolExecutor(ngpu) as e:
                for gpu, slc in zip(gpu_list, slcs):
                    e.submit(astra_rec_cuda, tomo[slc], center[slc], recon[slc],
                             theta, vol_geom, niter, proj_type, gpu, opts)
        else:
            astra_rec_cuda(tomo, center, recon, theta, vol_geom, niter,
                           proj_type, None, opts)
    else:
        astra_rec_cpu(tomo, center, recon, theta, vol_geom, niter,
                      proj_type, opts)
def astra_rec_cuda(tomo, center, recon, theta, vol_geom, niter, proj_type, gpu_index, opts):
    """Reconstruct every slice of *tomo* into *recon* (in place) with an ASTRA
    GPU algorithm.

    tomo is indexed (slice, angle, detector); gpu_index selects the CUDA
    device (None lets ASTRA use its default).
    """
    # Lazy import ASTRA
    import astra as astra_mod
    nslices, nang, ndet = tomo.shape
    cfg = astra_mod.astra_dict(opts['method'])
    if 'extra_options' in opts:
        # NOTE: we are modifying 'extra_options' and so need to make a copy
        cfg['option'] = copy.deepcopy(opts['extra_options'])
    else:
        cfg['option'] = {}
    if gpu_index is not None:
        cfg['option']['GPUindex'] = gpu_index
    # Rotation center of the previously processed slice; the projection
    # geometry is only rebuilt when the center actually changes.
    oc = None
    const_theta = np.ones(nang)
    proj_geom = astra_mod.create_proj_geom(
        'parallel', 1.0, ndet, theta.astype(np.float64))
    for i in range(nslices):
        if center[i] != oc:
            oc = center[i]
            # Express the off-center rotation axis as a constant per-angle
            # detector offset.
            proj_geom['option'] = {
                'ExtraDetectorOffset':
                (center[i] - ndet / 2.) * const_theta}
        pid = astra_mod.create_projector(proj_type, proj_geom, vol_geom)
        cfg['ProjectorId'] = pid
        # 'link' hands the NumPy buffers to ASTRA so results land directly
        # in recon[i].
        sid = astra_mod.data2d.link('-sino', proj_geom, tomo[i])
        cfg['ProjectionDataId'] = sid
        vid = astra_mod.data2d.link('-vol', vol_geom, recon[i])
        cfg['ReconstructionDataId'] = vid
        alg_id = astra_mod.algorithm.create(cfg)
        astra_mod.algorithm.run(alg_id, niter)
        # Explicitly free per-slice ASTRA objects to avoid leaking memory.
        astra_mod.algorithm.delete(alg_id)
        astra_mod.data2d.delete(vid)
        astra_mod.data2d.delete(sid)
        astra_mod.projector.delete(pid)
def astra_rec_cpu(tomo, center, recon, theta, vol_geom, niter, proj_type, opts):
    """Reconstruct every slice of *tomo* into *recon* (in place) with an ASTRA
    CPU algorithm.

    Off-center rotation axes are handled by shifting each sinogram into a
    scratch buffer and zeroing the columns that wrapped around.
    """
    # Lazy import ASTRA
    import astra as astra_mod
    nslices, nang, ndet = tomo.shape
    cfg = astra_mod.astra_dict(opts['method'])
    if 'extra_options' in opts:
        # NOTE(review): unlike the CUDA path this does not deepcopy
        # 'extra_options', so ASTRA may see caller mutations — confirm intended.
        cfg['option'] = opts['extra_options']
    proj_geom = astra_mod.create_proj_geom(
        'parallel', 1.0, ndet, theta.astype(np.float64))
    pid = astra_mod.create_projector(proj_type, proj_geom, vol_geom)
    # Scratch sinogram reused for every slice; linked into ASTRA once.
    sino = np.zeros((nang, ndet), dtype=np.float32)
    sid = astra_mod.data2d.link('-sino', proj_geom, sino)
    cfg['ProjectorId'] = pid
    cfg['ProjectionDataId'] = sid
    for i in range(nslices):
        # Integer shift that moves the rotation center to the detector middle.
        shft = int(np.round(ndet / 2. - center[i]))
        if not shft == 0:
            sino[:] = np.roll(tomo[i], shft)
            # Zero out the columns contaminated by the (flattened) roll's
            # wrap-around, clamped to the valid column range.
            l = shft
            r = ndet + shft
            if l < 0:
                l = 0
            if r > ndet:
                r = ndet
            sino[:, :l] = 0
            sino[:, r:] = 0
        else:
            sino[:] = tomo[i]
        vid = astra_mod.data2d.link('-vol', vol_geom, recon[i])
        cfg['ReconstructionDataId'] = vid
        alg_id = astra_mod.algorithm.create(cfg)
        astra_mod.algorithm.run(alg_id, niter)
        astra_mod.algorithm.delete(alg_id)
        astra_mod.data2d.delete(vid)
    astra_mod.data2d.delete(sid)
    astra_mod.projector.delete(pid)
def _process_data(input_task, output_task, sinograms, slices):
    """Stream *sinograms* into a running UFO graph and collect results in *slices*.

    Runs on a helper thread while Ufo.Scheduler.run() blocks the caller
    (see ufo_fbp / ufo_dfi). The graph's output may be larger than the input
    width, so each result is center-cropped to (width, width).
    """
    import ufo.numpy as unp
    num_sinograms, num_projections, width = sinograms.shape
    for i in range(num_sinograms):
        if i == 0:
            # First input buffer has to be allocated; subsequent iterations
            # reuse buffers recycled by the input task.
            data = unp.empty_like(sinograms[i, :, :])
        else:
            data = input_task.get_input_buffer()
        # Set host array pointer and use that as first input
        data.set_host_array(
            sinograms[i, :, :].__array_interface__['data'][0], False)
        input_task.release_input_buffer(data)
        # Get last output and copy result back into NumPy buffer
        data = output_task.get_output_buffer()
        array = unp.asarray(data)
        # Center-crop the (possibly padded) reconstruction to width x width.
        frm = int(array.shape[0] / 2 - width / 2)
        to = int(array.shape[0] / 2 + width / 2)
        slices[i, :, :] = array[frm:to, frm:to]
        output_task.release_output_buffer(data)
    # Signal end-of-stream so the scheduler can finish.
    input_task.stop()
def ufo_fbp(tomo, center, recon, theta, **kwargs):
    """
    Reconstruct object using UFO's FBP pipeline

    Builds the graph input -> fft -> filter -> ifft -> backproject -> output
    and streams *tomo* through it while the scheduler runs; results are
    written into *recon* in place.

    NOTE(review): assumes uniformly spaced angles (uses theta[1] - theta[0])
    and a single rotation center (center[0]) for all slices — confirm.
    """
    import gi
    gi.require_version('Ufo', '0.0')
    from gi.repository import Ufo
    width = tomo.shape[2]
    theta = theta[1] - theta[0]  # reduce to the scalar angle step
    center = center[0]
    g = Ufo.TaskGraph()
    pm = Ufo.PluginManager()
    sched = Ufo.Scheduler()
    input_task = Ufo.InputTask()
    output_task = Ufo.OutputTask()
    fft = pm.get_task('fft')
    ifft = pm.get_task('ifft')
    fltr = pm.get_task('filter')
    backproject = pm.get_task('backproject')
    ifft.set_properties(crop_width=width)
    backproject.set_properties(
        axis_pos=center, angle_step=theta, angle_offset=np.pi)
    g.connect_nodes(input_task, fft)
    g.connect_nodes(fft, fltr)
    g.connect_nodes(fltr, ifft)
    g.connect_nodes(ifft, backproject)
    g.connect_nodes(backproject, output_task)
    # Feed/drain the graph from a helper thread; sched.run() blocks until done.
    args = (input_task, output_task, tomo, recon)
    thread = threading.Thread(target=_process_data, args=args)
    thread.start()
    sched.run(g)
    thread.join()
    logger.info("UFO+FBP run time: {}s".format(sched.props.time))
def ufo_dfi(tomo, center, recon, theta, **kwargs):
    """
    Reconstruct object using UFO's Direct Fourier pipeline

    Graph: input -> zeropad -> fft -> dfi-sinc -> swap-quadrants -> ifft
    -> swap-quadrants -> output; results are written into *recon* in place.

    NOTE(review): like ufo_fbp, assumes uniform angle spacing and a single
    rotation center (center[0]) — confirm.
    """
    import gi
    gi.require_version('Ufo', '0.0')
    from gi.repository import Ufo
    theta = theta[1] - theta[0]  # reduce to the scalar angle step
    center = center[0]
    g = Ufo.TaskGraph()
    pm = Ufo.PluginManager()
    sched = Ufo.Scheduler()
    input_task = Ufo.InputTask()
    output_task = Ufo.OutputTask()
    pad = pm.get_task('zeropad')
    fft = pm.get_task('fft')
    ifft = pm.get_task('ifft')
    dfi = pm.get_task('dfi-sinc')
    swap_forward = pm.get_task('swap-quadrants')
    swap_backward = pm.get_task('swap-quadrants')
    pad.set_properties(oversampling=1, center_of_rotation=center)
    fft.set_properties(dimensions=1, auto_zeropadding=False)
    ifft.set_properties(dimensions=2)
    dfi.set_properties(angle_step=theta)
    g.connect_nodes(input_task, pad)
    g.connect_nodes(pad, fft)
    g.connect_nodes(fft, dfi)
    g.connect_nodes(dfi, swap_forward)
    g.connect_nodes(swap_forward, ifft)
    g.connect_nodes(ifft, swap_backward)
    g.connect_nodes(swap_backward, output_task)
    # Feed/drain the graph from a helper thread; sched.run() blocks until done.
    args = (input_task, output_task, tomo, recon)
    thread = threading.Thread(target=_process_data, args=args)
    thread.start()
    sched.run(g)
    thread.join()
    logger.info("UFO+DFI run time: {}s".format(sched.props.time))
def lprec(tomo, center, recon, theta, **kwargs):
    """
    Reconstruct object using the Log-polar based method
    https://github.com/math-vrn/lprec

    Extra options
    ----------
    lpmethod : str
        LP reconsruction method to use
            - 'fbp'
            - 'grad'
            - 'cg'
            - 'tv'
            - 'em'
    filter_type:
        Filter for backprojection
            - 'ramp'
            - 'shepp-logan'
            - 'cosine'
            - 'cosine2'
            - 'hamming'
            - 'hann'
            - 'parzen'
    interp_type:
        Type of interpolation between Cartesian, polar and log-polar coordinates
            - 'linear'
            - 'cubic'

    Example
    -------
    >>> import tomopy
    >>> obj = tomopy.shepp3d() # Generate an object.
    >>> ang = tomopy.angles(180) # Generate uniformly spaced tilt angles.
    >>> sim = tomopy.project(obj, ang) # Calculate projections.
    >>>
    >>> # Reconstruct object:
    >>> rec = tomopy.recon(sim, ang, algorithm=tomopy.lprec,
    >>>       lpmethod='lpfbp', filter_name='parzen', interp_type='cubic', ncore=1)
    >>>
    >>> # Show 64th slice of the reconstructed object.
    >>> import pylab
    >>> pylab.imshow(rec[64], cmap='gray')
    >>> pylab.show()
    """
    from lprec import lpTransform
    from lprec import lpmethods
    import concurrent.futures as cf
    from functools import partial

    # set default options (fills kwargs in place from default_options['lprec'])
    opts = kwargs
    for o in default_options['lprec']:
        if o not in kwargs:
            opts[o] = default_options['lprec'][o]
    filter_name = opts['filter_name']
    interp_type = opts['interp_type']
    lpmethod = opts['lpmethod']
    num_iter = opts['num_iter']
    reg_par = opts['reg_par']
    gpu_list = opts['gpu_list']

    # list of available methods for reconstruction
    lpmethods_list = {
        'fbp': lpmethods.fbp,
        'grad': lpmethods.grad,
        'cg': lpmethods.cg,
        'tv': lpmethods.tv,
        'em': lpmethods.em
    }
    [Ns, Nproj, N] = tomo.shape
    ngpus = len(gpu_list)
    # number of slices for simultaneous processing by 1 gpu
    # (depends on gpu memory size, chosen for gpus with >= 4GB memory)
    Nssimgpu = min(int(pow(2, 24)/float(N*N)), int(np.ceil(Ns/float(ngpus))))

    # class lprec; a single rotation center (center[0]) is used for all slices
    lp = lpTransform.lpTransform(
        N, Nproj, Nssimgpu, filter_name, int(center[0]), interp_type)
    # if not fbp, precompute for the forward transform
    lp.precompute(lpmethod != 'fbp')

    # list of slices sets for simultaneous processing b gpus
    ids_list = [None]*int(np.ceil(Ns/float(Nssimgpu)))
    for k in range(0, len(ids_list)):
        ids_list[k] = range(k*Nssimgpu, min(Ns, (k+1)*Nssimgpu))

    # init memory for each gpu
    for igpu in range(0, ngpus):
        gpu = gpu_list[igpu]
        # if not fbp, allocate memory for the forward transform arrays
        lp.initcmem(lpmethod != 'fbp', gpu)

    # run reconstruciton on many gpus; lpmultigpu picks its GPU from the
    # worker-thread name, and results are copied back into recon in order.
    with cf.ThreadPoolExecutor(ngpus) as e:
        shift = 0
        for reconi in e.map(partial(lpmultigpu, lp, lpmethods_list[lpmethod], recon, tomo, num_iter, reg_par, gpu_list), ids_list):
            recon[np.arange(0, reconi.shape[0])+shift] = reconi
            shift += reconi.shape[0]
    return recon
def lpmultigpu(lp, lpmethod, recon, tomo, num_iter, reg_par, gpu_list, ids):
    """Reconstruct the slice set *ids* on one GPU.

    Parameters
    ----------
    lp : lpTransform
        Precomputed log-polar transform object shared by all workers.
    lpmethod : callable
        One of the lprec reconstruction methods (fbp/grad/cg/tv/em).
    recon, tomo : ndarray
        Output and input stacks; recon[ids] is updated in place.
    num_iter, reg_par : int, float
        Iteration count and regularization parameter forwarded to lpmethod.
    gpu_list : list
        GPU indices; the worker picks its entry from the numeric suffix of the
        current thread's name (ThreadPoolExecutor names threads "...-N_M").
    ids : sequence
        Slice indices handled by this call.

    Returns
    -------
    ndarray
        The reconstructed recon[ids] block.
    """
    # take gpu number with respect to the current thread
    gpu = gpu_list[int(threading.current_thread().name.split("_", 1)[1])]
    # (removed leftover debug print of [gpu, ids] that spammed stdout on
    # every chunk of a multi-GPU run)
    # reconstruct
    recon[ids] = lpmethod(lp, recon[ids], tomo[ids], num_iter, reg_par, gpu)
    return recon[ids]
|
reverse.py | import logging
import threading
import select
import socket
from typing import Callable
logger = logging.getLogger(__name__)
should_run = {}
def handler(chan, local_port, host, port):
    """Pump bytes in both directions between *chan* and a new TCP connection
    to (host, port) until either side closes or the tunnel is deactivated
    via the module-level should_run registry.
    """
    key = (local_port, host, port)
    upstream = socket.socket()
    try:
        upstream.connect((host, port))
    except Exception as exc:
        logger.warning('Forwarding request to {}:{} failed: {}'.format(host, port, exc))
        return
    logger.info('Connected! Tunnel open {} -> {} -> {}'.format(
        chan.origin_addr, chan.getpeername(), (host, port)))
    finished = False
    while not finished and should_run.get(key):
        readable, _, _ = select.select([upstream, chan], [], [])
        for src in readable:
            payload = src.recv(1024)
            if not payload:
                # Peer closed its side; tear down the whole tunnel.
                finished = True
                break
            dst = chan if src is upstream else upstream
            dst.send(payload)
    chan.close()
    upstream.close()
    logger.info('Tunnel closed from {}'.format(chan.origin_addr))
def reverse_tunnel(server_port, remote_host, remote_port, transport, bind_addr='') -> Callable:
    """Return a callable that serves a reverse SSH tunnel.

    The returned server() requests remote port forwarding on *transport*,
    marks the tunnel active in should_run, and spawns a daemon handler thread
    for every accepted channel until the tunnel is closed (see close_tunnel).
    """
    def server():
        transport.request_port_forward(bind_addr, server_port)
        key = server_port, remote_host, remote_port
        should_run[key] = True
        while should_run.get(key):
            try:
                chan = transport.accept(1)
            except Exception as e:
                logger.warning(e)
                continue
            if chan is None:
                # accept() timed out with nothing pending; re-check should_run.
                # (Fixes the original `assert chan is not None`, which is
                # stripped under `python -O` and abused exceptions for flow.)
                continue
            thr = threading.Thread(
                target=handler, args=(chan, server_port, remote_host, remote_port))
            # setDaemon() is deprecated; assign the attribute instead.
            thr.daemon = True
            thr.start()
    return server
def close_tunnel(server_port, remote_host, remote_port):
    """Deactivate the tunnel identified by (server_port, remote_host, remote_port).

    Removing the key makes the server/handler loops' should_run.get(key)
    checks falsy, so they exit on their next iteration.
    """
    key = server_port, remote_host, remote_port
    if key not in should_run:
        logger.info('No such active tunnel: {}:{}:{}'.format(server_port, remote_host, remote_port))
        # BUG FIX: the original fell through to `del should_run[key]` here,
        # raising KeyError for unknown tunnels.
        return
    del should_run[key]
# vim:sw=4:ts=4:et:
|
test_multikernelmanager.py | """Tests for the notebook kernel and session manager."""
import os
import time
import threading
import multiprocessing as mp
from subprocess import PIPE
from unittest import TestCase
from traitlets.config.loader import Config
from jupyter_client import KernelManager
from jupyter_client.multikernelmanager import MultiKernelManager
from .utils import skip_win32
from ..localinterfaces import localhost
TIMEOUT = 30
class TestKernelManager(TestCase):
    """Lifecycle and connection-info tests for MultiKernelManager over the
    tcp and ipc transports, including threaded/multiprocess startup."""

    def _get_tcp_km(self):
        """Return a MultiKernelManager with the default (tcp) transport."""
        c = Config()
        km = MultiKernelManager(config=c)
        return km

    def _get_ipc_km(self):
        """Return a MultiKernelManager configured for the ipc transport."""
        c = Config()
        c.KernelManager.transport = 'ipc'
        c.KernelManager.ip = 'test'
        km = MultiKernelManager(config=c)
        return km

    def _run_lifecycle(self, km):
        """Start, restart, interrupt and shut down one kernel, checking the
        manager's bookkeeping at each step."""
        kid = km.start_kernel(stdout=PIPE, stderr=PIPE)
        self.assertTrue(km.is_alive(kid))
        self.assertTrue(kid in km)
        self.assertTrue(kid in km.list_kernel_ids())
        self.assertEqual(len(km), 1)
        km.restart_kernel(kid, now=True)
        self.assertTrue(km.is_alive(kid))
        self.assertTrue(kid in km.list_kernel_ids())
        km.interrupt_kernel(kid)
        k = km.get_kernel(kid)
        self.assertTrue(isinstance(k, KernelManager))
        km.shutdown_kernel(kid, now=True)
        self.assertTrue(not kid in km)

    def _run_cinfo(self, km, transport, ip):
        """Check get_connection_info() and that all channel sockets connect."""
        kid = km.start_kernel(stdout=PIPE, stderr=PIPE)
        k = km.get_kernel(kid)
        cinfo = km.get_connection_info(kid)
        self.assertEqual(transport, cinfo['transport'])
        self.assertEqual(ip, cinfo['ip'])
        self.assertTrue('stdin_port' in cinfo)
        self.assertTrue('iopub_port' in cinfo)
        stream = km.connect_iopub(kid)
        stream.close()
        self.assertTrue('shell_port' in cinfo)
        stream = km.connect_shell(kid)
        stream.close()
        self.assertTrue('hb_port' in cinfo)
        stream = km.connect_hb(kid)
        stream.close()
        km.shutdown_kernel(kid, now=True)

    def test_tcp_lifecycle(self):
        km = self._get_tcp_km()
        self._run_lifecycle(km)

    def test_shutdown_all(self):
        km = self._get_tcp_km()
        kid = km.start_kernel(stdout=PIPE, stderr=PIPE)
        self.assertIn(kid, km)
        km.shutdown_all()
        self.assertNotIn(kid, km)
        # shutdown again is okay, because we have no kernels
        km.shutdown_all()

    def test_tcp_cinfo(self):
        km = self._get_tcp_km()
        self._run_cinfo(km, 'tcp', localhost())

    @skip_win32
    def test_ipc_lifecycle(self):
        km = self._get_ipc_km()
        self._run_lifecycle(km)

    @skip_win32
    def test_ipc_cinfo(self):
        km = self._get_ipc_km()
        self._run_cinfo(km, 'ipc', 'test')

    def test_start_sequence_tcp_kernels(self):
        """Ensure that a sequence of kernel startups doesn't break anything."""
        self._run_lifecycle(self._get_tcp_km())
        self._run_lifecycle(self._get_tcp_km())
        self._run_lifecycle(self._get_tcp_km())

    # BUG FIX: this test was named test_start_sequence_tcp_kernels too,
    # silently shadowing the tcp variant above so only one of the two ever
    # ran. Renamed to match its ipc body, and skipped on win32 like the
    # other ipc tests.
    @skip_win32
    def test_start_sequence_ipc_kernels(self):
        """Ensure that a sequence of kernel startups doesn't break anything."""
        self._run_lifecycle(self._get_ipc_km())
        self._run_lifecycle(self._get_ipc_km())
        self._run_lifecycle(self._get_ipc_km())

    def test_start_parallel_thread_kernels(self):
        """Start kernels concurrently from multiple threads."""
        self.test_tcp_lifecycle()
        thread = threading.Thread(target=self.test_tcp_lifecycle)
        thread2 = threading.Thread(target=self.test_tcp_lifecycle)
        try:
            thread.start()
            thread2.start()
        finally:
            thread.join()
            thread2.join()

    def test_start_parallel_process_kernels(self):
        """Start kernels concurrently from a thread and a subprocess."""
        self.test_tcp_lifecycle()
        thread = threading.Thread(target=self.test_tcp_lifecycle)
        proc = mp.Process(target=self.test_tcp_lifecycle)
        try:
            thread.start()
            proc.start()
        finally:
            thread.join()
            proc.join()
        assert proc.exitcode == 0
|
server.py | import socket # библиотека для приема/передачи данных
import threading # имортируем потоки
sock = socket.socket()  # server socket (an IP address / port pair)
port = int(input("Введите порт: "))  # ask the operator for the listening port
sock.bind(('127.0.0.1', port))
sock.listen(3)  # backlog: how many pending client connections to queue
clients = []  # per-client connection sockets (kept parallel to `names`)
names = []  # client display names (kept parallel to `clients`)
def send_to_clients(name, data):
    """Broadcast *data* (raw bytes) to every connected client except the
    sender identified by *name*, prefixing each message with "(name): "."""
    for peer_name, conn in zip(names, clients):
        if peer_name != name:
            conn.send(f"({name}): ".encode() + data)
def listen_client(socket):
    """Receive messages from one client's connection and broadcast them to
    everyone else. Runs on a dedicated thread per client (see server_accept).

    BUG FIX: the original looped forever on a disconnected peer — recv()
    returns b'' after the client closes, so the loop spun at full speed
    broadcasting empty payloads. Now the client is deregistered and the
    socket closed instead.
    """
    while True:
        data = socket.recv(1024)
        if not data:
            # Peer closed the connection: drop it from both parallel lists
            # so send_to_clients stops addressing it.
            pos = clients.index(socket)
            del clients[pos]
            del names[pos]
            socket.close()
            break
        pos = clients.index(socket)
        name = names[pos]
        send_to_clients(name, data)
def server_accept():
    """Accept loop: register each new client and spawn its listener thread.

    The first message a client sends after connecting is taken as its
    display name. Blocks forever.
    """
    while True:
        conn, addr = sock.accept()
        # receive the client's name (the first message after connecting)
        data = conn.recv(1024)
        name = data.decode()
        names.append(name)
        print(f"Client {name} accepted")
        clients.append(conn)
        # One thread per client relays its messages to everyone else.
        listen_tread = threading.Thread(target=listen_client, args=(conn,))
        listen_tread.start()
# Entry point: start accepting clients (blocks forever).
server_accept()
load_data.py | import numpy as np
from scipy import ndimage
import glob
import itertools
import threading
import time
import skimage.transform
import skimage.io
import skimage.filter
import gzip
import os
import queue
import multiprocessing as mp
# Dataset sizes for the Galaxy Zoo image sets (original counts in comments).
num_train = 61578 # 70948
num_test = 79975 # 79971
# Image id arrays loaded at import time; the rest of this module assumes
# these files exist relative to the working directory — TODO confirm.
train_ids = np.load("data/train_ids.npy")
test_ids = np.load("data/test_ids.npy")
def load_images_from_jpg(subset="train", downsample_factor=None, normalise=True, from_ram=False):
    """Yield every image of *subset* in alphabetic filename order.

    normalise converts uint8 pixels to float32 in [0, 1]; downsample_factor,
    when given, strides both spatial axes; from_ram reads from a /dev/shm
    copy of the dataset instead of data/raw.
    """
    if from_ram:
        pattern = "/dev/shm/images_%s_rev1/*.jpg"
    else:
        pattern = "data/raw/images_%s_rev1/*.jpg"
    # alphabetic ordering is used everywhere.
    for path in sorted(glob.glob(pattern % subset)):
        image = skimage.io.imread(path)
        if normalise:
            image = image.astype('float32') / 255.0  # normalise and convert to float
        if downsample_factor is None:
            yield image
        else:
            yield image[::downsample_factor, ::downsample_factor]
# Backwards-compatible alias for older call sites.
load_images = load_images_from_jpg
### data loading, chunking ###
def images_gen(id_gen, *args, **kwargs):
    """Lazily map an iterable of image ids to loaded images.

    Extra args/kwargs are forwarded to load_image (subset, normalise, from_ram).
    """
    for current_id in id_gen:
        yield load_image(current_id, *args, **kwargs)
def load_image(img_id, subset='train', normalise=True, from_ram=False):
    """Load one image by numeric id from the train or test set.

    from_ram reads the /dev/shm copy; normalise converts uint8 pixels to
    float32 in [0, 1].
    """
    if from_ram:
        path = "/dev/shm/images_%s_rev1/%d.jpg" % (subset, img_id)
    else:
        path = "data/raw/images_%s_rev1/%d.jpg" % (subset, img_id)
    image = skimage.io.imread(path)
    if not normalise:
        return image
    return image.astype('float32') / 255.0
def cycle(l, shuffle=True):  # l should be a NUMPY ARRAY of ids
    """Yield the elements of *l* forever.

    Works on a private copy so the caller's sequence is never mutated; when
    *shuffle* is true the copy is reshuffled at the start of every pass.
    """
    pool = list(l)
    while True:
        if shuffle:
            np.random.shuffle(pool)
        yield from pool
def chunks_gen(images_gen, shape=(100, 424, 424, 3)):
    """Batch a stream of images into fixed-shape numpy chunks.

    specify images_gen(cycle(list(train_ids))) as the ids_gen to loop through
    the training set indefinitely in random order. shape is
    (chunk_size, imsize1, imsize2, ...), so the image size must be known in
    advance. Yields (chunk, count) pairs; the final chunk may be partially
    filled, in which case only its first `count` entries are valid (the rest
    stay zero).
    """
    capacity = shape[0]
    chunk = np.zeros(shape)
    fill = 0
    for image in images_gen:
        chunk[fill] = image
        fill += 1
        if fill >= capacity:
            yield chunk, capacity
            chunk = np.zeros(shape)
            fill = 0
    # leftover data that never filled a whole chunk
    if fill > 0:
        yield chunk, fill
### threaded generator with a buffer ###
def _generation_thread(source_gen, buffer, buffer_lock, buffer_size=2, sleep_time=1):
while True:
# print "DEBUG: loader: acquiring lock"-
with buffer_lock:
# print "DEBUG: loader: lock acquired, checking if buffer is full"
buffer_is_full = (len(buffer) >= buffer_size)
# print "DEBUG: loader: buffer length is %d" % len(buffer)
if buffer_is_full:
# buffer is full, wait.
# this if-clause has to be outside the with-clause, else the lock is held for no reason!
# print "DEBUG: loader: buffer is full, waiting"
#print "buffer is full, exiting (DEBUG)"
#break
time.sleep(sleep_time)
else:
try:
data = next(source_gen)
except StopIteration:
break # no more data. STAHP.
# print "DEBUG: loader: loading %s" % current_path
# stuff the data in the buffer as soon as it is free
# print "DEBUG: loader: acquiring lock"
with buffer_lock:
# print "DEBUG: loader: lock acquired, adding data to buffer"
buffer.append(data)
# print "DEBUG: loader: buffer length went from %d to %d" % (len(buffer) - 1, len(buffer))
def threaded_gen(source_gen, buffer_size=2, sleep_time=1):
    """
    Generator that runs a slow source generator in a separate thread.

    buffer_size: the maximal number of items to pre-generate (length of the buffer)
    sleep_time: seconds to wait between polls when the buffer is empty.

    Yields items in source order; terminates once the producer thread has
    finished and the buffer is drained.
    """
    buffer_lock = threading.Lock()
    buffer = []
    thread = threading.Thread(target=_generation_thread, args=(source_gen, buffer, buffer_lock, buffer_size, sleep_time))
    # BUG FIX: setDaemon() is deprecated (removed alias path); assign the
    # attribute instead.
    thread.daemon = True
    thread.start()
    while True:
        # The emptiness check is done under the lock, but waiting happens
        # outside it so the producer can refill meanwhile.
        with buffer_lock:
            buffer_is_empty = (len(buffer) == 0)
        if buffer_is_empty:
            # BUG FIX: thread.isAlive() was removed in Python 3.9; is_alive()
            # is the supported spelling.
            if not thread.is_alive():
                print("buffer is empty and loading thread is finished, exiting")
                break
            print("buffer is empty, waiting!")
            time.sleep(sleep_time)
        else:
            with buffer_lock:
                data = buffer.pop(0)
            yield data
### perturbation and preprocessing ###
# use these with imap to apply them to a generator and return a generator
def im_rotate(img, angle):
    """Rotate *img* by *angle* degrees, reflecting at the borders."""
    rotated = skimage.transform.rotate(img, angle, mode='reflect')
    return rotated
def im_flip(img, flip_h, flip_v):
    """Flip *img* along axis 0 (flip_h) and/or axis 1 (flip_v)."""
    out = img
    if flip_h:
        out = out[::-1]
    if flip_v:
        out = out[:, ::-1]
    return out
# this old version uses ndimage, which is a bit unreliable (lots of artifacts)
def im_rotate_old(img, angle):
# downsampling afterwards is recommended
return ndimage.rotate(img, angle, axes=(0,1), mode='reflect', reshape=False)
def im_translate(img, shift_x, shift_y):
    """Shift *img* by (shift_x, shift_y) pixels, zero-filling uncovered areas.

    Downsampling afterwards is recommended.
    """
    def _axis_slices(shift, size):
        # (source, target) slice pair for one axis, for either sign of shift
        if shift >= 0:
            return slice(None, size - shift), slice(shift, None)
        return slice(-shift, None), slice(None, size + shift)

    src_x, tgt_x = _axis_slices(shift_x, img.shape[0])
    src_y, tgt_y = _axis_slices(shift_y, img.shape[1])
    out = np.zeros_like(img, dtype=img.dtype)
    out[tgt_x, tgt_y] = img[src_x, src_y]
    return out
def im_rescale(img, scale_factor):
    """Rescale *img* by *scale_factor* while keeping the original shape.

    Upscaled results are center-cropped; downscaled results are placed in the
    center of a zero-filled array of the original shape.
    """
    out = np.zeros_like(img, dtype=img.dtype)
    zoomed = skimage.transform.rescale(img, scale_factor)
    dx = abs(zoomed.shape[0] - img.shape[0]) // 2
    dy = abs(zoomed.shape[1] - img.shape[1]) // 2
    if scale_factor >= 1.0:
        out[:, :] = zoomed[dx:dx + img.shape[0], dy:dy + img.shape[1]]
    else:
        out[dx:dx + zoomed.shape[0], dy:dy + zoomed.shape[1]] = zoomed
    return out
# this old version uses ndimage zoom which is unreliable
def im_rescale_old(img, scale_factor):
    """Legacy rescale via scipy.ndimage.zoom, centered to the original shape.

    Raises ValueError for inputs that are neither 2D (grayscale) nor 3D
    (color); the original code silently fell through to a NameError there.
    """
    zoomed_img = np.zeros_like(img, dtype=img.dtype)
    if img.ndim == 2:
        z = (scale_factor, scale_factor)
    elif img.ndim == 3:
        z = (scale_factor, scale_factor, 1)  # never zoom the channel axis
    else:
        raise ValueError("expected a 2D or 3D image, got ndim=%d" % img.ndim)
    zoomed = ndimage.zoom(img, z)
    if scale_factor >= 1.0:
        # upscale: center-crop the zoomed image back to the original shape
        shift_x = (zoomed.shape[0] - img.shape[0]) // 2
        shift_y = (zoomed.shape[1] - img.shape[1]) // 2
        zoomed_img[:, :] = zoomed[shift_x:shift_x + img.shape[0], shift_y:shift_y + img.shape[1]]
    else:
        # downscale: paste the zoomed image into the center of a zero canvas
        shift_x = (img.shape[0] - zoomed.shape[0]) // 2
        shift_y = (img.shape[1] - zoomed.shape[1]) // 2
        zoomed_img[shift_x:shift_x + zoomed.shape[0], shift_y:shift_y + zoomed.shape[1]] = zoomed
    return zoomed_img
def im_downsample(img, ds_factor):
    """Naive downsample: keep every ds_factor-th pixel along both axes."""
    step = ds_factor
    return img[::step, ::step]
def im_downsample_smooth(img, ds_factor):
    """Smooth (anti-aliased) downsample via skimage rescale by 1/ds_factor."""
    factor = 1.0 / ds_factor
    return skimage.transform.rescale(img, factor)
def im_crop(img, ds_factor):
    """Return the centered crop of *img* spanning 1/ds_factor of each axis."""
    crop_x = img.shape[0] // ds_factor
    crop_y = img.shape[1] // ds_factor
    off_x = (img.shape[0] - crop_x) // 2
    off_y = (img.shape[1] - crop_y) // 2
    return img[off_x:off_x + crop_x, off_y:off_y + crop_y]
def im_lcn(img, sigma_mean, sigma_std):
    """
    Local contrast normalization: subtract the Gaussian-smoothed local mean,
    then divide by the Gaussian-smoothed local standard deviation.

    based on matlab code by Guanglei Xiong, see http://www.mathworks.com/matlabcentral/fileexchange/8303-local-normalization
    """
    local_mean = ndimage.gaussian_filter(img, sigma_mean)
    centered = img - local_mean
    local_std = np.sqrt(ndimage.gaussian_filter(centered ** 2, sigma_std))
    return centered / local_std
# RGB -> YUV conversion matrix (rows give the Y, U, V weights for R, G, B).
rgb2yuv = np.array([[0.299, 0.587, 0.114],
                    [-0.147, -0.289, 0.436],
                    [0.615, -0.515, -0.100]])
# inverse mapping, computed once at import time
yuv2rgb = np.linalg.inv(rgb2yuv)
def im_rgb_to_yuv(img):
    """Convert an (H, W, 3) RGB image to YUV via the module-level matrix."""
    return np.tensordot(img, rgb2yuv, axes=([2], [0]))
def im_yuv_to_rgb(img):
    """Convert an (H, W, 3) YUV image back to RGB via the inverse matrix."""
    return np.tensordot(img, yuv2rgb, axes=([2], [0]))
def im_lcn_color(img, sigma_mean, sigma_std, std_bias):
    """Apply biased LCN to the luma channel only, leaving chroma untouched."""
    yuv = im_rgb_to_yuv(img)
    yuv[:, :, 0] = im_lcn_bias(yuv[:, :, 0], sigma_mean, sigma_std, std_bias)
    return im_yuv_to_rgb(yuv)
def im_norm_01(img):  # this is just for visualisation
    """Linearly map *img* onto [0, 1]."""
    lo, hi = img.min(), img.max()
    return (img - lo) / (hi - lo)
def im_lcn_bias(img, sigma_mean, sigma_std, std_bias):
    """
    LCN with an std bias to avoid noise amplification

    The bias is added under the square root, so flat regions are not divided
    by a near-zero local standard deviation.
    """
    centered = img - ndimage.gaussian_filter(img, sigma_mean)
    denom = np.sqrt(ndimage.gaussian_filter(centered ** 2, sigma_std) + std_bias)
    return centered / denom
def im_luma(img):
    """Rec.601 luma of an (H, W, 3) image."""
    weights = np.array([0.299, 0.587, 0.114], dtype='float32')
    return np.tensordot(img, weights, axes=([2], [0]))
def chunk_luma(chunk):  # faster than doing it per image, probably
    """Rec.601 luma of a whole (N, H, W, 3) chunk at once."""
    weights = np.array([0.299, 0.587, 0.114], dtype='float32')
    return np.tensordot(chunk, weights, axes=([3], [0]))
def im_normhist(img, num_bins=256):  # from http://www.janeriksolem.net/2009/06/histogram-equalization-with-python-and.html
    """Histogram-equalize a grayscale image onto [0, 255].

    Note: np.histogram's `normed` keyword was removed in NumPy 1.24; we pass
    `density=True` instead. The output is unchanged because the CDF is
    renormalized by its final entry below, which cancels the scaling.
    """
    # this function only makes sense for grayscale images.
    img_flat = img.flatten()
    imhist, bins = np.histogram(img_flat, num_bins, density=True)
    cdf = imhist.cumsum()  # cumulative distribution function
    cdf = 255 * cdf / cdf[-1]  # normalize so the brightest value maps to 255
    # use linear interpolation of cdf to find new pixel values
    im2 = np.interp(img_flat, bins[:-1], cdf)
    return im2.reshape(img.shape)
def chunk_lcn(chunk, sigma_mean, sigma_std, std_bias=0.0, rescale=1.0):
    """
    based on matlab code by Guanglei Xiong, see http://www.mathworks.com/matlabcentral/fileexchange/8303-local-normalization
    assuming chunk.shape == (num_examples, x, y, channels)
    'rescale' is an additional rescaling constant to get the variance of the result in the 'right' range.

    Per-example local contrast normalization: subtract a Gaussian local mean,
    then divide by the (biased) Gaussian local standard deviation.
    """
    # NOTE(review): skimage.filter was renamed skimage.filters in scikit-image
    # 0.11 (and gaussian_filter later became gaussian), so this code requires
    # an old scikit-image version — confirm before upgrading the dependency.
    means = np.zeros(chunk.shape, dtype=chunk.dtype)
    for k in range(len(chunk)):
        means[k] = skimage.filter.gaussian_filter(chunk[k], sigma_mean, multichannel=True)
    chunk = chunk - means # centering
    del means # keep memory usage in check
    variances = np.zeros(chunk.shape, dtype=chunk.dtype)
    chunk_squared = chunk**2
    for k in range(len(chunk)):
        variances[k] = skimage.filter.gaussian_filter(chunk_squared[k], sigma_std, multichannel=True)
    # std_bias keeps flat regions from exploding; rescale tunes output variance
    chunk = chunk / np.sqrt(variances + std_bias)
    return chunk / rescale
# TODO: make this 100x faster lol. otherwise it's not usable.
def chunk_gcn(chunk, rescale=1.0):
    """Global contrast normalization, in place, per example and channel."""
    num, h, w, ch = chunk.shape
    means = chunk.reshape(num, h * w, ch).mean(1).reshape(num, 1, 1, ch)
    chunk -= means
    stds = chunk.reshape(num, h * w, ch).std(1).reshape(num, 1, 1, ch)
    chunk /= stds
    return chunk
def array_chunker_gen(data_list, chunk_size, loop=True, truncate=True, shuffle=True):
    """Yield ((chunk, ...), chunk_length) tuples drawn from the arrays in data_list.

    All arrays are shuffled with the same permutation so corresponding rows
    stay aligned. With truncate=True only whole chunks are produced; otherwise
    the final partial chunk is zero-padded and its true length reported.
    With loop=True the data is re-iterated (and re-shuffled) forever.
    """
    while True:
        if shuffle:
            state = np.random.get_state()
            for arr in data_list:
                # restore the RNG state so every array gets the same permutation
                np.random.set_state(state)
                np.random.shuffle(arr)
        total = data_list[0].shape[0]
        if truncate:
            num_chunks = total // chunk_size  # whole chunks only
        else:
            num_chunks = int(np.ceil(total / float(chunk_size)))
        for idx in range(num_chunks):
            window = slice(idx * chunk_size, (idx + 1) * chunk_size)
            chunks = []
            for arr in data_list:
                piece = arr[window]
                current_size = piece.shape[0]
                if current_size < chunk_size:
                    # incomplete final chunk: zero-pad up to chunk_size
                    padded_shape = (chunk_size,) + piece.shape[1:]
                    padded = np.zeros(padded_shape, dtype=piece.dtype)
                    padded[:current_size] = piece
                else:
                    padded = piece
                chunks.append(padded)
            yield tuple(chunks), current_size
        if not loop:
            break
def load_gz(path):  # load a .npy.gz file
    """Load a numpy array from *path*, transparently handling .gz compression."""
    if not path.endswith(".gz"):
        return np.load(path)
    compressed = gzip.open(path, 'rb')
    return np.load(compressed)
def save_gz(path, arr):  # save a .npy.gz file
    """Save *arr* to *path* as a gzip-compressed .npy file.

    Writes through the gzip module directly instead of shelling out to the
    `gzip` binary via os.system, which was non-portable and raced on a fixed
    /tmp filename when several processes saved concurrently. The output is
    still readable by load_gz / np.load over a gzip file object.
    """
    with gzip.open(path, 'wb') as f:
        np.save(f, arr)
def numpy_loader_gen(paths_gen, shuffle=True):
    """Yield (data_list, chunk_length) pairs loaded from groups of paths.

    Each group of files is loaded with load_gz; with shuffle=True every array
    in a group is shuffled with the same permutation to stay row-aligned.
    """
    for path_group in paths_gen:
        arrays = [load_gz(p) for p in path_group]
        if shuffle:
            state = np.random.get_state()
            for arr in arrays:
                np.random.set_state(state)  # same permutation for every array
                np.random.shuffle(arr)
        # the 'chunk' length needs to be the last entry
        yield arrays, arrays[0].shape[0]
def augmented_data_gen(path_patterns):
    """Build a loader generator over shuffled groups of files matching the patterns."""
    matched = [sorted(glob.glob(pattern)) for pattern in path_patterns]
    grouped = list(zip(*matched))
    return numpy_loader_gen(cycle(grouped, shuffle=True))
def post_augmented_data_gen(path_patterns):
    """Like augmented_data_gen, but applies fast realtime augmentation to each chunk."""
    matched = [sorted(glob.glob(pattern)) for pattern in path_patterns]
    grouped = list(zip(*matched))
    for arrays, chunk_length in numpy_loader_gen(cycle(grouped, shuffle=True)):
        yield post_augment_chunk(arrays), chunk_length
def post_augment_chunk(data_list):
    """
    perform fast augmentation that can be applied directly to the chunks in realtime.

    Draws one random quarter-turn rotation and random h/v flips per example and
    applies them, in place, to every 4-D (image) array in data_list.
    """
    chunk_size = data_list[0].shape[0]
    quarter_turns = np.random.randint(0, 4, chunk_size)
    do_flip_h = np.random.randint(0, 2, chunk_size).astype('bool')
    do_flip_v = np.random.randint(0, 2, chunk_size).astype('bool')
    for arr in data_list:
        # don't apply the transformations to anything that isn't an image
        if arr.ndim <= 3:
            continue
        for k in range(chunk_size):
            sample = np.rot90(arr[k], k=quarter_turns[k])
            if do_flip_h[k]:
                sample = sample[::-1]
            if do_flip_v[k]:
                sample = sample[:, ::-1]
            arr[k] = sample
    return data_list
### better threaded/buffered generator using the Queue class ###
### threaded generator with a buffer ###
def buffered_gen(source_gen, buffer_size=2, sleep_time=1):
    """
    Generator that runs a slow source generator in a separate thread.
    buffer_size: the maximal number of items to pre-generate (length of the buffer)
    sleep_time: kept for interface compatibility; no longer used (Queue.put
    blocks on a full queue, so no polling is needed).

    Fixes over the previous version:
      * a sentinel object signals exhaustion, so the consumer no longer blocks
        forever on buffer.get() once the source runs dry;
      * the busy-wait on buffer.full() is gone -- Queue.put() already blocks;
      * Thread.setDaemon() (deprecated) replaced by the daemon attribute.
    """
    buffer = queue.Queue(maxsize=buffer_size)
    sentinel = object()  # unique end-of-stream marker

    def _buffered_generation_thread(source_gen, buffer):
        while True:
            try:
                data = next(source_gen)
            except StopIteration:
                buffer.put(sentinel)  # tell the consumer we're done
                break
            buffer.put(data)  # blocks while the buffer is full

    thread = threading.Thread(target=_buffered_generation_thread, args=(source_gen, buffer))
    thread.daemon = True
    thread.start()
    while True:
        data = buffer.get()
        if data is sentinel:
            break
        yield data
        buffer.task_done()
### better version using multiprocessing, because the threading module acts weird,
# the background thread seems to slow down significantly. When the main thread is
# busy, i.e. computation time is not divided fairly.
def buffered_gen_mp(source_gen, buffer_size=2, sleep_time=1):
    """
    Generator that runs a slow source generator in a separate process.
    buffer_size: the maximal number of items to pre-generate (length of the buffer)
    sleep_time: poll interval in seconds, used both by the producer while the
    buffer is full and as the consumer's get() timeout.
    """
    buffer = mp.Queue(maxsize=buffer_size)
    def _buffered_generation_process(source_gen, buffer):
        while True:
            # we block here when the buffer is full. There's no point in generating more data
            # when the buffer is full, it only causes extra memory usage and effectively
            # increases the buffer size by one.
            while buffer.full():
                time.sleep(sleep_time)
            try:
                data = next(source_gen)
            except StopIteration:
                buffer.close() # signal that we're done putting data in the buffer
                break
            buffer.put(data)
    process = mp.Process(target=_buffered_generation_process, args=(source_gen, buffer))
    process.start()
    while True:
        try:
            # just blocking on buffer.get() here creates a problem: when get() is called and the buffer
            # is empty, this blocks. Subsequently closing the buffer does NOT stop this block.
            # so the only solution is to periodically time out and try again. That way we'll pick up
            # on the 'close' signal.
            try:
                yield buffer.get(True, timeout=sleep_time)
            except queue.Empty:
                if not process.is_alive():
                    break # no more data is going to come. This is a workaround because the buffer.close() signal does not seem to be reliable.
                pass # ignore this, just try again.
        except IOError: # if the buffer has been closed, calling get() on it will raise IOError.
            # this means that we're done iterating.
            break
def hms(seconds):
    """Format a duration in seconds as HH:MM:SS (fractional part discarded)."""
    total = int(np.floor(seconds))
    hours, remainder = divmod(total, 3600)
    minutes, secs = divmod(remainder, 60)
    return "%02d:%02d:%02d" % (hours, minutes, secs)
|
test_launcher.py | #!/usr/bin/env python3
# -*-coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Feb 14, 2014
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import logging
import socket
import threading
from twisted.internet import reactor
import unittest
from veles.config import root
from veles.launcher import Launcher, filter_argv
from veles.workflow import Workflow
class TestWorkflow(Workflow):
    """Workflow stub recording which master/slave callbacks were invoked."""

    # class-level flags flipped by the callbacks below, inspected by the tests
    job_requested = False
    job_done = False
    job_dropped = False
    update_applied = False
    power_requested = False
    event = threading.Event()

    def __init__(self, launcher, **kwargs):
        super(TestWorkflow, self).__init__(launcher, **kwargs)

    @Workflow.run_timed
    @Workflow.method_timed
    def generate_data_for_slave(self, slave):
        TestWorkflow.job_requested = True
        return [{'objective': 'win'}]

    @Workflow.run_timed
    @Workflow.method_timed
    def apply_data_from_slave(self, update, slave):
        if TestWorkflow.update_applied:
            TestWorkflow.event.set()
        is_winning = (isinstance(update, list) and
                      isinstance(update[0], dict) and
                      update[0]['objective'] == 'win')
        if is_winning:
            TestWorkflow.update_applied = True
            return True
        return False

    def do_job(self, job, update, callback):
        if (isinstance(job, list) and isinstance(job[0], dict)
                and job[0]['objective'] == 'win'):
            TestWorkflow.job_done = True
        callback(job)

    def drop_slave(self, slave):
        TestWorkflow.job_dropped = True

    @property
    def computing_power(self):
        TestWorkflow.power_requested = True
        return 100
class TestLauncher(unittest.TestCase):
    """Spins up a master and a slave Launcher and checks the job handshake."""

    def setUp(self):
        root.common.web.host = socket.gethostname()
        self.server = Launcher(listen_address="localhost:9999",
                               web_status=False)
        self.client = Launcher(master_address="localhost:9999")
        self.master_workflow = TestWorkflow(self.server)
        self.slave_workflow = TestWorkflow(self.client)

    def tearDown(self):
        pass

    def testConnectivity(self):
        reactor.callLater(0.1, reactor.stop)
        self.stopper = threading.Thread(target=self.stop)
        self.stopper.start()
        self.server.run()
        self.stopper.join()
        checks = ((TestWorkflow.job_requested, "Job was not requested."),
                  (TestWorkflow.job_done, "Job was not done."),
                  (TestWorkflow.update_applied, "Update was not applied."),
                  (TestWorkflow.power_requested, "Power was not requested."))
        for flag, message in checks:
            self.assertTrue(flag, message)

    def stop(self):
        # give the update a moment to land, then stop the reactor safely
        TestWorkflow.event.wait(0.1)
        reactor.callFromThread(reactor.stop)
class TestGlobal(unittest.TestCase):
    """Unit tests for module-level helpers (filter_argv)."""

    def testFilterArgv(self):
        argv = ["-v", "--listen", "0.0.0.0:5000", "-p", "-k=3000", "-e",
                "--full", "kwarg", "other", "-x", "--rec"]
        filtered = filter_argv(argv, "--listen", "-k", "--full", "-x")
        self.assertEqual(filtered, ["-v", "-p", "-e", "other", "--rec"],
                         "filter_argv failed")
        argv = ["-s", "workflow", "config"]
        filtered = filter_argv(argv, "-s")
        self.assertEqual(filtered, ["workflow", "config"])
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
unittest.main()
|
app.py | #!flask/bin/python
from flask import Flask, request, jsonify
import json
import os
from flask_cors import CORS
import numpy as np
import cv2
import datetime
import time
import logging
from logging.handlers import RotatingFileHandler
from threading import Thread
# Flask app setup: keep JSON keys in insertion order and allow cross-origin
# requests (with credentials) to everything under /api/.
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = False
cors = CORS(app, supports_credentials=True, resources={r"/api/*": {"origins": "*"}})
"""
Date: 2-June-2019
Build:0.1
Developer: rajat.chaudhari@accenture.com
Functionality: reads analog gauge images and gives current reading
Reviewer:
"""
class VideoGet:
    """
    Class that continuously gets frames from a VideoCapture object
    with a dedicated thread. Used for live feed
    """

    def __init__(self, src=0):
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        self.stopped = False

    def start(self):
        # background thread keeps self.frame updated with the latest capture
        Thread(target=self.get, args=()).start()
        return self

    def get(self):
        # loop until stop() is requested or the stream stops producing frames
        while not self.stopped:
            if self.grabbed:
                (self.grabbed, self.frame) = self.stream.read()
            else:
                self.stop()

    def stop(self):
        self.stopped = True
        self.stream.release()
        cv2.destroyAllWindows()
def dist_2_pts(x1, y1, x2, y2):
    """Euclidean distance between (x1, y1) and (x2, y2)."""
    dx = x2 - x1
    dy = y2 - y1
    return np.sqrt(dx * dx + dy * dy)
def avg_circles(circles, b):
    """Average the first *b* detected circles into one (x, y, r) estimate.

    Useful when HoughCircles reports multiple circles for a gauge seen at a
    slight angle; returns integer center coordinates and radius.
    """
    sum_x = sum_y = sum_r = 0
    for i in range(b):
        sum_x += circles[0][i][0]
        sum_y += circles[0][i][1]
        sum_r += circles[0][i][2]
    return int(sum_x / b), int(sum_y / b), int(sum_r / b)
def crop_cricle(image):
    """
    This is used to get the region of interest in cases of heavily tilted images, obsolete as it takes very long

    Detects the gauge circle via a Hough transform and crops the image to a
    square window around it. Returns (cropped_image, nx, ny, r): the crop,
    the circle center re-expressed relative to the crop origin, and the
    averaged radius.
    """
    """(h, w) = image.shape[:2]
    if (w>h) :
    #M = cv2.getRotationMatrix2D((h/2,w/2), 270, 1.0)
    #image = cv2.warpAffine(image, M, (w,w))
    image = imutils.rotate_bound(image, 90)"""
    height, width = image.shape[:2]
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #convert to gray
    # radii search is restricted to 20-45% of the image height
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20, np.array([]), 80, 50, int(height*0.20), int(height*0.45))
    a, b, c = circles.shape
    x,y,r = avg_circles(circles, b)
    #draw center and circle
    #cv2.circle(image, (x, y), r, (0, 0, 255), 3, cv2.LINE_AA) # draw circle
    #cv2.circle(image, (x, y), 2, (0, 255, 0), 3, cv2.LINE_AA) # draw center of circle
    icircle=circles[0,:][0]  # first detected circle: (cx, cy, radius)
    cropSize = (r*2, r*2)
    # averaged center re-expressed relative to the crop origin
    nx=x-int(icircle[0]-cropSize[1]//2)
    ny=y-int(icircle[1]-cropSize[0]//2)
    # clamp the crop window to the image bounds
    cropCoords = (max(0, icircle[1]-cropSize[0]//2),min(image.shape[0], icircle[1]+cropSize[0]//2),
                  max(0, icircle[0]-cropSize[1]//2),min(image.shape[1], icircle[0]+cropSize[1]//2))
    crop_cimg = image[int(cropCoords[0]):int(cropCoords[1]),int(cropCoords[2]):int(cropCoords[3])]
    return crop_cimg,nx,ny,r
def alignImages(app,img1, img2,gauge_number, file_type,log_dir,debug):
    """Warp img1 onto img2 using ORB feature matching and a homography.

    Returns (transformed, h): the warped version of img1 and the 3x3
    homography matrix. When debug == "True", the aligned frame and the
    feature-match visualisation are written into log_dir.
    """
    MAX_FEATURES = 1000#500 #
    GOOD_MATCH_PERCENT = 0.10#0.15 #
    grey1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    grey2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(MAX_FEATURES)
    impts1, d1 = orb.detectAndCompute(grey1, None)
    impts2, d2 = orb.detectAndCompute(grey2, None)
    # Match features with brute-force Hamming distance (binary descriptors).
    featurematcheObj = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matchedfeatures = featurematcheObj.match(d1, d2, None)
    # Sort matches by score (ascending distance = best matches first)
    matchedfeatures.sort(key=lambda x: x.distance, reverse=False)
    # Remove not so good matches: keep only the best GOOD_MATCH_PERCENT
    GoodMatches = int(len(matchedfeatures) * GOOD_MATCH_PERCENT)
    matchedfeatures = matchedfeatures[:GoodMatches]
    # Extract location of good matches
    points1 = np.zeros((len(matchedfeatures), 2), dtype=np.float32)
    points2 = np.zeros((len(matchedfeatures), 2), dtype=np.float32)
    for i, feature in enumerate(matchedfeatures):
        points1[i, :] = impts1[feature.queryIdx].pt
        points2[i, :] = impts2[feature.trainIdx].pt
    # Find homography (RANSAC rejects the remaining outlier matches)
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
    # Use homography to warp img1 into img2's frame
    height, width, channels = img2.shape
    transformed = cv2.warpPerspective(img1, h, (width, height))
    if debug=="True":
        imgMatches = cv2.drawMatches(img1, impts1, img2, impts2, matchedfeatures, None)
        cv2.imwrite('%s/frame%s-align.%s' % (log_dir,gauge_number, file_type), transformed)
        cv2.imwrite('%s/frame%s-matches.%s' % (log_dir,gauge_number, file_type), imgMatches)
    return transformed, h
def calibrate_gauge(app,img,gauge_number, file_type,log_dir,debug):
    '''
    This function calibrates the range available to the dial as well as the
    units. It works by first finding the center point and radius of the gauge. Then it draws lines at intervals
    (separation) in degrees from values defined a config file. This assumes that the gauge is linear (as most probably are).

    Returns (annotated_img, x, y, r): the image with the detected gauge circle
    drawn on it, plus the averaged center coordinates and radius.
    '''
    height, width = img.shape[:2]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #convert to gray
    #detect circles
    #restricting the search from 35-48% of the possible radii gives fairly good results across different samples. Remember that
    #these are pixel values which correspond to the possible radii search range.
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 20, np.array([]), 200, 80, int(height*0.35), int(height*0.48))
    # average found circles, found it to be more accurate than trying to tune HoughCircles parameters to get just the right one
    a, b, c = circles.shape
    x,y,r = avg_circles(circles, b)
    #draw center and circle
    cv2.circle(img, (x, y), r, (0, 0, 255), 3, cv2.LINE_AA)
    cv2.circle(img, (x, y), 2, (0, 255, 0), 3, cv2.LINE_AA)
    if debug =="True":
        #for calibration, plot lines from center going out at every 10 degrees and add marker
        #for i from 0 to 36 (every 10 deg)
        '''
        goes through the motion of a circle and sets x and y values based on the set separation spacing. Also adds text to each
        line. These lines and text labels serve as the reference point for the user to enter
        NOTE: by default this approach sets 0/360 to be the +x axis (if the image has a cartesian grid in the middle), the addition
        (i+9) in the text offset rotates the labels by 90 degrees so 0/360 is at the bottom (-y in cartesian). So this assumes the
        gauge is aligned in the image, but it can be adjusted by changing the value of 9 to something else.
        '''
        separation = 10.0 #in degrees
        interval = int(360 / separation)
        p1 = np.zeros((interval,2)) #set empty arrays
        p2 = np.zeros((interval,2))
        p_text = np.zeros((interval,2))
        # even j -> x coordinate, odd j -> y coordinate of each tick line
        for i in range(0,interval):
            for j in range(0,2):
                if (j%2==0):
                    p1[i][j] = x + 0.9 * r * np.cos(separation * i * 3.14 / 180) #point for lines
                else:
                    p1[i][j] = y + 0.9 * r * np.sin(separation * i * 3.14 / 180)
        text_offset_x = 10
        text_offset_y = 5
        for i in range(0, interval):
            for j in range(0, 2):
                if (j % 2 == 0):
                    p2[i][j] = x + r * np.cos(separation * i * 3.14 / 180)
                    p_text[i][j] = x - text_offset_x + 1.2 * r * np.cos((separation) * (i+27) * 3.14 / 180) #point for text labels, i+9 rotates the labels by 90 degrees
                else:
                    p2[i][j] = y + r * np.sin(separation * i * 3.14 / 180)
                    p_text[i][j] = y + text_offset_y + 1.2* r * np.sin((separation) * (i+27) * 3.14 / 180) # point for text labels, i+9 rotates the labels by 90 degrees
        #add the lines and labels to the image
        for i in range(0,interval):
            cv2.line(img, (int(p1[i][0]), int(p1[i][1])), (int(p2[i][0]), int(p2[i][1])),(0, 255, 0), 3)
            cv2.putText(img, '%s' %(int(i*separation)), (int(p_text[i][0]), int(p_text[i][1])), cv2.FONT_HERSHEY_SIMPLEX, 0.3,(0,0,0),2,cv2.LINE_AA)
        #saving calibrated frames for logs to validate, debug and troubleshoot
        cv2.imwrite('%s/funcframe%s-calibration.%s' % (log_dir,gauge_number, file_type), img)
    return img, x, y, r
def get_current_value(app,img, min_angle, max_angle, min_value, max_value, x, y, r, gauge_number, file_type,log_dir,outputFolder,debug):
    """Read the needle position from a calibrated gauge image.

    Detects line segments in a preprocessed binary image, assumes the longest
    segment is the needle, converts its angle into a reading via the linear
    [min_angle, max_angle] -> [min_value, max_value] mapping, and annotates the
    image. Returns (annotated_img, reading rounded to 2 dp, image name), or
    (0, -1, 0) when no line is detected.
    """
    new_value=0
    # preprocessing the image for finding lines
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.bitwise_not(gray)
    binaryImg = cv2.adaptiveThreshold(gray2, 255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY, 15, -2)
    kernel = np.ones((5,5),np.uint8)
    opening = cv2.morphologyEx(binaryImg, cv2.MORPH_OPEN, kernel)
    medianBlur = cv2.medianBlur(binaryImg, 5)
    guassianBlur = cv2.GaussianBlur(medianBlur, (5, 5), 0)
    if debug=="True":
        #saving processed frames for logs to validate, debug and troubleshoot
        cv2.imwrite('%s/funcframe%s-tempdst1.%s' % (log_dir,gauge_number, file_type), guassianBlur)
    # find lines
    minLineLength = 200 # this is critical, can be taken from config file if AI people are handling this
    maxLineGap = 0
    lines = cv2.HoughLinesP(image=guassianBlur, rho=1, theta=np.pi / 180, threshold=100,minLineLength=minLineLength, maxLineGap=0) # rho is set to 3 to detect more lines, easier to get more then filter them out later
    # define a range for lines
    final_line_list = []
    line_length=[]
    if lines is not None:
        for i in range(0, len(lines)):
            for x1, y1, x2, y2 in lines[i]:
                diff1 = dist_2_pts(x, y, x1, y1) # x, y is center of circle
                diff2 = dist_2_pts(x, y, x2, y2) # x, y is center of circle
                #set diff1 to be the smaller (closest to the center) of the two), makes the math easier
                if (diff1 > diff2):
                    temp = diff1
                    diff1 = diff2
                    diff2 = temp
                line_length.append(dist_2_pts(x1, y1, x2, y2))
                # add to final list
                final_line_list.append([x1, y1, x2, y2])
        longest=line_length.index(max(line_length)) #get the longest line
    # assuming the longest line is the best one
    if(len(final_line_list)>0):
        x1 = final_line_list[longest][0]
        y1 = final_line_list[longest][1]
        x2 = final_line_list[longest][2]
        y2 = final_line_list[longest][3]
        #find the farthest point from the center to be what is used to determine the angle
        dist_pt_0 = dist_2_pts(x, y, x1, y1)
        dist_pt_1 = dist_2_pts(x, y, x2, y2)
        if (dist_pt_0 > dist_pt_1):
            x_angle = x1 - x
            y_angle = y - y1
            cv2.line(img, (x, y), (x1, y1), (0, 255, 0), 2)
        else:
            x_angle = x2 - x
            y_angle = y - y2
            cv2.line(img, (x, y), (x2, y2), (0, 255, 0), 2)
        # take the arc tan of y/x to find the radian angle
        res = np.arctan(np.divide(float(y_angle), float(x_angle)))
        res = np.rad2deg(abs(res))
        # map the arctan result into a full 0-360 degree needle angle by quadrant
        if x_angle > 0 and y_angle > 0: #in quadrant I
            final_angle = 90-res
        if x_angle > 0 and y_angle < 0: #in quadrant II
            final_angle = 90+res
        if x_angle < 0 and y_angle < 0: #in quadrant III
            final_angle = 270-res
        if x_angle < 0 and y_angle > 0: #in quadrant IV
            final_angle = 270+res
        # calculates the reading based on the gauge range and calibrated values
        old_min = float(min_angle)
        old_max = float(max_angle)
        new_min = float(min_value)
        new_max = float(max_value)
        old_value = final_angle
        old_range = (old_max - old_min)
        new_range = (new_max - new_min)
        new_value = (((old_value - old_min) * new_range) / old_range) + new_min
        cv2.putText(img, str(round(new_value,2)), (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0), lineType=cv2.LINE_AA)
        if debug =="True":
            cv2.imwrite('%s/frame%s-lines.%s' % (log_dir,gauge_number, 'jpg'), img) #get path from config
        #cv2.imwrite('%s/Output_%s.%s' % (outputFolder,gauge_number, 'jpg'), img) #prints all frames for video
        imgName=('Output_%s.%s' % (gauge_number, 'jpg'))
        return img,round(new_value,2),imgName #change response image between frame and transformed image here
    else:
        app.logger.info("No line was detected on the gauge")
        return 0,int(-1),0
def GetReadings(app,file,path,frames_to_process,refimg,min_angle,max_angle,min_value,max_value,units,typeoffile,logFolder,outputFolder,currTime,debug,camera):
    """Run the full gauge-reading pipeline over an image, a video file, or a live feed.

    Dispatches on the extension of *file* ('jpg/jpeg/png' -> single image,
    'avi/wmv/mp4' -> video, basename 'Live_Feed' -> camera stream). Each frame
    is aligned to *refimg*, calibrated, and read; valid readings (> -1) are
    collected. Returns (GaugeReadings, frameNumber, iName) where iName is the
    name of the single annotated output image written.
    """
    GaugeReadings=[]
    frameNumber=0
    #to make sure only 1st image and reading is written to output
    found=0
    iName="empty"
    try:
        #classifying input as looping pattern changes depending on type of input file
        if (file.split('.')[-1] in ['jpg','jpeg','png']):
            try:
                frame=cv2.imread(file)
                frameNumber+=1
                currTime = datetime.datetime.now().strftime("%d.%m.%y-%H.%M.%S")
                image_Id=str(frameNumber)+'_'+currTime
                image,h=alignImages(app,frame,refimg,frameNumber, typeoffile,logFolder,debug)
                fimg, xf, yf, rf = calibrate_gauge(app,image,frameNumber, typeoffile,logFolder,debug)
                resulting_frame,currentValue,imgName = get_current_value(app,fimg, min_angle, max_angle, min_value, max_value, xf, yf, rf, image_Id, typeoffile,logFolder,outputFolder,debug)
                if imgName !=str(0) and found==0:
                    cv2.imwrite('%s/Output_%s.%s' % (outputFolder,image_Id, 'jpg'), resulting_frame)
                    iName=imgName
                    found=1
                if currentValue > -1:
                    GaugeReadings.append(currentValue)
                    app.logger.info("Latest frame gauge reading : "+ str(currentValue)+" "+units)
            except Exception as e :
                app.logger.error("Failure in reading gauge value")
                app.logger.error(str(e))
        elif (file.split('.')[-1] in ['avi','wmv','mp4']):
            cap=cv2.VideoCapture(file)
            if cap.isOpened():
                ret, frame = cap.read()
            else:
                ret=False
            #if user gives frames to process more than the video length take minimum of the two
            length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            while ret==True and frameNumber<int(min(int(frames_to_process),length)):
                try:
                    currTime = datetime.datetime.now().strftime("%d.%m.%y-%H.%M.%S")
                    frameNumber+=1
                    image_Id=str(frameNumber)+'_'+currTime
                    ret, frame = cap.read()
                    image,h=alignImages(app,frame,refimg,frameNumber, typeoffile,logFolder,debug)
                    fimg, xf, yf, rf = calibrate_gauge(app,image,frameNumber, typeoffile,logFolder,debug)
                    resulting_frame,currentValue,imgName = get_current_value(app,fimg, min_angle, max_angle, min_value, max_value, xf, yf, rf, image_Id, typeoffile,logFolder,outputFolder,debug)
                    #write only 1 reading to output folder
                    if imgName !=str(0) and found==0:
                        iName=imgName
                        cv2.imwrite('%s/Output_%s.%s' % (outputFolder,image_Id, 'jpg'), resulting_frame)
                        found=1
                    #dont consider bad frames as it interferes with average and min value calculations
                    if currentValue > -1:
                        GaugeReadings.append(currentValue)
                        app.logger.info("Latest frame gauge reading : "+ str(currentValue)+" "+units)
                except Exception as e :
                    app.logger.error("Failure in reading gauge value")
                    app.logger.error(str(e))
                    pass
            cap.release()
        elif file.split('\\')[-1]== "Live_Feed":
            record=0
            video_getter = VideoGet(camera).start()
            while (True) and frameNumber<int(frames_to_process):
                try:
                    frame = video_getter.frame
                    #special functionality to show input video feed to help user to adjust the position of gauge
                    #needs extra while loop to enable recording frames on press of 's' key
                    if debug == "True":
                        cv2.namedWindow("Input Frame")
                        cv2.imshow("Input Frame",frame)
                        if (cv2.waitKey(1) == ord("s")):
                            record=1
                        while (record==1) and frameNumber<int(frames_to_process):
                            frame = video_getter.frame
                            currTime = datetime.datetime.now().strftime("%d.%m.%y-%H.%M.%S")
                            frameNumber+=1
                            image_Id=str(frameNumber)+'_'+currTime
                            if debug == "True":
                                cv2.imwrite('%s/Input%s.%s' % (outputFolder,image_Id, typeoffile), frame)
                            image,h=alignImages(app,frame,refimg,frameNumber, typeoffile,logFolder,debug)
                            fimg, xf, yf, rf = calibrate_gauge(app,image,frameNumber, typeoffile,logFolder,debug)
                            resulting_frame,currentValue,imgName = get_current_value(app,fimg, min_angle, max_angle, min_value, max_value, xf, yf, rf, image_Id, typeoffile,logFolder,outputFolder,debug)
                            if imgName !=str(0) and found==0:
                                iName=imgName
                                found=1
                            if currentValue > -1:
                                GaugeReadings.append(currentValue)
                                app.logger.info("Latest frame gauge reading : "+ str(currentValue)+" "+units)
                        record=0
                    else:
                        currTime = datetime.datetime.now().strftime("%d.%m.%y-%H.%M.%S")
                        frameNumber+=1
                        image_Id=str(frameNumber)+'_'+currTime
                        if debug == "True":
                            cv2.imwrite('%s/Input%s.%s' % (outputFolder,image_Id, typeoffile), frame)
                        image,h=alignImages(app,frame,refimg,frameNumber, typeoffile,logFolder,debug)
                        fimg, xf, yf, rf = calibrate_gauge(app,image,frameNumber, typeoffile,logFolder,debug)
                        resulting_frame,currentValue,imgName = get_current_value(app,fimg, min_angle, max_angle, min_value, max_value, xf, yf, rf, image_Id, typeoffile,logFolder,outputFolder,debug)
                        if imgName !=str(0) and found==0:
                            iName=imgName
                            cv2.imwrite('%s/Output_%s.%s' % (outputFolder,image_Id, 'jpg'), resulting_frame)
                            found=1
                        if currentValue > -1:
                            GaugeReadings.append(currentValue)
                            app.logger.info("Latest frame gauge reading : "+ str(currentValue)+" "+units)
                except Exception as e :
                    app.logger.info("ERROR:Failure in reading gauge value")
                    app.logger.info(str(e))
                    pass
            video_getter.stop()
            del video_getter
    except Exception as e :
        app.logger.info("ERROR:Failure in reading gauge value")
        app.logger.info(str(e))
    return GaugeReadings,frameNumber,iName
def RemoveOutliers(readingList):
    """Drop IQR outliers from a list of gauge readings.

    Uses the standard Tukey fence: values strictly outside
    [Q1 - 1.5*IQR, Q3 + 1.5*IQR] are discarded.  Original input order is
    preserved for the survivors.

    :param readingList: list of numeric gauge readings (may be empty).
    :returns: new list containing only the non-outlier readings; an empty
        input yields an empty list instead of raising (np.percentile on an
        empty sequence would error).
    """
    if not readingList:
        # Guard: np.percentile raises on empty input.
        return []
    # np.percentile does not require sorted input; the old pre-sort was redundant.
    q1, q3 = np.percentile(readingList, [25, 75])
    iqr = q3 - q1
    lower_bound = q1 - (1.5 * iqr)
    upper_bound = q3 + (1.5 * iqr)
    # Strict comparisons kept to preserve the original filtering semantics.
    return [item for item in readingList if lower_bound < item < upper_bound]
@app.route('/', methods=['GET','POST'])
def readDial():
    """Flask endpoint: read an analog gauge from a video source and return stats.

    Query parameters:
        functional_loc_code -- key into code_view_mapping.json selecting the video/camera.
        path                -- output directory for analysis images.
        frames_to_process   -- optional; falls back to the configured default.

    Returns a JSON payload with first/avg/min/max readings plus frame counts,
    or an ERROR payload when parameters are missing/invalid.

    NOTE(review): config paths use '\\' separators, so this route assumes it
    runs on Windows — confirm.
    """
    App_start_time = time.time()
    # Load static configuration (calibration + location-to-video mapping).
    try:
        with open(os.getcwd()+'\\gaugeconfig.json','r') as json_config_file:
            gaugeconfig = json.load(json_config_file)
        with open(os.getcwd()+'\\code_view_mapping.json','r') as json_data_file:
            dataconfig = json.load(json_data_file)
    except Exception as e:
        # NOTE(review): on failure gaugeconfig stays undefined and the lines
        # below raise NameError — confirm whether an early return is intended.
        print("failed to load config file, error is - %s" % e)
    # Create the base log directory, then a per-request timestamped subfolder.
    try:
        if not (os.path.exists(os.getcwd()+gaugeconfig['log_dir'])):
            os.mkdir(os.getcwd()+gaugeconfig['log_dir'])
    except OSError as e:
        print ("Creation of the directory %s failed" % e)
    currTime = datetime.datetime.now().strftime("%d.%m.%y-%H.%M.%S")
    if not (os.path.exists(os.getcwd()+gaugeconfig['log_dir']+currTime)):
        os.mkdir(os.getcwd()+gaugeconfig['log_dir']+currTime)
    # Request parameters.
    loc_code = request.args.get("functional_loc_code")
    path = request.args.get("path")
    frames_to_process = request.args.get("frames_to_process")
    if frames_to_process is None:
        frames_to_process=gaugeconfig['frames_to_process']
    GaugeReadings=[]
    # Gauge calibration values (needle angle range mapped to value range).
    min_angle = gaugeconfig['calibration']['min_angle']
    max_angle = gaugeconfig['calibration']['max_angle']
    min_value = gaugeconfig['calibration']['min_value']
    max_value = gaugeconfig['calibration']['max_value']
    units = gaugeconfig['calibration']['units']
    typeoffile=gaugeconfig["type_of_image"]
    camera=gaugeconfig["camera"]
    debug=gaugeconfig['debug']
    outliers=gaugeconfig['remove_outliers']
    if (loc_code and path and frames_to_process and loc_code in dataconfig):
        # Ensure the caller-supplied output directory exists.
        try:
            if not (os.path.exists(path)):
                os.mkdir(path)
        except OSError:
            print ("Creation of the directory %s failed" % path)
        logFolder=os.getcwd()+gaugeconfig['log_dir']+currTime
        outputFolder=path
        refimg=cv2.imread(os.getcwd()+gaugeconfig['reference_image'])
        file=os.getcwd()+dataconfig[loc_code]
        # Per-request rotating log file wired into the Flask app logger.
        # NOTE(review): a new handler is added on every request without ever
        # being removed — handlers accumulate; confirm this is acceptable.
        log_file= logFolder+'\\GR_'+currTime+'.log'
        open(log_file, 'w').close()
        handler = RotatingFileHandler(log_file, maxBytes=1024*1024*100, backupCount=1)
        handler.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
        handler.setFormatter(formatter)
        app.logger.setLevel(logging.INFO)
        app.logger.addHandler(handler)
        # Core work: grab frames, align, calibrate, and read the gauge needle.
        GaugeReadings,framesProcessed,imgName = GetReadings(app,file,path,frames_to_process,refimg,min_angle,max_angle,min_value,max_value,units,typeoffile,logFolder,outputFolder,currTime,debug,camera)
        now = datetime.datetime.now()
        OverallTime=time.time()-App_start_time
        if len(GaugeReadings)>0:
            # Average over outlier-filtered readings when configured;
            # min/max/first are always taken from the raw readings.
            if outliers=="True":
                GR=RemoveOutliers(GaugeReadings)
                Average=round(sum(GR)/len(GR),2)
            else:
                Average=round(sum(GaugeReadings)/len(GaugeReadings),2)
            minimum = min(GaugeReadings)
            maximum = max(GaugeReadings)
            reading=GaugeReadings[0]
            readingStatus=1
        else:
            # No frame yielded a reading.
            Average=0
            minimum=0
            maximum=0
            reading="No Readings Captured"
            readingStatus=0
        date_time = now.strftime("%d/%m/%Y, %H:%M:%S")
        response = {
            "Date" : date_time,
            "Reading_Status" : readingStatus,
            "Measure" : {
                "First Frame Reading":reading,
                "Average" : round(Average,2),
                "Min" : round(minimum,2),
                "Max" :round(maximum,2),
            },
            "Frame_Details":
            {
                "Frames_Processed" : framesProcessed,
                "Frames_Success" : len(GaugeReadings),
                "Total Time":OverallTime
            },
            "Analysis_image_path" : str(os.getcwd())+"\\"+path+"\\"+imgName,
        }
    else:
        response = {"ERROR":"Inavlid Payload or invalid parameter values. Please pass the query parameters functional_loc_code, path and frames_to_process and their expected values."}
        app.logger.info("ERROR:Invalid Payload or invalid parameter values")
    return jsonify(response)
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
__init__.py | # -*- coding: utf-8 -*-
import sys
from noval import _,GetApp,NewId
import noval.iface as iface
import noval.plugin as plugin
import noval.util.utils as utils
import noval.constants as constants
import noval.consts as consts
import noval.util.strutils as strutils
import os
import noval.core as core
import noval.imageutils as imageutils
from dummy.userdb import UserDataDb
import tkinter as tk
from tkinter import ttk,messagebox
import noval.toolbar as toolbar
try:
import tkSimpleDialog
except ImportError:
import tkinter.simpledialog as tkSimpleDialog
import ctypes
from pkg_resources import resource_filename,_manager
import threading
import base64
from bs4 import Tag
import noval.ui_utils as ui_utils
import noval.preference as preference
import webbrowser
import noval.util.urlutils as urlutils
import noval.plugins.update as update
import noval.util.downutils as downutils
import noval.util.fileutils as fileutils
from noval.python.plugins.pip_gui import PluginsPipDialog
import json
import zipimport
import noval.python.interpreter.pythonpackages as pythonpackages
GetApp().AddMessageCatalog('openwebbrowser', __name__)
from openwebbrowser.welcome_html_code import *
# Singleton reference to the installed WebBrowserPlugin (set in PlugIt).
PLUGIN_INSTANCE = None
# Pre-built cefpython egg shipped for Windows; unpacked into the resource cache.
egg_path = 'cefpython3-66.0-py3.6.egg'
cache_path = _manager.get_cache_path(egg_path, '')
pkg_path = resource_filename(__name__,'')
if utils.is_windows():
    # Make the unpacked egg importable on Windows.
    sys.path.append(cache_path)
try:
    from cefpython3 import cefpython as cef
except:
    # cefpython may not be installed yet; CheckCef()/InstallCef() deal with that.
    pass
# True while a background cefpython3 install is in flight (see InstallCef/HookExit).
IS_INSTALLING_CEF = False
def InstallCef():
    """Install the cefpython3 component the plugin depends on.

    Windows: downloads the pre-built egg from the update server and unpacks
    it into ``cache_path`` via zipimport (asynchronous; ``IS_INSTALLING_CEF``
    guards application exit meanwhile).
    Other platforms: shells out to ``pip install --user cefpython3``.
    """
    global IS_INSTALLING_CEF
    if utils.is_windows():
        def finish_download(zip_path):
            # Download callback: unpack the egg, then report success/failure
            # based on whether the cache directory materialized.
            global IS_INSTALLING_CEF
            utils.update_statusbar(_('start unzip cefpython3 component'))
            try:
                z = zipimport.zipimporter(zip_path)
                # Loading the package from the zip extracts it into the cache.
                z.load_module("cefpython3")
            except:
                pass
            finally:
                IS_INSTALLING_CEF = False
            if os.path.exists(cache_path):
                utils.update_statusbar(_('unzip cefpython3 component finished'))
                utils.update_statusbar(_('install cefpython3 component success...'))
            else:
                utils.update_statusbar(_('unzip cefpython3 component failed'))
                utils.update_statusbar(_('install cefpython3 component failed...'))
                messagebox.showerror(_("Error"),_('install cefpython3 component failed,Please wait for next launch to install it'))
        download_url = '%s/api/download' % (UserDataDb.HOST_SERVER_ADDR)
        payload = dict(tool=egg_path)
        utils.update_statusbar(_('installing cefpython3 component...'))
        IS_INSTALLING_CEF = True
        try:
            downutils.download_file(download_url,call_back=finish_download,show_progress_dlg=False,**payload)
        except:
            IS_INSTALLING_CEF = False
            utils.update_statusbar(_('install cefpython3 component failed...'))
    else:
        # Non-Windows: install from the configured pip mirror.
        pip_source_path = utils.get_config_value('virtual_env','pip_source_path',default_value=pythonpackages.InstallPackagesDialog.SOURCE_LIST[6])
        command = "%s -m pip install --user cefpython3 -i %s --trusted-host %s" % (sys.executable,pip_source_path,pythonpackages.url_parse_host(pip_source_path))
        os.system(command)
        # NOTE(review): set True AFTER the blocking os.system call completes —
        # looks like it should be False (or set before the call); confirm.
        IS_INSTALLING_CEF = True
def CheckCef():
    """Return True when the cefpython3 package is importable.

    If the package is simply missing, a (possibly asynchronous) install is
    kicked off via InstallCef(); any other import failure is logged.  In both
    failure cases False is returned.
    """
    try:
        from cefpython3 import cefpython as cef  # noqa: F401 - probe import only
    except ModuleNotFoundError:
        # Not installed yet: start the install and report unavailability.
        InstallCef()
        return False
    except:
        # Installed but broken (ABI mismatch, partial unpack, ...): just log it.
        utils.get_logger().exception('')
        return False
    else:
        return True
# Toolbar icon format: .png needs Tk newer than 8.5, otherwise fall back to .gif.
IMAGE_EXT = ".png" if tk.TkVersion > 8.5 else ".gif"
# Extra flag bits OR-ed into the core.DOC_* flags when creating documents.
INTERNAL_WEB_BROWSER = 16
APPLICATION_STARTUP_PAGE = 32
# CEF must be initialized exactly once per process (see WebView.__init__).
IS_CEF_INITIALIZED = False
# Command strings dispatched by Command.action(); payload is appended after ':'.
APP_UPDATE_COMMAND = 'command:workbench.action.app.update'
FEEDS_OPEN_URL_COMMAND = 'command:workbench.action.feeds.openurl'
class StartupOptionPanel(ui_utils.CommonOptionPanel):
    """Preference page controlling the start page: show-on-startup toggle and
    the number of recent projects listed on it."""
    def __init__(self, parent):
        ui_utils.CommonOptionPanel.__init__(self, parent)
        self._showWelcomePageVar = tk.IntVar(value=utils.profile_get_int(consts.SHOW_WELCOME_PAGE_KEY, True))
        showWelcomePageCheckBox = ttk.Checkbutton(self.panel,text=_("Show start page on startup"),variable=self._showWelcomePageVar)
        showWelcomePageCheckBox.pack(fill=tk.X)
        row = ttk.Frame(self.panel)
        self.mru_var = tk.IntVar(value=utils.profile_get_int(consts.RECENTPROJECT_LENGTH_KEY,consts.DEFAULT_MRU_PROJECT_NUM))
        # Validate that the recent-project-count entry only accepts digits.
        validate_cmd = self.register(self.validateMRUInput)
        self.mru_project_ctrl = ttk.Entry(row,validate = 'key', textvariable=self.mru_var,validatecommand = (validate_cmd, '%P'))
        ttk.Label(row, text=_("Project History length on start page") + "(%d-%d): " % \
            (1,consts.MAX_MRU_PROJECT_LIMIT)).pack(side=tk.LEFT)
        self.mru_project_ctrl.pack(side=tk.LEFT)
        row.pack(fill="x",pady=(0,consts.DEFAUT_CONTRL_PAD_Y))
    def OnOK(self, optionsDialog):
        """Persist both options when the preference dialog is accepted."""
        utils.profile_set(consts.SHOW_WELCOME_PAGE_KEY, self._showWelcomePageVar.get())
        utils.profile_set(consts.RECENTPROJECT_LENGTH_KEY,self.mru_var.get())
        return True
    def validateMRUInput(self,contents):
        """Tk key-validator callback: reject (and beep on) non-digit content."""
        if not contents.isdigit():
            self.mru_project_ctrl.bell()
            return False
        return True
def html_to_data_uri(html, js_callback=None):
    """Encode *html* into a base64 ``data:text/html`` URI.

    Two calling conventions:
      * from Python (no ``js_callback``): the URI string is returned;
      * from Javascript: inter-process messaging is asynchronous, so the
        value cannot be returned — it is delivered via ``js_callback`` instead.
    """
    encoded = base64.b64encode(html.encode("utf-8", "replace"))
    ret = "data:text/html;base64,{data}".format(data=encoded.decode("utf-8", "replace"))
    if not js_callback:
        return ret
    # Javascript caller: log through the browser, then hand the URI back
    # via the supplied callback.
    js_print(js_callback.GetFrame().GetBrowser(),
        "Python", "html_to_data_uri",
        "Called from Javascript. Will call Javascript callback now.")
    js_callback.Call(ret)
class WebDocument(core.Document):
    """Document type backing WebView; the "filename" is a URL, so there is
    nothing to load from disk."""
    def OnOpenDocument(self, filename):
        # No-op: the view itself loads the URL.
        return True
class LoadHandler(object):
    """CEF client handler receiving page-load callbacks for one BrowserFrame."""
    def __init__(self, browser_frame):
        self.browser_frame = browser_frame
    def OnLoadStart(self, browser, **_):
        # Mirror the browser's current URL into the navigation bar, if any.
        if self.browser_frame.web_view.navigation_bar:
            self.browser_frame.web_view.navigation_bar.set_url(browser.GetUrl())
    def OnLoadEnd(self,browser,**kwargs):
        print (kwargs)
        # No navigation bar means this is the start page: populate its
        # dynamic content (recent projects, feeds) once it loaded OK.
        if not self.browser_frame.web_view.navigation_bar:
            print ("load url end....")
            if kwargs.get('http_code') == 200:
                PLUGIN_INSTANCE.LoadAsync(self.browser_frame)
    def OnLoadError(self,browser,**kwargs):
        print ("load url %s error...."%browser.GetUrl())
        print (kwargs)
        print ("-----------------------------")
class FocusHandler(object):
    """CEF client handler tracing focus transitions for one BrowserFrame."""
    def __init__(self, browser_frame):
        self.browser_frame = browser_frame
    def OnTakeFocus(self, next_component, **_):
        utils.get_logger().debug("FocusHandler.OnTakeFocus, next={next}"
            .format(next=next_component))
    def OnSetFocus(self, source, **_):
        utils.get_logger().debug("FocusHandler.OnSetFocus, source={source}"
            .format(source=source))
        # Returning False lets CEF proceed with setting focus.
        return False
    def OnGotFocus(self, **_):
        """Fix CEF focus issues (#255). Call browser frame's focus_set
        to get rid of type cursor in url entry widget."""
        utils.get_logger().debug("FocusHandler.OnGotFocus")
        #self.browser_frame.focus_set()
@utils.call_after_with_arg(1)
def ShowPluginManagerDlg():
    """Open the plugin-manager dialog (deferred onto the UI thread by the
    call_after decorator, since it can be triggered from a JS callback)."""
    plugin_dlg = PluginsPipDialog(GetApp().GetTopWindow(),package_count=0)
    plugin_dlg.ShowModal()
@utils.call_after_with_arg(1)
def ShowErrorMessageDialog(title,msg):
    """Show an error box, deferred onto the UI thread via call_after."""
    messagebox.showerror(title,msg)
class Command(object):
    """Python object bound into the CEF browser as ``Command``; the welcome
    page's Javascript invokes ``Command.action('command:...')`` strings that
    are dispatched here."""
    def action(self, msg):
        """Dispatch one command string sent from the start page.

        Exact matches are handled first; prefix commands (app update, feed
        open) carry a payload after an extra ':'.  Anything unrecognized is
        treated as "open recent project" with an encoded path.
        """
        if msg == 'command:workbench.action.project.openProject':
            GetApp().MainFrame.GetProjectView().OpenProject()
        elif msg == 'command:workbench.action.project.newProject':
            GetApp().MainFrame.GetProjectView().NewProject()
        elif msg == "command:workbench.action.help.register_or_login":
            GetApp().Registerorlogin()
        elif msg == "command:workbench.action.help.ManagePlugins":
            ShowPluginManagerDlg()
        elif msg.find(APP_UPDATE_COMMAND) != -1:
            # Payload after the command prefix is the new app version string.
            app_version = msg.replace(APP_UPDATE_COMMAND+":","")
            update.UpdateApp(app_version)
        elif msg.find(FEEDS_OPEN_URL_COMMAND) != -1:
            # Payload is "url|feed_id"; '...' ids mark bundled fallback feeds
            # whose opens are not reported back to the server.
            url,feed_id = msg.replace(FEEDS_OPEN_URL_COMMAND+":","").split('|')
            webbrowser.open(url)
            if feed_id == "...":
                return
            api_addr = '%s/api/feed/open' % (UserDataDb.HOST_SERVER_ADDR)
            urlutils.RequestData(api_addr,method="post",arg = {'feed_id':feed_id})
        elif msg == "command:workbench.action.help.openCodeRepositoryURL":
            webbrowser.open("https://gitee.com/wekay/NovalIDE")
        elif msg == "command:workbench.action.help.openDocumentationUrl":
            webbrowser.open("https://wekay.gitee.io/novalide")
        elif msg == "command:workbench.action.help.keybindingsReference":
            webbrowser.open("http://www.novalide.com/media/document/shortcuts.pdf")
        elif msg == "command:workbench.action.help.openIntroductoryVideosUrl":
            webbrowser.open("https://wekay.gitee.io/novalide/zh/getstarted/introductory/")
        elif msg == "command:workbench.action.help.openTipsAndTricksUrl":
            webbrowser.open("https://wekay.gitee.io/novalide/zh/getstarted/tips/")
        else:
            # Recent-project command: path was encoded by LoadRecent with
            # ':' -> '|' (drive colon) and '\' -> '/'; undo that here.
            project_path = msg.split(':')[-1].replace('/',os.sep).replace('|',":")
            if not os.path.exists(project_path):
                GetApp().GetDocumentManager().RemoveProjectFromHistory(project_path)
                ShowErrorMessageDialog(GetApp().GetAppName(),_("The project '%s' doesn't exist and couldn't be opened!") % project_path)
            else:
                GetApp().GetDocumentManager().CreateDocument(project_path, core.DOC_SILENT)
class BrowserFrame(ttk.Frame):
    """Tk frame that embeds a CEF browser window as a native child."""
    def __init__(self, master, url,view,navigation_bar=None):
        self.navigation_bar = navigation_bar
        self.closing = False
        self.browser = None          # cef browser; created lazily in on_configure
        self.url = url
        self.web_view = view
        ttk.Frame.__init__(self, master)
        self.bind("<FocusIn>", self.on_focus_in)
        self.bind("<FocusOut>", self.on_focus_out)
        # First <Configure> event triggers browser creation (widget has a size then).
        self.bind("<Configure>", self.on_configure)
        self.focus_set()
    def SetUrl(self,url):
        """Remember the URL to load when the browser gets created."""
        self.url = url
    def SetClientHandler(self):
        """Attach the load- and focus-callback handlers to the browser."""
        self.browser.SetClientHandler(LoadHandler(self))
        self.browser.SetClientHandler(FocusHandler(self))
    def CreateBrowserAsync(self,window_info,url):
        """Create the browser; scheduled onto the CEF UI thread (Windows path)."""
        self.browser = cef.CreateBrowserSync(window_info,
            url=url)
        assert self.browser
        self.SetClientHandler()
        # Expose the Command dispatcher to the page's Javascript.
        js = cef.JavascriptBindings()
        js.SetObject('Command', Command())
        self.browser.SetJavascriptBindings(js)
    def embed_browser_sync(self):
        '''
        Create the cef browser object in single-threaded mode (Linux path).
        '''
        window_info = cef.WindowInfo()
        rect = [0, 0, self.winfo_width(), self.winfo_height()]
        window_info.SetAsChild(self.get_window_handle(), rect)
        self.browser = cef.CreateBrowserSync(window_info,
            url=self.url)
        assert self.browser
        self.SetClientHandler()
        js = cef.JavascriptBindings()
        js.SetObject('Command', Command())
        self.browser.SetJavascriptBindings(js)
        # Message loop: only needed in single-threaded mode; when the browser
        # is created on the CEF UI thread, CEF pumps its own loop.
        self.message_loop_work()
    def embed_browser(self):
        """Create the browser on the CEF UI thread (Windows path)."""
        window_info = cef.WindowInfo()
        rect = [0, 0, self.winfo_width(), self.winfo_height()]
        window_info.SetAsChild(self.get_window_handle(), rect)
        # cef.PostTask(thread, func, params...) — func's arguments must be
        # positional, not keywords.
        cef.PostTask(cef.TID_UI, self.CreateBrowserAsync, window_info, self.url)
    def get_window_handle(self):
        """Return the native window id this frame wraps, for SetAsChild."""
        if self.winfo_id() > 0:
            return self.winfo_id()
        else:
            raise Exception("Couldn't obtain window handle")
    def message_loop_work(self):
        # Pump CEF's message loop from Tk's event loop, every 10 ms.
        cef.MessageLoopWork()
        self.after(10, self.message_loop_work)
    def on_configure(self, _):
        # Lazily create the browser on the first resize/layout event.
        if not self.browser:
            if utils.is_windows():
                # Windows: create the browser on the CEF UI thread.
                self.embed_browser()
            else:
                # Linux: create the browser in single-threaded mode.
                self.embed_browser_sync()
    def on_root_configure(self):
        # Root <Configure> event will be called when top window is moved
        if self.browser:
            self.browser.NotifyMoveOrResizeStarted()
    def on_mainframe_configure(self, width, height):
        """Resize the native browser window to track the Tk frame."""
        if self.browser:
            if utils.is_windows():
                # 0x0002 = SWP_NOMOVE: resize only, keep position.
                ctypes.windll.user32.SetWindowPos(
                    self.browser.GetWindowHandle(), 0,
                    0, 0, width, height, 0x0002)
            elif utils.is_linux():
                self.browser.SetBounds(0, 0, width, height)
            self.browser.NotifyMoveOrResizeStarted()
    def on_focus_in(self, _):
        utils.get_logger().debug("BrowserFrame.on_focus_in")
        if self.browser:
            self.browser.SetFocus(True)
    def on_focus_out(self, _):
        utils.get_logger().debug("BrowserFrame.on_focus_out")
        if self.browser:
            self.browser.SetFocus(False)
    def on_root_close(self):
        """Tear down the browser cleanly before destroying the Tk frame."""
        if self.browser:
            self.browser.CloseBrowser(True)
            self.clear_browser_references()
        self.destroy()
    def clear_browser_references(self):
        # Clear browser references that you keep anywhere in your
        # code. All references must be cleared for CEF to shutdown cleanly.
        self.browser = None
class WebView(core.View):
    """Document view hosting a CEF browser, optionally with a navigation bar
    (plain web browser) or without one (start page)."""
    def __init__(self):
        global IS_CEF_INITIALIZED
        core.View.__init__(self)
        self.browser_frame = None
        self.navigation_bar = None
        self.start_url = ''
        self.zoom_level = 0
        # CEF must be initialized exactly once per process.
        if not IS_CEF_INITIALIZED:
            settings = {
                'locale':GetApp().locale.GetLanguageCanonicalName(),
                'single_process':True
            }
            if utils.is_windows():
                # Windows: CEF pumps its own message loop on another thread,
                # and resources live in the unpacked egg under cache_path.
                settings.update({
                    'multi_threaded_message_loop':True,
                    "locales_dir_path": os.path.join(cache_path,"cefpython3","locales"),
                    "browser_subprocess_path": os.path.join(cache_path,"cefpython3","subprocess.exe"),
                    "resources_dir_path":os.path.join(cache_path,"cefpython3",'resources')
                })
            cef.Initialize(settings=settings)
            IS_CEF_INITIALIZED = True
    def OnClose(self, deleteWindow = True):
        """Deactivate and (optionally) destroy the view's frame."""
        self.Activate(False)
        if deleteWindow and self.GetFrame():
            self.GetFrame().Destroy()
        return True
    def set_line_and_column(self):
        # A browser view has no caret position; just clear the status bar.
        GetApp().MainFrame.GetStatusBar().Reset()
    def LoadUrl(self,url):
        """Point the (possibly not-yet-created) browser at *url*."""
        self.get_browser_frame().SetUrl(url)
    def OnCreate(self, doc, flags):
        """Build the view: optional navigation bar on top, browser below."""
        template = doc.GetDocumentTemplate()
        template_icon = template.GetIcon()
        # The start page tab shows no icon; restore it after frame creation.
        if flags & APPLICATION_STARTUP_PAGE:
            template.SetIcon(None)
        frame = GetApp().CreateDocumentFrame(self, doc, flags)
        template.SetIcon(template_icon)
        frame.bind("<Configure>", self.on_configure)
        browser_row = 0
        # Plain document open (neither start page nor internal browser):
        # the document's filename IS the URL.
        if not (flags & APPLICATION_STARTUP_PAGE) and not (flags & INTERNAL_WEB_BROWSER):
            self.start_url = doc.GetFilename()
            if utils.is_linux() and os.path.isfile(self.start_url):
                self.start_url = "file://" + self.start_url
        # Every mode except the start page gets a navigation bar in row 0.
        if not (flags & APPLICATION_STARTUP_PAGE):
            self.navigation_bar = NavigationBar(frame,self)
            self.navigation_bar.grid(row=0, column=0,
                sticky=(tk.N + tk.S + tk.E + tk.W))
            frame.rowconfigure(0, weight=0)
            frame.columnconfigure(0, weight=0)
            browser_row += 1
        self.browser_frame = BrowserFrame(frame,self.start_url,self,self.navigation_bar)
        self.browser_frame.grid(row=browser_row, column=0,
            sticky=(tk.N + tk.S + tk.E + tk.W))
        frame.rowconfigure(browser_row, weight=1)
        frame.columnconfigure(0, weight=1)
        return True
    def on_configure(self, event):
        # Keep the native browser window sized to the frame (minus nav bar).
        utils.get_logger().debug("MainFrame.on_configure")
        if self.browser_frame:
            width = event.width
            height = event.height
            if self.navigation_bar:
                height = height - self.navigation_bar.winfo_height()
            self.browser_frame.on_mainframe_configure(width, height)
    def UpdateUI(self, command_id):
        # Close/Close-All are always available for a browser tab.
        if command_id in [constants.ID_CLOSE,constants.ID_CLOSE_ALL]:
            return True
        return core.View.UpdateUI(self,command_id)
    def get_browser(self):
        """Return the live cef browser, or None before creation."""
        if self.browser_frame:
            return self.browser_frame.browser
        return None
    def get_browser_frame(self):
        if self.browser_frame:
            return self.browser_frame
        return None
    def ZoomView(self,delta=0):
        """Adjust zoom by *delta*, clamped to CEF's [-10, 15] level range."""
        if self.zoom_level >= 15 and delta > 0:
            return
        elif self.zoom_level <= -10 and delta < 0:
            return
        self.zoom_level += delta
        if self.browser_frame:
            self.get_browser().SetZoomLevel(self.zoom_level)
class WebBrowserPlugin(plugin.Plugin):
    """CEF-based web browser and start ("welcome") page plugin for NovalIDE."""
    ID_WEB_BROWSER = NewId()
    ID_SHOW_WELCOME_PAGE = NewId()
    plugin.Implements(iface.MainWindowI)
    # Feed categories on the start page, and how many entries each section shows.
    NEWS = "news"
    LEARN = "learn"
    DEFAULT_NEWS_NUM = 3
    DEFAULT_LEARN_NUM = 3
    def PlugIt(self, parent):
        """Register the web-view document template, menu entries, and the
        start-page preference panel; aborts when cefpython is unavailable."""
        utils.get_logger().info("Installing WebBrowser plugin")
        if not CheckCef():
            utils.get_logger().error("cefpython component is not success installled...")
            return
        global PLUGIN_INSTANCE
        # Invisible template so *.com/*.org "documents" open in the browser view.
        webViewTemplate = core.DocTemplate(GetApp().GetDocumentManager(),
            _("WebView"),
            "*.com;*.org",
            os.getcwd(),
            ".com",
            "WebView Document",
            _("Internal Web Browser"),
            WebDocument,
            WebView,
            core.TEMPLATE_INVISIBLE,
            icon = imageutils.load_image("","web.png"))
        GetApp().GetDocumentManager().AssociateTemplate(webViewTemplate)
        GetApp().MainFrame.GetProjectView(False).tree.RebuildLookupIcon()
        GetApp().InsertCommand(constants.ID_PLUGIN,self.ID_WEB_BROWSER,_("&Tools"),_("&Web Browser"),handler=self.GotoDefaultWebsite,pos="before",\
            image=webViewTemplate.GetIcon())
        GetApp().bind(constants.CHANGE_APPLICATION_LOOK_EVT, self.UpdateWelcomeTheme,True)
        PLUGIN_INSTANCE = self
        image_path = os.path.join(pkg_path, "resources","start.png")
        GetApp().InsertCommand(constants.ID_CHECK_UPDATE,self.ID_SHOW_WELCOME_PAGE,_("&Help"),_("Start Page"),handler=self.ShowWelcomePage,pos="before",\
            image=GetApp().GetImage(image_path))
        preference.PreferenceManager().AddOptionsPanelClass(preference.ENVIRONMENT_OPTION_NAME,"Start Page",StartupOptionPanel)
        # is_initialized distinguishes the first theme event (startup) from later ones.
        self.is_initialized = True
        self.app_update_params = {'has_new':False}
        self.feeds = []
    def InitNum(self):
        """Reset per-category feed counters; a pending app update takes one news slot."""
        self.number = {
            self.NEWS:0,
            self.LEARN:0
        }
        if self.app_update_params['has_new']:
            self.number[self.NEWS] += 1
    def LoadNews(self,frame):
        """Fetch feeds from the server (topping up from the bundled fallback)
        and render the news/learn sections; runs on a worker thread."""
        def add_id(d):
            # Bundled fallback feeds get placeholder id '...' so opens aren't reported.
            d.update({'id':'...'})
            return d
        self.CheckAppUpdate(self.app_update_params,frame)
        self.GetFeeds()
        utils.get_logger().info("get feeds count from sever is %d",len(self.feeds))
        if len(self.feeds) < (self.DEFAULT_NEWS_NUM + self.DEFAULT_LEARN_NUM):
            data = self.LoadDefaultFeeds()
            data = list(map(add_id,data))
            self.feeds.extend(data)
            utils.get_logger().info("get feeds count from sever is not enough,now feeds number is %d",len(self.feeds))
        self.CreateFeedNews(frame)
    def LoadDefaultFeeds(self):
        """Return the feeds bundled with the plugin, or [] on any failure."""
        feeds_file = os.path.join(pkg_path,"feeds.json")
        try:
            with open(feeds_file,encoding="utf-8") as f:
                return json.load(f)
        except:
            return []
    def LoadAsync(self,frame):
        """Populate the start page: recent projects synchronously, feeds on a thread."""
        self.InitNum()
        self.LoadRecent(frame)
        t = threading.Thread(target=self.LoadNews,args=(frame,))
        t.start()
    def UpdateWelcomeTheme(self,event):
        """Theme-change handler; on the first call (startup) it also decides
        whether to open the start page or only check for updates."""
        theme = event.get('theme')
        if self.is_initialized:
            if utils.profile_get_int(consts.SHOW_WELCOME_PAGE_KEY, True):
                self.GotoStartupPage(theme)
            else:
                # Start page disabled: still check for app updates in background.
                t = threading.Thread(target=self.CheckAppUpdate,args=(self.app_update_params,))
                t.start()
            self.is_initialized = False
        else:
            # Later theme changes: re-render the start page if it is open.
            start_doc = GetApp().GetDocumentManager().GetDocument(_("Start Page"))
            if start_doc:
                self.LoadStartupPage(start_doc,theme)
    def ShowWelcomePage(self):
        """Menu handler: open the start page with the current theme."""
        self.GotoStartupPage(GetApp().theme_value.get())
    def GotoDefaultWebsite(self):
        """Menu handler: open the product website in the internal browser."""
        self.GotoWebView(UserDataDb.HOST_SERVER_ADDR)
    def GotoWebView(self,web_addr):
        """Open (or reuse) an internal-browser document and load *web_addr*."""
        webViewTemplate = GetApp().GetDocumentManager().FindTemplateForTestPath(".com")
        doc = GetApp().GetDocumentManager().CreateTemplateDocument(webViewTemplate,_("Internal Web Browser"), core.DOC_SILENT|core.DOC_OPEN_ONCE|INTERNAL_WEB_BROWSER)
        if doc:
            doc.GetFirstView().LoadUrl(web_addr)
    def GotoStartupPage(self,theme):
        """Open (or reuse) the start-page document and render it."""
        webViewTemplate = GetApp().GetDocumentManager().FindTemplateForTestPath(".com")
        doc = GetApp().GetDocumentManager().CreateTemplateDocument(webViewTemplate,_("Start Page"), core.DOC_SILENT|core.DOC_OPEN_ONCE|APPLICATION_STARTUP_PAGE)
        self.LoadStartupPage(doc,theme)
    def LoadStartupPage(self,doc,theme):
        # Render off the UI thread; OpenStartupPage only hands a URL to CEF.
        t = threading.Thread(target=self.OpenStartupPage,args=(doc,theme))
        t.start()
    def LoadRecentProjects(self):
        """Return the recent-project paths from the document manager's history."""
        recent_projects = []
        projectHistory = GetApp().GetDocumentManager().GetProjectHistory()
        file_size = projectHistory.GetCurrentSize()
        for i in range(file_size):
            path = projectHistory.GetHistoryFile(i)
            recent_projects.append(path)
        return recent_projects
    def LoadRecent(self,frame):
        """Render the recent-projects list into the start page via JS."""
        recent_projects = self.LoadRecentProjects()
        if recent_projects == []:
            frame.browser.ExecuteFunction('SetEmptyProject')
        else:
            project_html_content = ''
            for recent_project in recent_projects:
                # Encode the path for the command string: drive ':' -> '|',
                # '\' -> '/'; Command.action reverses this.
                recent_project_path = recent_project.replace(":","|").replace("\\",'/')
                li = Tag(name='li',attrs={'class':'path'})
                a = Tag(name='a',attrs={'href':'javascript:void(0)','title':recent_project,'onclick':"Command.action('command:workbench.action.project.openRecentProject:%s')"%recent_project_path})
                a.string = os.path.basename(recent_project)
                li.append(a)
                project_html_content += li.prettify()
                project_html_content += "\n"
            frame.browser.ExecuteFunction('LoadProjects', project_html_content)
        # Must resize explicitly to work around executed JS not taking effect.
        if utils.is_windows():
            frame.on_mainframe_configure(frame.winfo_width(),frame.winfo_height())
    def GetFeeds(self):
        """Fetch start-page feed items from the server into self.feeds."""
        api_addr = '%s/api/feed/items' % (UserDataDb.HOST_SERVER_ADDR)
        app_version = utils.get_app_version()
        utils.get_logger().info("start get feeds from sever ...")
        data = urlutils.RequestData(api_addr,timeout=3,arg = {'app_version':app_version})
        if data is None:
            utils.get_logger().error("get feeds from sever error....")
            return
        self.feeds = data['feeds']
        utils.get_logger().info("get feeds from sever success....")
    def CreateFeedNews(self,frame):
        """Build the news/learn HTML fragments and push them into the page."""
        news_html_content = ''
        # A pending app update is shown as the first news entry.
        if self.app_update_params['has_new']:
            click_event = "Command.action('%s:%s')"%(APP_UPDATE_COMMAND,self.app_update_params['app_version'])
            div = self.CreateNews(self.app_update_params['title'],self.app_update_params['subcontent'],click_event)
            news_html_content += div.prettify()
            news_html_content += "\n"
            self.number[self.NEWS] += 1
        learn_html_content = ''
        for feed in self.feeds:
            click_event = "Command.action('%s:%s|%s')"%(FEEDS_OPEN_URL_COMMAND,feed['url'],feed['id'])
            div = self.CreateNews(feed['title'],feed['subcontent'],click_event)
            if feed['category'] == self.NEWS and self.number[self.NEWS] < self.DEFAULT_NEWS_NUM:
                news_html_content += div.prettify()
                news_html_content += "\n"
                self.number[self.NEWS] += 1
            elif feed['category'] == self.LEARN and self.number[self.LEARN] < self.DEFAULT_LEARN_NUM:
                learn_html_content += div.prettify()
                learn_html_content += "\n"
                self.number[self.LEARN] += 1
        if news_html_content:
            frame.browser.ExecuteFunction('LoadNews', news_html_content)
        if learn_html_content:
            frame.browser.ExecuteFunction('LoadLearn', learn_html_content)
        # Must call this explicitly to work around a bug where executed JS
        # otherwise does not take effect.
        if utils.is_windows():
            frame.on_mainframe_configure(frame.winfo_width(),frame.winfo_height())
    def CreateNews(self,title,subcontent,click_event):
        """Return one start-page feed entry as a bs4 Tag (button with caption/detail)."""
        div = Tag(name='div',attrs={'class':'item showLanguageExtensions'})
        btn = Tag(name='button',attrs={'role':'group','data-href':"command:workbench.extensions.action.showLanguageExtensions",'onclick':click_event})
        h3 = Tag(name="h3",attrs={'class':'caption'})
        h3.string = title
        span = Tag(name="span",attrs={'class':'detail'})
        span.string = subcontent
        div.append(btn)
        btn.append(h3)
        btn.append(span)
        return div
    def OpenStartupPage(self,doc,theme):
        # html_code comes from the welcome_html_code star-import.
        welcome_html_url = html_to_data_uri(html_code)
        doc.GetFirstView().LoadUrl(welcome_html_url)
    def GetMinVersion(self):
        """Override in subclasses to return the minimum version of novalide that
        the plugin is compatible with. By default it will return the current
        version of novalide.
        @return: version str
        """
        return "1.2.3"
    def InstallHook(self):
        """Override in subclasses to allow the plugin to be loaded
        dynamically.
        @return: None
        """
        CheckCef()
    def CanUninstall(self):
        # Built-in plugin: uninstalling is not allowed.
        return False
    def UninstallHook(self):
        pass
    def EnableHook(self):
        pass
    def DisableHook(self):
        pass
    def GetFree(self):
        # Free plugin.
        return True
    def GetPrice(self):
        # Free plugin: no price.
        pass
    def HookExit(self,exit):
        """Application-exit hook.

        exit=False: asked whether exit may proceed — veto while a cefpython
        install is still running.  exit=True: actually exiting — schedule CEF
        shutdown and always allow the exit.
        """
        if not exit:
            if IS_INSTALLING_CEF:
                utils.update_statusbar(_('installing cefpython3 component...'))
                messagebox.showinfo(GetApp().GetAppName(),_('Application is installing cef component.Please wait for a monment to exit!'))
                return False
            return True
        else:
            try:
                cef.PostTask(cef.TID_UI, cef.Shutdown)
            finally:
                # Swallow any shutdown error; exit must proceed regardless.
                return True
    def CheckAppUpdate(self,params={},frame=None):
        """Query the server for an app update and record it into *params*.

        NOTE(review): mutable default argument `params={}` — callers always
        pass self.app_update_params today, but the default is shared across
        calls; consider `params=None`.
        """
        api_addr = '%s/api/update/app' % (UserDataDb.HOST_SERVER_ADDR)
        app_version = utils.get_app_version()
        utils.get_logger().info("start check app update info from server...")
        data = urlutils.RequestData(api_addr,timeout=3,arg = {'app_version':app_version,'is_dev':int(utils.is_dev())})
        if data is None:
            utils.get_logger().error("check app update info error from server...")
            return
        # Server may force the welcome page back on (e.g. important notices).
        force_show_welcome = data['force_show_welcome']
        if force_show_welcome and not utils.profile_get_int(consts.SHOW_WELCOME_PAGE_KEY, True):
            utils.profile_set(consts.SHOW_WELCOME_PAGE_KEY, True)
        utils.get_logger().info("check app update info success from server...")
        # code == 1 means a newer version is available.
        if data['code'] == 1:
            new_version = data['new_version']
            update_msg = data['update_msg']
            msg = _("this lastest version '%s' is available,click here to update it") % new_version
            params['has_new'] = True
            params['title'] = msg
            params['subcontent'] = update_msg
            params['app_version'] = new_version
            # No start-page frame to show it on: pop the update dialog instead.
            if frame is None:
                update.CheckAppupdateInfo(data)
class NavigationBar(toolbar.ToolBar):
    """Browser toolbar: go/back/forward/stop/reload buttons plus a URL combobox."""
    ID_OPEN_URL = NewId()
    ID_GO_BACK = NewId()
    ID_GO_FORWARD = NewId()
    ID_RELOAD = NewId()
    ID_STOP = NewId()
    def __init__(self, master,view):
        self.web_view = view
        # Cached widget states to avoid redundant config() calls in update_state.
        self.back_state = tk.NONE
        self.forward_state = tk.NONE
        self.back_image = None
        self.forward_image = None
        self.reload_image = None
        self.go_image = None
        self.stop_image = None
        toolbar.ToolBar.__init__(self, master)
        resources = os.path.join(pkg_path, "resources")
        # Each button is only added when its icon file exists.
        go_png = os.path.join(resources, "go"+IMAGE_EXT)
        if os.path.exists(go_png):
            self.go_image = tk.PhotoImage(file=go_png)
            self.AddButton(self.ID_OPEN_URL, self.go_image, _('Open URL'),self.GoUrl)
        # Back button
        back_png = os.path.join(resources, "back"+IMAGE_EXT)
        if os.path.exists(back_png):
            self.back_image = tk.PhotoImage(file=back_png)
            self.back_button = self.AddButton(self.ID_GO_BACK, self.back_image, _('Go Back'),self.go_back)
        # Forward button
        forward_png = os.path.join(resources, "forward"+IMAGE_EXT)
        if os.path.exists(forward_png):
            self.forward_image = tk.PhotoImage(file=forward_png)
            self.forward_button = self.AddButton(self.ID_GO_FORWARD, self.forward_image, _('Go Forward'),self.go_forward)
        # Stop button
        stop_png = os.path.join(resources, "stop"+IMAGE_EXT)
        if os.path.exists(stop_png):
            self.stop_image = tk.PhotoImage(file=stop_png)
            self.AddButton(self.ID_STOP, self.stop_image, _('Stop'),self.Stop)
        # Reload button
        reload_png = os.path.join(resources, "reload"+IMAGE_EXT)
        if os.path.exists(reload_png):
            self.reload_image = tk.PhotoImage(file=reload_png)
            self.reload_button = self.AddButton(self.ID_RELOAD, self.reload_image, _('Reload'),self.reload)
        # URL entry (combobox keeps previously visited URLs as its values).
        self.AddLabel(text=_("URL:"))
        self.url_entry = self.AddCombox(state=None)
        self.url_entry.bind("<FocusIn>", self.on_url_focus_in)
        self.url_entry.bind("<FocusOut>", self.on_url_focus_out)
        self.url_entry.bind("<Return>", self.on_load_url)
        self.url_entry.bind("<Button-1>", self.on_button1)
        self.url_entry.bind("<<ComboboxSelected>>",self.on_load_url)
        # Let the URL entry stretch to fill the remaining toolbar width.
        group_frame = self.pack_slaves()[0]
        self.url_entry.grid(row=0, column=6, sticky="nsew", padx=5)
        group_frame.pack(fill="x",side=tk.LEFT,expand=1)
        group_frame.columnconfigure(6, weight=1)
        # Update state of buttons
        self.update_state()
    def GoUrl(self):
        """Prompt for a URL (or local path) and load it."""
        url = tkSimpleDialog.askstring(
            _("Open URL:"),
            _("Enter a full URL or local path")
        )
        if not url:
            return
        if utils.is_linux() and os.path.isfile(url):
            url = "file://" + url
        self.load_url(url)
    def go_back(self):
        if self.web_view.get_browser():
            self.web_view.get_browser().GoBack()
    def go_forward(self):
        if self.web_view.get_browser():
            self.web_view.get_browser().GoForward()
    def reload(self):
        if self.web_view.get_browser():
            self.web_view.get_browser().Reload()
    def set_url(self, url):
        """Show *url* in the entry and remember it in the combobox history."""
        self.url_entry.delete(0, tk.END)
        self.url_entry.insert(0, url)
        values = set(self.url_entry['values'])
        values.add(url)
        self.url_entry['values'] = tuple(values)
    def on_url_focus_in(self, _):
        utils.get_logger().debug("NavigationBar.on_url_focus_in")
    def on_url_focus_out(self, _):
        utils.get_logger().debug("NavigationBar.on_url_focus_out")
    def Stop(self):
        self.web_view.get_browser().StopLoad()
    def on_load_url(self, _):
        # Triggered by <Return> or combobox selection.
        self.load_url(self.url_entry.get())
    def load_url(self,url):
        # Stop any in-flight load before navigating.
        if self.web_view.get_browser():
            self.web_view.get_browser().StopLoad()
            self.web_view.get_browser().LoadUrl(url)
    def on_button1(self, _):
        """Fix CEF focus issues (#255). See also FocusHandler.OnGotFocus."""
        utils.get_logger().debug("NavigationBar.on_button1")
        self.master.master.focus_force()
    def update_state(self):
        """Poll the browser every 100 ms and enable/disable back/forward."""
        browser = self.web_view.get_browser()
        if not browser:
            # Browser not created yet: keep both buttons disabled.
            if self.back_state != tk.DISABLED:
                self.back_button.config(state=tk.DISABLED)
                self.back_state = tk.DISABLED
            if self.forward_state != tk.DISABLED:
                self.forward_button.config(state=tk.DISABLED)
                self.forward_state = tk.DISABLED
            self.after(100, self.update_state)
            return
        if browser.CanGoBack():
            if self.back_state != tk.NORMAL:
                self.back_button.config(state=tk.NORMAL)
                self.back_state = tk.NORMAL
        else:
            if self.back_state != tk.DISABLED:
                self.back_button.config(state=tk.DISABLED)
                self.back_state = tk.DISABLED
        if browser.CanGoForward():
            if self.forward_state != tk.NORMAL:
                self.forward_button.config(state=tk.NORMAL)
                self.forward_state = tk.NORMAL
        else:
            if self.forward_state != tk.DISABLED:
                self.forward_button.config(state=tk.DISABLED)
                self.forward_state = tk.DISABLED
        self.after(100, self.update_state)
test_webgui.py | import sys
import os
import re
import pytest
import glob
import time
def start_server():
    """Start the experimentum WebGUI test server on port 3000.

    Intended as a multiprocessing target (see TestWebGUI.test_webui_available):
    builds a throwaway app container in a temp dir, points the server at the
    real template folder, and runs the 'webgui' command (blocking).
    """
    import tempfile
    from conftest import AppFileHandler
    from experimentum.Experiments import App
    from experimentum.WebGUI import Server
    # App container rooted in a fresh temporary config directory.
    class WebAppContainer(App):
        config_path = tempfile.mkdtemp()
    # Test server that overrides the template folder to the package's
    # real templates (the temp config dir has none).
    class TestServer(Server):
        def create_app(self):
            app = super(TestServer, self).create_app()
            app.template_folder = os.path.abspath(os.path.join(
                os.path.dirname(__file__),
                '../../experimentum/WebGUI/templates/'
            ))
            return app
    # Fake the CLI invocation: main.py webgui --port 3000 --no-reload.
    main = os.path.join(WebAppContainer.config_path, 'main.py')
    sys.argv = [main, 'webgui', '--port', '3000', '--no-reload']
    # Populate the config dir with the files/dirs the container expects.
    app_files = AppFileHandler()
    app_files.create_directories_and_files(WebAppContainer.config_path)
    # Initialize the container with the test server alias and run (blocks).
    container = WebAppContainer('testing', WebAppContainer.config_path + '/.')
    container.aliases['server'] = lambda: TestServer(container, True)
    container.run()
class TestWebGUI():
    """Black-box tests for the experimentum WebGUI (availability, migrations,
    experiments)."""

    def test_webui_available(self, app_files):
        """
        GIVEN the framework is installed
        WHEN a user starts the webgui on port 3000
        THEN the gui is available on localhost:3000
        """
        from multiprocessing import Process
        from six.moves import urllib

        # check that webgui is not available yet
        with pytest.raises(urllib.error.URLError) as pytest_wrapped_e:
            urllib.request.urlopen('http://localhost:3000')
        assert pytest_wrapped_e.type == urllib.error.URLError

        # User starts the webgui on port 3000
        web_gui = Process(target=start_server)
        web_gui.start()
        try:
            # Poll until the server answers instead of sleeping a fixed 30 s:
            # much faster on quick machines, same 30 s upper bound on slow ones.
            deadline = time.time() + 30.0
            status = None
            while time.time() < deadline:
                try:
                    status = urllib.request.urlopen('http://localhost:3000').getcode()
                    break
                except urllib.error.URLError:
                    time.sleep(0.5)
            assert status == 200
        finally:
            # Always stop the webgui process, even when the assertion fails.
            web_gui.terminate()
            web_gui.join()

    def test_manage_migrations(self, webclient):
        """
        GIVEN the framework is installed and the webgui is accessable
        WHEN the user creates, upgrades, and downgrades a migration
        THEN these actions are executed on the migration
        """
        # User creates a new migration foo
        webclient.post('/migrations/make', data={'name': 'create foo migration'})
        response = webclient.get('/')
        root = webclient.application.config['container'].root
        assert len(glob.glob(os.path.join(root, 'migrations', '*_create_foo_migration.py'))) == 1
        # Pending migrations are rendered with the "error" icon.
        assert re.search(
            '<i class="material-icons">error<\\/i>\\s*\\d{14}_create_foo_migration',
            response.get_data(as_text=True),
            re.MULTILINE | re.IGNORECASE
        )
        # User upgrades to new migration; applied ones show "check_circle".
        webclient.get('/migrations/upgrade')
        response = webclient.get('/')
        assert re.search(
            '<i class="material-icons">check_circle<\\/i>\\s*\\d{14}_create_foo_migration',
            response.get_data(as_text=True),
            re.MULTILINE | re.IGNORECASE
        )
        # User downgrades migration; it reverts to the pending ("error") icon.
        webclient.get('/migrations/downgrade')
        response = webclient.get('/')
        assert re.search(
            '<i class="material-icons">error<\\/i>\\s*\\d{14}_create_foo_migration',
            response.get_data(as_text=True),
            re.MULTILINE | re.IGNORECASE
        )

    def test_manage_experiments(self, webclient, app_files):
        """
        GIVEN the framework is installed, the webgui is accessable, and there
          is an Experiment called FooExperiment with an associated FooPlot class
        WHEN the user executes the experiment with 2 iterations
        THEN the experiment is executed 2 times and a diagramm is generated, and the
          user sees the output log and performance table
        """
        # Create FooExperiment (FooPlot class already exists)
        app_files.create_from_stub(
            webclient.application.config['container'].root,
            'FooExperiment',
            'experiments/{name}.py'
        )
        response = webclient.get('/')
        assert 'Foo' in response.get_data(as_text=True)

        # User submits experiment run form with iterations=2
        response = webclient.post('/experiments/run/foo', data={'iterations': 2, 'config': ''})

        # User sees the experiment result page with the streaming log hook.
        data = response.get_data(as_text=True)
        assert '<div id="result" class="col s12"></div>' in data
        assert '<div id="plots" class="col s12">' in data
        assert 'Running Tests' in data
        assert 'Generating Plots' in data
        assert "log_stream('/experiments/run/foo?config=&iterations=2'" in data
|
server.py | # Python packages
from _socket import SHUT_RDWR
import socket
import struct
import time
import timer
try:  # Python 3
    import socketserver
except ImportError:  # Python 2
    # BUG FIX: the Python 2 module is named SocketServer; re-importing
    # "socketserver" here would only raise ImportError again.
    import SocketServer as socketserver
import threading

# Third-party packages

# Modules from this project
import globals as G
# noinspection PyUnresolvedReferences
from savingsystem import save_sector_to_string, save_blocks, save_world, load_player, save_player
from world_server import WorldServer
# noinspection PyUnresolvedReferences
import blocks
from advUtils.network import PackageSystem
# from subprocess import
# noinspection PyUnresolvedReferences
from commands import CommandParser, COMMAND_ERROR_COLOR, COMMAND_NOT_HANDLED, COMMAND_HANDLED, COMMAND_INFO_COLOR, \
    CommandException
from utils import sectorize, make_string_packet
from mod import load_modules
# This class is effectively a serverside "Player" object
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
    """One instance per connected client.

    Receives that client's packets in loop() and relays world, chat,
    inventory and movement updates to the other connected players via the
    dict-based PackageSystem protocol ({"packetType": int, "packet": {...}}).
    """

    inv = None

    def get_inv(self):
        # NOTE(review): `global inv` means ALL handler instances (players)
        # share one module-level inventory instead of one per player —
        # looks like a bug; confirm intent before relying on it.
        global inv
        return inv

    def set_inv(self, value):
        global inv
        inv = value
        # if type(value[1]) == bytes:
        #     raise Exception("")
        print("INVENTORY_EX:", value)

    inventory = property(get_inv, set_inv)  # "\0" * (4 * 40) # Currently, is serialized to be 4 bytes * (27 inv + 9 quickbar + 4 armor) = 160 bytes
    command_parser = CommandParser()
    # Whether this player may run operator commands.
    operator = False

    def __init__(self, *args, **kwargs):
        # BaseRequestHandler.__init__ invokes handle() before returning, so
        # this assignment runs late; sendpacket() therefore creates the
        # PackageSystem lazily as well.
        super(ThreadedTCPRequestHandler, self).__init__(*args, **kwargs)
        self.packageSystem = PackageSystem(self.request)

    def sendpacket(self, size, packet):
        # Send *packet* (dict) to this client. *size* is a leftover of the old
        # raw-bytes protocol and is ignored by the PackageSystem transport.
        # py_000002 = struct.pack("i", 5 + size)
        # print(py_000002, packet)
        # if type(packet) == str:
        #     packet = packet.encode("utf-8")
        # self.request.sendall(py_000002 + packet)
        if not hasattr(self, "packageSystem"):
            self.packageSystem = PackageSystem(self.request)
        # print("SENDPACKET_SERVER:", packet) if packet["packetType"] != 1 and packet["packetType"] != 2 else None
        # if packet["packetType"] == 6:
        #     exit(0)
        self.packageSystem: PackageSystem
        self.packageSystem.sendall(packet)

    def sendchat(self, txt, color=(255, 255, 255, 255)):
        # Chat message: packet type 5, message bytes plus RGBA color tuple.
        txt = txt.encode('utf-8')
        self.sendpacket(None, {
            "packetType": 5,
            "packet": {
                "message": txt,
                "color": color
            }
        })
        # self.sendpacket(len(txt) + 4, b"\5" + txt + struct.pack("BBBB", *color))

    def sendinfo(self, info, color=(255, 255, 255, 255)):
        # Info message; currently identical to sendchat (also packet type 5).
        info = info.encode('utf-8')
        self.sendpacket(None, {
            "packetType": 5,
            "packet": {
                "message": info,
                "color": color
            }
        })
        # self.sendpacket(len(info) + 4, "\5" + info + struct.pack("BBBB", *color))

    def broadcast(self, txt):
        # Send chat text to every connected player.
        for player in self.server.players.values():
            player.sendchat(txt)

    def sendpos(self, pos_bytes, mom_bytes, *, momentum, position):
        # Player movement: packet type 8. pos_bytes/mom_bytes are unused
        # leftovers of the raw-bytes protocol.
        self.sendpacket(None, {
            "packetType": 8,
            "packet": {
                "playerID": self.id,
                "momentum": momentum,
                "position": position
            }
        })
        # self.sendpacket(38, "\x08" + struct.pack("H", self.id) + mom_bytes + pos_bytes)

    def lookup_player(self, playername):
        # find player by name
        for player in list(self.server.players.values()):
            if player.username == playername:
                return player
        return None

    def handle(self):
        # Register this connection with the server, then run the packet loop.
        self.username = str(self.client_address)  # placeholder until login (type 255)
        print("Client connecting...", self.client_address)
        self.server.players[self.client_address] = self
        self.server.player_ids.append(self)
        self.id = len(self.server.player_ids) - 1
        try:
            self.loop()
        except socket.error as e:
            if self.server._stop.isSet():
                return  # Socket error while shutting down doesn't matter
            # NOTE(review): e[0] only works on Python 2 socket.error; Python 3
            # exceptions are not subscriptable (would need e.errno) — confirm
            # target interpreter. 10053/10054 are Windows connection-reset codes.
            if e[0] in (10053, 10054):
                print("Client %s %s crashed." % (self.username, self.client_address))
            else:
                raise e

    def loop(self):
        # Receive/dispatch loop: one iteration per incoming packet. Returns
        # (ending the connection) when PackageSystem.recv raises ValueError.
        world, players = self.server.world, self.server.players
        package_system = PackageSystem(self.request)
        while 1:
            # byte = self.request.recv(1)
            try:
                data = package_system.recv()
            except ValueError:
                return
            # print("Server recieved packet: %s" % data)
            packettype = data["packetType"]
            packet = data["packet"]
            # print(f"SERVERPACKET:", data) if packettype != 1 else None
            # if not byte: return # The client has disconnected intentionally
            # packettype = struct.unpack("B", byte)[0]  # Client Packet Type
            if packettype == 1:  # Sector request
                # sector = struct.unpack("iii", self.request.recv(4 * 3))
                sector = packet["sector"]
                # print("SECTORCHANGE_CURRENT:", sector)
                # print("SECTORCHANGE_ALL:", world.sectors)
                # print("SECTORIZE_NEW:", sector in world.sectors)
                if sector not in world.sectors:
                    with world.server_lock:
                        world.open_sector(sector)
                if not world.sectors[sector]:
                    # Empty sector, send packet 2
                    self.sendpacket(None, {
                        "packetType": 2,
                        "packet": {
                            "sector": sector
                        }
                    })
                    # self.sendpacket(12, b"\2" + struct.pack("iii", *sector))
                else:
                    # py_000005 = save_sector_to_string(world, sector)
                    # py_000003 = (py_000005.encode("utf-8") if type(py_000005) == str else py_000005)
                    # py_000004 = world.get_exposed_sector(sector).encode("utf-8")
                    # msg = struct.pack("iii", *sector) + py_000003 + py_000004
                    self.sendpacket(None, {
                        "packetType": 1,
                        "packet": {
                            "sector": sector,
                            "exposedSector": world.get_exposed_sector(sector),
                            "sectorData": save_sector_to_string(world, sector)
                        }
                    })
                    # self.sendpacket(len(msg), b"\1" + msg)
            elif packettype == 3:  # Add block
                # positionbytes = self.request.recv(4 * 3)
                # blockbytes = self.request.recv(2)
                position = packet["position"]
                blockid = G.BLOCKS_DIR[packet["block"]]
                # position = struct.unpack("iii", positionbytes)
                # blockid = G.BLOCKS_DIR[struct.unpack("BB", blockbytes)]
                with world.server_lock:
                    world.add_block(position, blockid, sync=False)
                for address in players:
                    if address is self.client_address:
                        continue  # He told us, we don't need to tell him
                    players[address].packageSystem.send({
                        "packetType": 3,
                        "packet": {
                            "position": position,
                            "block": blockid
                        }
                    })
                    # players[address].sendpacket(14, "\3" + positionbytes + blockbytes)
            elif packettype == 4:  # Remove block
                # positionbytes = self.request.recv(4 * 3)
                with world.server_lock:
                    world.remove_block(packet["position"], sync=False)
                for address in players:
                    if address is self.client_address: continue  # He told us, we don't need to tell him
                    # NOTE(review): `positionbytes` is never assigned on this
                    # path (only the commented-out legacy code defined it), so
                    # this line raises NameError — the relay of block removal
                    # appears broken; should likely mirror the dict-based send
                    # used by packet type 3.
                    players[address].sendpacket(12, "\4" + positionbytes)
            elif packettype == 5:  # Receive chat text
                # txtlen = struct.unpack("i", self.request.recv(4))[0]
                # raw_txt = self.request.recv(txtlen).decode('utf-8')
                raw_txt = packet["message"].decode()
                # NOTE(review): self.username is a str here (set via str() or
                # from packet["username"]), so .decode() would raise
                # AttributeError on Python 3 — verify username's actual type.
                txt = "%s: %s" % (self.username.decode(), raw_txt)
                try:
                    if raw_txt[0] == '/':
                        # Slash prefix: route through the command parser.
                        ex = self.command_parser.execute(raw_txt, user=self, world=world)
                        if ex != COMMAND_HANDLED:
                            self.sendchat('$$rUnknown command.')
                    else:
                        # Not a command, send the chat to all players
                        for address in players:
                            players[address].sendchat(txt)
                        print(txt)  # May as well let console see it too
                except CommandException as e:
                    self.sendchat(str(e), COMMAND_ERROR_COLOR)
            elif packettype == 6:  # Player Inventory Update
                print("SERVER_PACKET06:", packet)
                self.inventory = packet["items"]
                # TODO: All player's inventories should be autosaved at a regular interval.
                pass
            elif packettype == 8:  # Player Movement
                # mom_bytes, pos_bytes = self.request.recv(4 * 3), self.request.recv(8 * 3)
                # self.momentum = struct.unpack("fff", mom_bytes)
                # self.position = struct.unpack("ddd", pos_bytes)
                self.momentum = packet["momentum"]
                self.position = packet["position"]
                for address in players:
                    if address is self.client_address:
                        continue  # He told us, we don't need to tell him
                    # TODO: Only send to nearby players
                    # NOTE(review): this sends via self.sendpacket — i.e. back
                    # to the moving player, once per other player — instead of
                    # players[address].sendpacket as the legacy line below did;
                    # looks like a relay bug, confirm against client behavior.
                    self.sendpacket(None, {
                        "packetType": 8,
                        "packet": {
                            "playerID": self.id,
                            "momentum": self.momentum,
                            "position": self.position
                        }
                    })
                    # players[address].sendpacket(38, "\x08" + struct.pack("H", self.id) + mom_bytes + pos_bytes)
            elif packettype == 9:  # Player Jump
                # raise NotImplementedError("Player Jump not implemented")
                for address in players:
                    if address is self.client_address:
                        continue  # He told us, we don't need to tell him
                    # TODO: Only send to nearby players
                    # NOTE(review): payload key is "package" here, while every
                    # other packet uses "packet" — receivers reading "packet"
                    # will miss the playerID; confirm which key the client expects.
                    players[address].sendpacket(None, {
                        "packetType": 9,
                        "package": {
                            "playerID": self.id
                        }
                    })
                    # players[address].sendpacket(2, "\x09" + struct.pack("H", self.id))
            elif packettype == 10:  # Update Tile Entity
                # block_pos = struct.unpack("iii", self.request.recv(4 * 3))
                # ent_size = struct.unpack("i", self.request.recv(4))[0]
                # world[block_pos].update_tile_entity(self.request.recv(ent_size))
                pass
            elif packettype == 255:  # Initial Login
                # txtlen = struct.unpack("i", self.request.recv(4))[0]
                # data2 = package_system.recv()
                self.username = packet["username"]
                # position = packet["position"]
                # self.username = self.request.recv(txtlen).decode('utf-8')
                # load_player restores a saved position (if any) onto self.
                self.position = None
                load_player(self, "world")
                for player in self.server.players.values():
                    player.sendchat("$$y%s has connected." % self.username)
                print("%s's username is %s" % (self.client_address, self.username))
                # Default spawn: just above terrain height at the origin.
                position = (0, self.server.world.terraingen.get_height(0, 0) + 2, 0)
                if self.position is None: self.position = position  # New player, set initial position
                # Send list of current players to the newcomer
                for player in self.server.players.values():
                    if player is self: continue
                    name = player.username.encode('utf-8')
                    self.sendpacket(None, {
                        "packetType": 7,
                        "packet": {
                            "playerID": player.id,
                            "username": name
                        }
                    })
                    # self.sendpacket(2 + len(name), '\7' + struct.pack("H", player.id) + name)
                # Send the newcomer's name to all current players
                name = self.username
                for player in self.server.players.values():
                    if player is self: continue
                    player.sendpacket(None, {
                        "packetType": 7,
                        "packet": {
                            "playerID": self.id,
                            "username": name
                        }
                    })
                    # player.sendpacket(2 + len(name), '\7' + struct.pack("H", self.id) + name)
                # Send them the sector under their feet first so they don't fall
                sector = sectorize(position)
                if sector not in world.sectors:
                    with world.server_lock:
                        world.open_sector(sector)
                py_000001 = struct.pack("iii", *sector)
                sector_string = save_sector_to_string(world, sector)
                exposed_sector = world.get_exposed_sector(sector)
                # print(py_000001, sector_string, exposed_sector)
                # msg is only kept for parity with the legacy protocol below.
                msg = py_000001 + sector_string + exposed_sector.encode('utf-8')
                self.sendpacket(None, {
                    "packetType": 1,
                    "packet": {
                        "sector": sector,
                        "exposedSector": exposed_sector,
                        "sectorData": sector_string
                    }
                })
                # self.sendpacket(len(msg), b"\1" + msg)
                # Send them their spawn position and world seed(for client side biome generator)
                seed_packet = make_string_packet(G.SEED)
                self.sendpacket(None, {
                    "packetType": 255,
                    "packet": {
                        "position": position,
                        "seed": G.SEED
                    }
                })
                # self.sendpacket(12 + len(seed_packet),
                #                 struct.pack("B", 255) + struct.pack("iii", *position) + seed_packet)
                print("IMPORTANT0004:", self.inventory)
                print("IMPORTANT0005:", len(self.inventory)+1)
                # Finally push the (shared — see get_inv note) inventory down.
                self.sendpacket(None, {
                    "packetType": 6,
                    "packet": {
                        "items": self.inventory
                    }
                })
                # self.sendpacket(len(self.inventory)+1, "\6" + self.inventory)
            else:
                print("Received unknown packettype", packettype)

    def finish(self):
        # Connection teardown: deregister, notify the others, persist player.
        print("Client disconnected,", self.client_address, self.username)
        try:
            del self.server.players[self.client_address]
        except KeyError:
            pass
        for player in self.server.players.values():
            player.sendchat("%s has disconnected." % self.username)
        # Send user list
        for player in self.server.players.values():
            # NOTE(review): legacy raw protocol with str + bytes concatenation
            # — raises TypeError on Python 3; confirm what the client expects.
            player.sendpacket(2 + 1, '\7' + struct.pack("H", self.id) + '\0')
        save_player(self, "world")
class Server(socketserver.ThreadingTCPServer):
    """Threaded TCP game server; spawns one ThreadedTCPRequestHandler per client."""

    allow_reuse_address = True

    def __init__(self, *args, **kwargs):
        socketserver.ThreadingTCPServer.__init__(self, *args, **kwargs)
        # Set by the console "stop" command; handlers check it on socket errors.
        self._stop = threading.Event()
        self.world = WorldServer(self)
        self.players = {}  # Dict of all players connected. {ipaddress: requesthandler,}
        self.player_ids = []  # List of all players this session, indexes are their ID's [0: first requesthandler,]
        self.command_parser = CommandParser()

    def show_block(self, position, block):
        # Broadcast an add-block packet (type 3) for *block* at *position*.
        blockid = block.id
        for player in self.players.values():
            # TODO: Only if they're in range
            player.sendpacket(None, {
                "packetType": 3,
                "packet": {
                    "position": position,
                    "block": (blockid.main, blockid.sub)
                }
            })
            # player.sendpacket(14, "\3" + struct.pack("iiiBB", *(position + (blockid.main, blockid.sub))))

    def hide_block(self, position):
        # Broadcast a remove-block packet (type 4).
        # NOTE(review): still uses the legacy (size, raw-bytes) call form,
        # unlike show_block above, and concatenates str + bytes (TypeError on
        # Python 3) — confirm against the client before changing.
        for player in self.players.values():
            # TODO: Only if they're in range
            player.sendpacket(12, "\4" + struct.pack("iii", *position))

    def update_tile_entity(self, position, value):
        # Broadcast a tile-entity update (type 0x0A) — same legacy raw-bytes
        # call form and str/bytes concern as hide_block.
        for player in self.players.values():
            player.sendpacket(12 + len(value), "\x0A" + struct.pack("iii", *position) + value)
def start_server(internal=False):
    """Create and start the game server plus its worker threads.

    internal=True binds to localhost only; otherwise the first non-loopback
    local IP is used.  Returns (server, serve_forever thread).
    """
    if internal:
        bind_address = ("localhost", 1486)
    else:
        candidates = socket.gethostbyname_ex(socket.gethostname())[2]
        external_ip = [ip for ip in candidates if not ip.startswith("127.")][0]
        bind_address = (external_ip, 1486)
    server = Server(bind_address, ThreadedTCPRequestHandler)
    G.SERVER = server

    serve_thread = threading.Thread(target=server.serve_forever)
    serve_thread.start()
    threading.Thread(target=server.world.content_update, name="world_server.content_update").start()

    # start server timer
    G.main_timer = timer.Timer(G.TIMER_INTERVAL, name="G.main_timer")
    G.main_timer.start()
    return server, serve_thread
if __name__ == '__main__':
    # TODO: Enable server launch options
    # In the mean time, manually set
    setattr(G.LAUNCH_OPTIONS, "seed", None)
    G.SAVE_FILENAME = "world"
    load_modules(server=True)
    server, server_thread = start_server(internal=True)
    print(('Server loop running in thread: ' + server_thread.name))
    ip, port = server.server_address
    print("Listening on", ip, port)
    helptext = "Available commands: " + ", ".join(["say", "stop", "save"])
    # Console command loop: say / help / save / stop.
    while 1:
        args = input().replace(chr(13), "").split(" ")  # On some systems CR is appended, gotta remove that
        cmd = args.pop(0)
        if cmd == "say":
            # Broadcast the rest of the line to every player in gray.
            msg = "Server: %s" % " ".join(args)
            print(msg)
            for player in server.players.values():
                player.sendchat(msg, color=(180, 180, 180, 255))
        elif cmd == "help":
            print(helptext)
        elif cmd == "save":
            print("Saving...")
            save_world(server, "world")
            print("Done saving")
        elif cmd == "stop":
            # Orderly shutdown: flag stop, close client sockets, stop the
            # server loop, save the world, then leave the console loop.
            server._stop.set()
            G.main_timer.stop()
            print("Disconnecting clients...")
            for address in server.players:
                try:
                    server.players[address].request.shutdown(SHUT_RDWR)
                    server.players[address].request.close()
                except socket.error:
                    pass
            print("Shutting down socket...")
            server.shutdown()
            print("Saving...")
            save_world(server, "world")
            print("Goodbye")
            break
        else:
            print("Unknown command '%s'." % cmd, helptext)
    # Wait for any remaining worker threads to exit before the process ends.
    while len(threading.enumerate()) > 1:
        threads = threading.enumerate()
        threads.remove(threading.current_thread())
        print("Waiting on these threads to close:", threads)
        time.sleep(1)
|
Loader.py | """MIT License
Copyright (c) 2019 Zdravko Georgiev
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. """
from selenium import webdriver
from colorama import init, Fore, Back, Style
from tqdm import tqdm
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from configparser import ConfigParser
import urllib.request, time, sys, os,urllib.request, platform , \
webbrowser,urllib.error, shutil,threading, PySimpleGUI as sg, requests,json
# INI-style settings file; created with defaults on first run.
configPath = "data.txt"
if not os.path.isfile(configPath):
    with open(configPath, "a") as f:
        f.writelines \
            ("""
[mainConfig]
downloadlist =
exercise = True
subtitles = True
savedirectory =
[GoogleConfig]
bragent = AdsBot-Google (+http://www.google.com/adsbot.html)
hidewindow = True
[Timings]
logintime = 0
loadingallvideos = 5
extractcoursename = 2
getvideolink = 2
downloaddelay = 3
""")
        f.close()  # redundant inside "with", but harmless
results = ""
config = ConfigParser()
config.read(configPath)
# One course URL per line in the downloadlist setting.
links = str(config.get("mainConfig", "downloadlist")).strip().split("\n")
default_button_color = '#FFF', '#444'
# results mirrors the link list as newline-joined text for the GUI textarea.
for lin in links:
    results += lin + "\n"
def build_chrome_options():
    """Assemble the ChromeOptions used for the scraping driver.

    Points Chrome at the user's default profile directory (so an existing
    LinkedIn login is reused) and applies the user agent configured in
    [GoogleConfig] bragent.

    Returns:
        selenium.webdriver.ChromeOptions: fully configured options object.
    """
    options = webdriver.ChromeOptions()
    options.accept_untrusted_certs = True
    options.assume_untrusted_cert_issuer = True
    options.add_argument("--disable-extensions")
    options.add_argument("--no-sandbox")
    options.add_argument("--disable-gpu")
    # Locate the default Chrome profile per OS (hoisted platform.system()).
    system = platform.system()
    add_default_chrome_path = ""
    if system == "Windows":
        add_default_chrome_path = os.path.expanduser('~') + r"\AppData\Local\Google\Chrome\User Data\Default"
    elif system == "Darwin":
        # BUG FIX: platform.system() reports "Darwin" on macOS (never
        # "MacOS"), and the profile lives under "Application Support"
        # (with a space), so this branch previously never ran / pointed
        # at a non-existent path.
        add_default_chrome_path = os.path.expanduser('~') + r"/Library/Application Support/Google/Chrome/Default"
    elif system == "Linux":
        add_default_chrome_path = os.path.expanduser(
            '~') + r"/.config/google-chrome/default"
    options.add_argument("--user-data-dir=" + os.path.abspath(add_default_chrome_path))
    options.add_argument(
        '--user-agent=' + config.get("GoogleConfig", "bragent"))
    options.add_argument("--disable-impl-side-painting")
    options.add_argument("--disable-setuid-sandbox")
    options.add_argument("--disable-seccomp-filter-sandbox")
    options.add_argument("--disable-breakpad")
    options.add_argument("--disable-client-side-phishing-detection")
    options.add_argument("--disable-cast")
    options.add_argument("--disable-cast-streaming-hw-encoding")
    options.add_argument("--disable-cloud-import")
    options.add_argument("--disable-popup-blocking")
    options.add_argument("--ignore-certificate-errors")
    options.add_argument("--disable-session-crashed-bubble")
    options.add_argument("--disable-ipv6")
    options.add_argument("--allow-http-screen-capture")
    options.add_experimental_option("prefs", {
        "download.default_directory": "c:/",
        "download.prompt_for_download": False,
        "download.directory_upgrade": True,
        "safebrowsing_for_trusted_sources_enabled": False,
        "safebrowsing.enabled": False
    })
    return options
##################################################
##################################################
# Progress bar Class tqdm
###################################################
class progress_bar(tqdm):
    """tqdm subclass whose update_to method fits urlretrieve's reporthook."""

    def update_to(self, b=1, bsize=1, tsize=None):
        """Translate (blocks so far, block size, total size) into a tqdm update."""
        if tsize is not None:
            self.total = tsize
        downloaded = b * bsize
        self.update(downloaded - self.n)
def show_progress_bar(url, filename, output_path, type=0, sessions = {}):
    """Download *url* while rendering a tqdm progress bar.

    type == 0: plain urlretrieve to *output_path* (with live progress).
    type != 0: authenticated requests download (cookies from *sessions*)
    streamed into output_path/filename — note no progress updates are
    reported on this branch.

    NOTE(review): `sessions={}` is a mutable default argument; it is never
    mutated here so behavior is fine, but `None` would be the safer idiom.
    """
    with progress_bar(unit='B',smoothing=0.3, unit_scale=True,
                      miniters=1, desc=filename) as t:
        if type == 0:
            urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to)
        else:
            r = requests.get(url, cookies=sessions, stream=True)
            with open(os.path.join(output_path,filename), 'wb') as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
class bcolors:
    """Console color constants: raw ANSI escapes on POSIX, colorama on Windows."""
    if os.name == "posix":
        init(autoreset=True)
        # colors foreground text:
        fc = "\033[0;96m"
        fg = "\033[0;92m"
        fw = "\033[0;97m"
        fr = "\033[0;91m"
        fb = "\033[0;94m"
        fy = "\033[0;33m"
        fm = "\033[0;35m"
        # colors background text:
        bc = "\033[46m"
        bg = "\033[42m"
        bw = "\033[47m"
        br = "\033[41m"
        bb = "\033[44m"
        by = "\033[43m"
        bm = "\033[45m"
        # colors style text:
        sd = Style.DIM
        sn = Style.NORMAL
        sb = Style.BRIGHT
    else:
        init(autoreset=True)
        # colors foreground text:
        fc = Fore.CYAN
        fg = Fore.GREEN
        fw = Fore.WHITE
        fr = Fore.RED
        fb = Fore.BLUE
        fy = Fore.YELLOW
        fm = Fore.MAGENTA
        # colors background text:
        bc = Back.CYAN
        bg = Back.GREEN
        bw = Back.WHITE
        br = Back.RED
        bb = Back.BLUE
        # NOTE(review): by/bm use Fore.* here, unlike the Back.* pattern of
        # the other background entries — possibly intentional, confirm.
        by = Fore.YELLOW
        bm = Fore.MAGENTA
        # colors style text:
        sd = Style.DIM
        sn = Style.NORMAL
        sb = Style.BRIGHT
# Browser user-agent strings offered in the GUI "Browser Agents" dropdown.
brlist = [
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/6.0)",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; yie11; rv:11.0) like Gecko",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 8.0.0; SM-G930F Build/R16NW; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.157 Mobile Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36",
    "Mozilla/5.0 (Linux; Android 7.0; SM-G610M Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.91 Mobile Safari/537.36",
    "Mozilla/5.0 (iPhone; CPU iPhone OS 11_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.0 Mobile/15E148 Safari/604.1",
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_4; de-de) AppleWebKit/525.18 (KHTML, like Gecko) Version/3.1.2 Safari/525.20.1",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.1 Safari/605.1.15",
    "Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1",
    "Mozilla/5.0 (Linux; Android 5.0; SAMSUNG SM-G900F Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/2.1 Chrome/34.0.1847.76 Mobile Safari/537.36",
    "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)",
    "Mozilla / 5.0(compatible; bingbot / 2.0; +http://www.bing.com/bingbot.htm)",
    "AdsBot-Google (+http://www.google.com/adsbot.html)",
    "Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.96 Mobile Safari/537.36 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0",
]
# "Download" tab: course-link textarea, exercise/subtitle toggles, save dir.
config2 = [
    [sg.T("Download Options",size=(500, 1), auto_size_text=True,justification='center', font=("Arial", 10), background_color="#888", text_color="#FFF", pad=(5, 5))],
    [sg.Multiline(tooltip="Add each course link in a row as follows:"
                  "\nhttps://www.linkedin.com/learning/photoshop-cc-2015-new-features-3\nhttps://www.linkedin.com/learning/photoshop-cs6-image-optimization\n https://www.linkedin.com/learning/photoshop-cc-for-photographers-sharpening \n ",
                  font=("Helvetica", 7),
                  autoscroll=True,
                  enable_events=True,
                  enter_submits=True,
                  auto_size_text=True,
                  size=(650,15),
                  default_text=results.strip() + "\n",
                  background_color="#FFF")],
    [sg.Text("Exercise Files", size=(10, 1), pad=(0, 5)),
     sg.Checkbox("",default=config.getboolean("mainConfig", "exercise"), background_color="#FFF"),
     sg.Text(" "* 3, pad=(0, 5)),
     sg.Text("Subtitles", pad=(0, 5)),
     sg.Checkbox("",default=config.getboolean("mainConfig", "subtitles"), background_color="#FFF")
     ],
    [sg.Text("Save Directory", size=(14, 1), pad=(0, 5)),
     sg.InputText(config.get("mainConfig", "saveDirectory"),background_color="#FFF", size=(30,1)),
     sg.FolderBrowse(button_text="Select", button_color=default_button_color, size=(15,1))],
]
# "Browser" tab: user-agent dropdown and headless/hidden-window toggle.
config3 = [
    [sg.T("Chrome Driver Options",size=(500, 1),auto_size_text=True,justification='center', font=("Arial", 10), background_color="#888", text_color="#FFF", pad=(5, 5))],
    [sg.T("Browser Agents", size=(13,1))],
    [sg.DropDown(brlist,default_value=config.get("GoogleConfig","bragent"),pad=(5,5), size=(70,10))],
    [sg.Checkbox("Hide Window",default=config.getboolean("GoogleConfig","hidewindow"),pad=(5,5))],
]
# "Timings" tab: the five delay settings from the [Timings] config section.
config4 = [
    [sg.T("Delay Settings",size=(500, 1),auto_size_text=True,justification='center', font=("Arial", 10), background_color="#888", text_color="#FFF", pad=(5, 5))],
    [sg.T(pad=(5,5),text="Manual Login Delay",size=(20, 1)),
     sg.InputText(size=(5,1), default_text=(config.getint("Timings", "logintime")))],
    [sg.T(pad=(5,5),text="Get Video List Delay",size=(20, 1)),
     sg.InputText(size=(5,1), default_text=(config.getint("Timings", "loadingallvideos")))],
    [sg.T(pad=(5,5),text="Get Course Name Delay",size=(20, 1)),
     sg.InputText(size=(5,1), default_text=(config.getint("Timings", "extractcoursename"))),
     ],
    [sg.T(pad=(5,5),text="Get Video Links Delay",size=(20, 1)),
     sg.InputText(size=(5,1), default_text=(config.getint("Timings", "getvideolink")))],
    [sg.T(pad=(5,5),text="Download Delay",size=(20, 1)),
     sg.InputText(size=(5,1), default_text=(config.getint("Timings", "downloaddelay")))],
]
# "About" tab: version info and bug-report button.
config5 = [
    [sg.Frame("",layout=(
        [sg.T("LinkedIn Downloader", size=(500, 1), auto_size_text=True, justification='center',
              font=("Arial", 10), background_color="#888", text_color="#FFF", pad=(5, 5))],
        [sg.T("Author: @r00tme", justification='center')],
        [sg.T("Version: GUI 0.16.1", justification='center')],
        [sg.T("Release Date: 29/02/2020", justification='center')],
        [sg.Button(button_text="Bug Report", button_color=default_button_color, size=(10, 1),
                   key="_open_git_link_")]
    ), element_justification="center")]
]
# Main window: four tabs plus the Start/Stop buttons.
layout = [[sg.TabGroup([[
    sg.Tab('Download', config2),
    sg.Tab('Browser', config3),
    sg.Tab('Timings', config4),
    sg.Tab('About', config5)
]])],
    [
        sg.Button('Start', button_color=default_button_color, size=(15,1), auto_size_button=False),
        sg.Button('Stop', button_color=default_button_color, size=(15,1), auto_size_button=False),
    ]
]
window = sg.Window('LinkedIn Downloader GUI v0.16.3', icon="icon.ico",
                   auto_size_text=True,
                   auto_size_buttons=True,
                   background_color="#d4d0c8",
                   use_default_focus=True,
                   text_justification="left",
                   size=(600,350),
                   debugger_enabled=False,
                   element_justification="left",
                   ).Layout(layout).Finalize()
# Initial button states: ready to Start, nothing to Stop yet.
window.Element('Start').Update(disabled=False)
window.Element('Stop').Update(disabled=True)
valueList = ""
def the_gui():
    """Main GUI event loop: persists settings on every event and starts/stops
    the downloader thread."""
    while True:
        event, values = window.Read(timeout=100)
        if event is None or event == "Exit":
            break
        if event == "_open_git_link_":
            webbrowser.open('https://github.com/r00tmebaby', new=2)
        # NOTE(review): identity compare against the TIMEOUT_KEY string —
        # works because PySimpleGUI returns the same constant object, but
        # `!=` would be the robust comparison.
        if event is not sg.TIMEOUT_KEY:
            # Write every widget value back into the config file, mapping
            # widgets to config keys purely by position (counter) — fragile:
            # adding a widget or config key out of order silently corrupts
            # the mapping.
            counter = 0
            twiliosec = config.sections()
            for sections in twiliosec:
                twilioset = list(config[sections])
                for sets in twilioset:
                    config.set(sections, sets, str(values[counter]))
                    counter += 1
            with open(configPath, "w+") as f:
                config.write(f)
        if event == "Start":
            if not os.path.isfile("chromedriver.exe"):
                sg.Popup("Chrome Driver is missing, please download it from here http://chromedriver.chromium.org/.\n The program can not be started without it", button_type=3,auto_close_duration=1, auto_close=True)
            else:
                event, values = window.Read(timeout=0)
                if event is None or event == "Exit":
                    break
                # Require at least one course URL and a save directory
                # before launching the downloader worker thread.
                if config.get("mainConfig", "downloadlist") != "" and config.get("mainConfig", "savedirectory") != "":
                    window.Element('Start').Update(disabled=True)
                    window.Element('Stop').Update(disabled=False)
                    threading.Thread(target=downloader).start()
                else:
                    sg.Popup("Please specify download folder and at least one course url")
        elif event == "Stop":
            # Hard-kill Chrome and the driver (Windows only), then re-arm Start.
            os.system('taskkill /f /im chrome.exe')
            os.system('taskkill /f /im chromedriver.exe')
            window.Element('Start').Update(disabled=False)
            window.Element('Stop').Update(disabled=True)
def DownloadFile(url, sessions):
    """Stream *url* into the current directory, authenticated via *sessions*
    cookies; the local file name is the last path segment of the URL."""
    base = url.split('?')[0]
    target_name = base.rsplit("/", 1)[-1]
    response = requests.get(url, cookies=sessions, stream=True)
    with open(target_name, 'wb') as out:
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive new chunks
                out.write(chunk)
    return
def downloader():
    """Background worker: drive Chrome through each course URL in `links`.

    For every course it determines a difficulty-prefixed folder name, saves the
    course-info HTML, optionally downloads exercise files, then clicks through
    each video item downloading the stream (and subtitles if enabled).
    Progress is written to stdout with ANSI colors via the `bcolors` helper.
    """
    driver = webdriver.Chrome("chromedriver.exe", options=build_chrome_options())
    if config.getboolean("GoogleConfig", "hidewindow"):
        # Park the window far off-screen instead of headless mode.
        driver.set_window_position(-2000, 0)
    sys.stdout.write(
        "\r%s%s###############################################\n"
        "# LinkedIn Learning Download #\n"
        "# @author r00tme 01/03/2020 #\n"
        "# @version: GUI 0.16.4 #\n"
        "##############################################\n\n" % (
            bcolors.sd, bcolors.fc))
    sys.stdout.flush()
    # Give the user time to log in manually when a login window is configured.
    if config.getint("Timings", "logintime") > 0:
        driver.get("https://www.linkedin.com/learning-login/?redirect=https%3A%2F%2F")
        time.sleep(config.getint("Timings", "logintime"))
    if not os.path.isfile(configPath):
        sys.stdout.write('\n%s[%s*%s]%s The configuration file does not exist' % (
            bcolors.bm, bcolors.fc, bcolors.fm, bcolors.fc))
        sys.stdout.flush()
        sys.exit()
    courses_count = 0
    total_counter = 0
    for items_for_download in links:
        driver.get(items_for_download)
        time.sleep(config.getint("Timings", "loadingallvideos"))
        time.sleep(4)
        driver.find_element_by_css_selector(
            ".course-body__info-tab-name.course-body__info-tab-name-content.ember-view").click()
        counter = 0
        temp_counter = 0
        sys.stdout.write('\n%s[%s*%s]%s Working on course %s' % (
            bcolors.bm, bcolors.fc, bcolors.fm, bcolors.fc, items_for_download))
        sys.stdout.flush()
        time.sleep(config.getint("Timings","loadingallvideos"))
        driver.find_element_by_class_name(
            'course-body__info-tab-name-overview').click()
        # Course difficulty decides a numeric prefix so folders sort by level.
        elementss = driver.find_element_by_class_name("course-info__difficulty").text
        if elementss == "Intermediate":
            prefix = "1-"
        elif elementss == "Advanced":
            prefix = "2-"
        else:
            prefix = "0-"
        course_name = (prefix + items_for_download.split("?")[0].split("/")[4].replace("-", " ").title()).rstrip()
        if not os.path.isdir(config.get('mainConfig', 'saveDirectory') + r"\\" + course_name):
            os.makedirs(config.get('mainConfig', 'saveDirectory') + r"\\" + course_name)
            sys.stdout.write('\n%s[%s*%s]%s Directory %s has been successfully created' % (
                bcolors.bm, bcolors.fc, bcolors.fm, bcolors.fc, course_name))
            sys.stdout.flush()
        else:
            sys.stdout.write('\n%s[%s*%s]%s Directory %s already exists' % (
                bcolors.bm, bcolors.fc, bcolors.fm, bcolors.fc, course_name))
            sys.stdout.flush()
        time.sleep(1)
        elementss2 = driver.find_element_by_css_selector(
            '.course-info__details-section.course-info__divider').get_attribute('innerHTML')
        # Save the course description once; skip when a non-empty file exists.
        # NOTE(review): this handle is never closed explicitly.
        if not os.path.isfile(config.get('mainConfig', 'saveDirectory') + r"\\" + course_name + r"\info.php") or \
                os.stat(config.get('mainConfig', 'saveDirectory') + r"\\" + course_name + r"\info.php").st_size == 0:
            f = open(config.get('mainConfig', 'saveDirectory') + r"\\" + course_name + r"\info.php", "a+",
                     encoding="utf-8")
            f.write(elementss2)
        time.sleep(config.getint("Timings", "extractcoursename"))
        # Reuse the browser session's cookies for the raw HTTP downloads below.
        all_cookies = driver.get_cookies()
        cookies = {}
        for s_cookie in all_cookies:
            cookies[s_cookie["name"]] = s_cookie["value"]
        if config.getboolean('mainConfig', 'exercise'):
            try:
                driver.find_element_by_class_name("course-body__info-tab-name-exercise-files").click()
                for each_file in driver.find_elements_by_class_name("exercise-file__link"):
                    filename = each_file.get_attribute("href").split("?")[0].split("/")[7]
                    excersize_file = "Downloadig excersise file : %s" % filename
                    show_progress_bar(each_file.get_attribute("href"),
                                      "%s[%s*%s]%s %s" % (
                                          bcolors.bm, bcolors.fc, bcolors.fm, bcolors.fm, excersize_file),
                                      os.path.join(config.get('mainConfig', 'saveDirectory'), course_name,
                                                   filename),
                                      sessions=cookies)
            # Bare except: any failure (tab missing, selector change) is
            # treated as "course has no exercise files".
            except:
                sys.stdout.write(
                    "\n%s[%s+%s]%s%s Excercise files were not found" % (bcolors.bm, bcolors.fc, bcolors.fm, bcolors.fc,bcolors.sd)
                )
        driver.find_element_by_class_name(
            "course-body__info-tab-name-content").click()
        driver.find_element_by_class_name(
            "course-body__info-tab-name-content").click()
        time.sleep(1)
        video_id = 0
        for k in driver.find_elements_by_class_name("video-item__link"):
            k.click()
            time.sleep(config.getint("Timings", "downloaddelay"))
            try:
                # NOTE(review): time.sleep() returns None, so WebDriverWait is
                # given timeout=None here -- presumably the configured number
                # of seconds was intended; confirm before relying on it.
                WebDriverWait(driver, time.sleep(config.getint("Timings", "getvideolink"))).until(
                    EC.presence_of_element_located((By.TAG_NAME, "video")))
            except:
                pass
            video_src = driver.find_element_by_tag_name("video").get_attribute("src")
            video_name = driver.current_url.split("/")[5].replace("-", " ").split("?")[0].title()
            course_id = video_src.split("/")[5]
            if counter > 0:
                video_id +=1
            else:
                # First video of the course: fetch the real starting video ID
                # and dump the full course metadata to CourseInfo.json.
                # NOTE(review): `fk` is never closed explicitly.
                video_id = json.loads(requests.get(
                    'https://www.lynda.com/ajax/player?courseId=' + course_id + '&type=course').text)['Chapters'][0]['Videos'][counter]['ID']
                fk = open(config.get('mainConfig', 'saveDirectory') + r"\\" + course_name + r"\CourseInfo.json", "a+",
                          encoding="utf-8")
                datas= requests.get("https://www.lynda.com/ajax/player/?courseid=%s&type=course" % course_id).json()
                fk.write(json.dumps(datas, indent=4, sort_keys=True))
            video_name = "%04d_%s" % (counter, video_name)
            counter += 1
            time.sleep(0.5)
            save_dir = config.get('mainConfig',
                                  'saveDirectory') + r"\\" + course_name + r"\\" + video_name + ".mp4"
            show_progress_bar(video_src, "\r%s[%s*%s]%s %s" % (bcolors.bm, bcolors.fc, bcolors.fm, bcolors.fc, video_name), save_dir)
            if config.getboolean("mainConfig","subtitles"):
                videoSrt = 'https://www.lynda.com/ajax/player/transcript/?courseID=%s&videoID=%s' % (course_id, video_id)
                time.sleep(0.5)
                show_progress_bar(videoSrt, "\r%s[%s*%s]%s %s" % (
                    bcolors.bm,
                    bcolors.fc,
                    bcolors.fm,
                    bcolors.fw,
                    video_name + ".srt"),
                                  os.path.join(config.get('mainConfig', 'saveDirectory'),course_name , video_name + ".srt"),
                                  sessions=cookies)
            total_counter += 1
            temp_counter += 1
            # Last video reached: print the per-course summary and move on.
            if counter == len(driver.find_elements_by_css_selector(".video-item__link.t-black.ember-view")):
                courses_count += 1
                sys.stdout.write(
                    "\n%s[%s+%s]%s%sYou have successfully downloaded course %s%s %swith %d videos. Downloaded %d courses and %d videos in total\n" % (
                        bcolors.bm, bcolors.fc, bcolors.fm, bcolors.fc,
                        bcolors.sd + bcolors.fc, bcolors.sb + bcolors.fg, course_name,
                        bcolors.sd + bcolors.fc, temp_counter, courses_count, total_counter)
                )
                break
    sys.stdout.flush()
if __name__ == '__main__':
    # Script entry point: start the GUI event loop.
    the_gui()
|
test_stdout.py | import os
import random
import string
import sys
import tempfile
import time
import pytest
from dagster import (
DagsterEventType,
InputDefinition,
ModeDefinition,
execute_pipeline,
pipeline,
reconstructable,
resource,
solid,
)
from dagster.core.execution.compute_logs import should_disable_io_stream_redirect
from dagster.core.instance import DagsterInstance
from dagster.core.storage.compute_log_manager import ComputeIOType
from dagster.core.test_utils import (
create_run_for_test,
instance_for_test,
instance_for_test_tempdir,
)
from dagster.seven import multiprocessing
# Marker printed by the spew solid's compute function (see spew below).
HELLO_SOLID = "HELLO SOLID"
# Marker printed when resource_a initializes (see resource_a below).
HELLO_RESOURCE = "HELLO RESOURCE"
# Windows + Python 2 emits os.linesep; every other platform writes plain "\n".
SEPARATOR = os.linesep if (os.name == "nt" and sys.version_info < (3,)) else "\n"
@resource
def resource_a(_):
    """Resource that prints the HELLO_RESOURCE marker on init and yields "A"."""
    print(HELLO_RESOURCE)  # pylint: disable=print-call
    return "A"
@solid
def spawn(_):
    """Seed solid: returns the integer 1 without printing anything."""
    return 1
@solid(input_defs=[InputDefinition("num", int)], required_resource_keys={"a"})
def spew(_, num):
    """Print the HELLO_SOLID marker to stdout and pass *num* through unchanged."""
    print(HELLO_SOLID)  # pylint: disable=print-call
    return num
def define_pipeline():
    """Build a pipeline chaining spawn -> spew -> spew with resource "a" bound."""
    @pipeline(mode_defs=[ModeDefinition(resource_defs={"a": resource_a})])
    def spew_pipeline():
        spew(spew(spawn()))
    return spew_pipeline
def normalize_file_content(s):
    """Collapse *s* to "\n"-joined non-empty lines.

    Platform line separators are normalized to "\n" first, then blank lines
    are dropped so captured-log comparisons are stable across OSes.
    """
    normalized = s.replace(os.linesep, "\n")
    non_empty_lines = filter(None, normalized.split("\n"))
    return "\n".join(non_empty_lines)
@pytest.mark.skipif(
    should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_to_disk():
    """Each spew step's captured stdout should land in a local log file."""
    with instance_for_test() as instance:
        spew_pipeline = define_pipeline()
        manager = instance.compute_log_manager
        result = execute_pipeline(spew_pipeline, instance=instance)
        assert result.success
        compute_steps = [
            event.step_key
            for event in result.step_event_list
            if event.event_type == DagsterEventType.STEP_START
        ]
        for step_key in compute_steps:
            # spawn prints nothing, so only the spew steps are checked.
            if step_key.startswith("spawn"):
                continue
            compute_io_path = manager.get_local_path(result.run_id, step_key, ComputeIOType.STDOUT)
            assert os.path.exists(compute_io_path)
            with open(compute_io_path, "r") as stdout_file:
                assert normalize_file_content(stdout_file.read()) == HELLO_SOLID
@pytest.mark.skipif(
    should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_to_disk_multiprocess():
    """Same as test_compute_log_to_disk, but under the multiprocess executor."""
    # reconstructable is required so subprocess workers can re-import the pipeline.
    spew_pipeline = reconstructable(define_pipeline)
    with instance_for_test() as instance:
        manager = instance.compute_log_manager
        result = execute_pipeline(
            spew_pipeline,
            run_config={"storage": {"filesystem": {}}, "execution": {"multiprocess": {}}},
            instance=instance,
        )
        assert result.success
        compute_steps = [
            event.step_key
            for event in result.step_event_list
            if event.event_type == DagsterEventType.STEP_START
        ]
        for step_key in compute_steps:
            # spawn prints nothing, so only the spew steps are checked.
            if step_key.startswith("spawn"):
                continue
            compute_io_path = manager.get_local_path(result.run_id, step_key, ComputeIOType.STDOUT)
            assert os.path.exists(compute_io_path)
            with open(compute_io_path, "r") as stdout_file:
                assert normalize_file_content(stdout_file.read()) == HELLO_SOLID
@pytest.mark.skipif(
    should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_manager():
    """Exercise the manager's read API for stdout, stderr and unknown run IDs."""
    with instance_for_test() as instance:
        manager = instance.compute_log_manager
        spew_pipeline = define_pipeline()
        result = execute_pipeline(spew_pipeline, instance=instance)
        assert result.success
        compute_steps = [
            event.step_key
            for event in result.step_event_list
            if event.event_type == DagsterEventType.STEP_START
        ]
        assert len(compute_steps) == 3
        step_key = "spew"
        assert manager.is_watch_completed(result.run_id, step_key)
        stdout = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDOUT)
        assert normalize_file_content(stdout.data) == HELLO_SOLID
        stderr = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDERR)
        # Strip the ANSI blue color codes dagster's logger emits.
        cleaned_logs = stderr.data.replace("\x1b[34m", "").replace("\x1b[0m", "")
        assert "dagster - DEBUG - spew_pipeline - " in cleaned_logs
        # Unknown run IDs must yield empty data rather than raising.
        bad_logs = manager.read_logs_file("not_a_run_id", step_key, ComputeIOType.STDOUT)
        assert bad_logs.data is None
        assert not manager.is_watch_completed("not_a_run_id", step_key)
@pytest.mark.skipif(
    should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_manager_subscriptions():
    """Subscribing after the run completes should replay one full log event per stream."""
    with instance_for_test() as instance:
        spew_pipeline = define_pipeline()
        step_key = "spew"
        result = execute_pipeline(spew_pipeline, instance=instance)
        stdout_observable = instance.compute_log_manager.observable(
            result.run_id, step_key, ComputeIOType.STDOUT
        )
        stderr_observable = instance.compute_log_manager.observable(
            result.run_id, step_key, ComputeIOType.STDERR
        )
        stdout = []
        stdout_observable.subscribe(stdout.append)
        stderr = []
        stderr_observable.subscribe(stderr.append)
        assert len(stdout) == 1
        assert stdout[0].data.startswith(HELLO_SOLID)
        # Cursor is a byte offset: 12 with "\n", 13 with "\r\n" line endings.
        assert stdout[0].cursor in [12, 13]
        assert len(stderr) == 1
        assert stderr[0].cursor == len(stderr[0].data)
        assert stderr[0].cursor > 400
def gen_solid_name(length):
    """Return a random name of *length* lowercase ASCII letters."""
    letters = string.ascii_lowercase
    return "".join(random.choice(letters) for _ in range(length))
@pytest.mark.skipif(
    should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_long_solid_names():
    """Log capture should still work when the step key is very long (300 chars)."""
    solid_name = gen_solid_name(300)
    @pipeline(mode_defs=[ModeDefinition(resource_defs={"a": resource_a})])
    def long_pipeline():
        spew.alias(name=solid_name)()
    with instance_for_test() as instance:
        manager = instance.compute_log_manager
        result = execute_pipeline(
            long_pipeline,
            instance=instance,
            run_config={"solids": {solid_name: {"inputs": {"num": 1}}}},
        )
        assert result.success
        compute_steps = [
            event.step_key
            for event in result.step_event_list
            if event.event_type == DagsterEventType.STEP_START
        ]
        assert len(compute_steps) == 1
        step_key = compute_steps[0]
        assert manager.is_watch_completed(result.run_id, step_key)
        stdout = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDOUT)
        assert normalize_file_content(stdout.data) == HELLO_SOLID
def execute_inner(step_key, pipeline_run, instance_ref):
    """multiprocessing target: rehydrate the instance from its ref, then run inner_step."""
    instance = DagsterInstance.from_ref(instance_ref)
    inner_step(instance, pipeline_run, step_key)
def inner_step(instance, pipeline_run, step_key):
    """Print three "<step_key> inner N" lines under the compute-log watcher.

    The surrounding sleeps pad the watch window so the capture machinery
    reliably observes the prints; expected_inner_output() mirrors the output.
    """
    with instance.compute_log_manager.watch(pipeline_run, step_key=step_key):
        time.sleep(0.1)
        print(step_key, "inner 1")  # pylint: disable=print-call
        print(step_key, "inner 2")  # pylint: disable=print-call
        print(step_key, "inner 3")  # pylint: disable=print-call
        time.sleep(0.1)
def expected_inner_output(step_key):
    """Return the three-line stdout block that inner_step() prints for *step_key*."""
    lines = []
    for num in (1, 2, 3):
        lines.append("{} inner {}".format(step_key, num))
    return "\n".join(lines)
def expected_outer_prefix():
    """Return the three-line "outer N" block printed before any step runs."""
    return "\n".join("outer %d" % num for num in (1, 2, 3))
@pytest.mark.skipif(
    should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_single():
    """Nested watches in one process: per-step logs and the full-run log both capture."""
    with instance_for_test() as instance:
        pipeline_name = "foo_pipeline"
        pipeline_run = create_run_for_test(instance, pipeline_name=pipeline_name)
        step_keys = ["A", "B", "C"]
        with instance.compute_log_manager.watch(pipeline_run):
            print("outer 1")  # pylint: disable=print-call
            print("outer 2")  # pylint: disable=print-call
            print("outer 3")  # pylint: disable=print-call
            for step_key in step_keys:
                inner_step(instance, pipeline_run, step_key)
        for step_key in step_keys:
            stdout = instance.compute_log_manager.read_logs_file(
                pipeline_run.run_id, step_key, ComputeIOType.STDOUT
            )
            assert normalize_file_content(stdout.data) == expected_inner_output(step_key)
        full_out = instance.compute_log_manager.read_logs_file(
            pipeline_run.run_id, pipeline_name, ComputeIOType.STDOUT
        )
        assert normalize_file_content(full_out.data).startswith(expected_outer_prefix())
@pytest.mark.skipif(
    should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_base_with_spaces():
    """Log capture must work when the configured base_dir contains spaces."""
    with tempfile.TemporaryDirectory() as temp_dir:
        with instance_for_test_tempdir(
            temp_dir,
            {
                "compute_logs": {
                    "module": "dagster.core.storage.local_compute_log_manager",
                    "class": "LocalComputeLogManager",
                    # The space in the path is the point of this test.
                    "config": {"base_dir": os.path.join(temp_dir, "base with spaces")},
                }
            },
        ) as instance:
            pipeline_name = "foo_pipeline"
            pipeline_run = create_run_for_test(instance, pipeline_name=pipeline_name)
            step_keys = ["A", "B", "C"]
            with instance.compute_log_manager.watch(pipeline_run):
                print("outer 1")  # pylint: disable=print-call
                print("outer 2")  # pylint: disable=print-call
                print("outer 3")  # pylint: disable=print-call
                for step_key in step_keys:
                    inner_step(instance, pipeline_run, step_key)
            for step_key in step_keys:
                stdout = instance.compute_log_manager.read_logs_file(
                    pipeline_run.run_id, step_key, ComputeIOType.STDOUT
                )
                assert normalize_file_content(stdout.data) == expected_inner_output(step_key)
            full_out = instance.compute_log_manager.read_logs_file(
                pipeline_run.run_id, pipeline_name, ComputeIOType.STDOUT
            )
            assert normalize_file_content(full_out.data).startswith(expected_outer_prefix())
@pytest.mark.skipif(
    should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_multi():
    """Per-step watches must capture output produced in child processes."""
    with instance_for_test() as instance:
        pipeline_name = "foo_pipeline"
        pipeline_run = create_run_for_test(instance, pipeline_name=pipeline_name)
        step_keys = ["A", "B", "C"]
        with instance.compute_log_manager.watch(pipeline_run):
            print("outer 1")  # pylint: disable=print-call
            print("outer 2")  # pylint: disable=print-call
            print("outer 3")  # pylint: disable=print-call
            for step_key in step_keys:
                # Each step runs sequentially in its own subprocess.
                process = multiprocessing.Process(
                    target=execute_inner, args=(step_key, pipeline_run, instance.get_ref())
                )
                process.start()
                process.join()
        for step_key in step_keys:
            stdout = instance.compute_log_manager.read_logs_file(
                pipeline_run.run_id, step_key, ComputeIOType.STDOUT
            )
            assert normalize_file_content(stdout.data) == expected_inner_output(step_key)
        full_out = instance.compute_log_manager.read_logs_file(
            pipeline_run.run_id, pipeline_name, ComputeIOType.STDOUT
        )
        # The way that the multiprocess compute-logging interacts with pytest (which stubs out the
        # sys.stdout fileno) makes this difficult to test. The pytest-captured stdout only captures
        # the stdout from the outer process, not also the inner process
        assert normalize_file_content(full_out.data).startswith(expected_outer_prefix())
|
test_basic_3.py | # coding: utf-8
import gc
import logging
import os
import sys
import time
import subprocess
import numpy as np
import pytest
import ray.cluster_utils
from ray._private.test_utils import (
dicts_equal,
wait_for_pid_to_exit,
wait_for_condition,
)
from ray.autoscaler._private.constants import RAY_PROCESSES
from pathlib import Path
import ray
import psutil
logger = logging.getLogger(__name__)
def test_auto_global_gc(shutdown_only):
    """Ray should trigger gc.collect() in workers under object-store memory pressure."""
    # 100MB
    ray.init(num_cpus=1, object_store_memory=100 * 1024 * 1024)
    @ray.remote
    class Test:
        def __init__(self):
            self.collected = False
            import gc
            # Automatic collection is disabled, so `collected` can only flip
            # if Ray itself invokes the GC via the registered callback.
            gc.disable()
            def gc_called(phase, info):
                self.collected = True
            gc.callbacks.append(gc_called)
        def circular_ref(self):
            # 20MB
            buf1 = b"0" * (10 * 1024 * 1024)
            buf2 = b"1" * (10 * 1024 * 1024)
            ref1 = ray.put(buf1)
            ref2 = ray.put(buf2)
            # Build a reference cycle holding the two ObjectRefs so only a
            # collector pass can free them.
            b = []
            a = []
            b.append(a)
            a.append(b)
            b.append(ref1)
            a.append(ref2)
            return a
        def collected(self):
            return self.collected
    test = Test.remote()
    # 60MB
    for i in range(3):
        ray.get(test.circular_ref.remote())
    time.sleep(2)
    assert not ray.get(test.collected.remote())
    # 80MB
    for _ in range(1):
        ray.get(test.circular_ref.remote())
    time.sleep(2)
    assert ray.get(test.collected.remote())
@pytest.mark.skipif(sys.platform == "win32", reason="Hangs on windows")
def test_many_fractional_resources(shutdown_only):
    """Fractional CPU/GPU/custom resource requests must be granted exactly and released."""
    ray.init(num_cpus=2, num_gpus=2, resources={"Custom": 2})
    @ray.remote
    def g():
        return 1
    @ray.remote
    def f(block, accepted_resources):
        true_resources = {
            resource: value[0][1]
            for resource, value in ray.worker.get_resource_ids().items()
        }
        if block:
            ray.get(g.remote())
        return dicts_equal(true_resources, accepted_resources)
    # Check that the resource are assigned correctly.
    result_ids = []
    for rand1, rand2, rand3 in np.random.uniform(size=(100, 3)):
        # Requests are truncated to 4 decimal places to match Ray's
        # resource-precision behavior.
        resource_set = {"CPU": int(rand1 * 10000) / 10000}
        result_ids.append(f._remote([False, resource_set], num_cpus=rand1))
        resource_set = {"CPU": 1, "GPU": int(rand1 * 10000) / 10000}
        result_ids.append(f._remote([False, resource_set], num_gpus=rand1))
        resource_set = {"CPU": 1, "Custom": int(rand1 * 10000) / 10000}
        result_ids.append(
            f._remote([False, resource_set], resources={"Custom": rand1}))
        resource_set = {
            "CPU": int(rand1 * 10000) / 10000,
            "GPU": int(rand2 * 10000) / 10000,
            "Custom": int(rand3 * 10000) / 10000
        }
        result_ids.append(
            f._remote(
                [False, resource_set],
                num_cpus=rand1,
                num_gpus=rand2,
                resources={"Custom": rand3}))
        result_ids.append(
            f._remote(
                [True, resource_set],
                num_cpus=rand1,
                num_gpus=rand2,
                resources={"Custom": rand3}))
    assert all(ray.get(result_ids))
    # Check that the available resources at the end are the same as the
    # beginning.
    stop_time = time.time() + 10
    correct_available_resources = False
    while time.time() < stop_time:
        available_resources = ray.available_resources()
        if ("CPU" in available_resources
                and ray.available_resources()["CPU"] == 2.0
                and "GPU" in available_resources
                and ray.available_resources()["GPU"] == 2.0
                and "Custom" in available_resources
                and ray.available_resources()["Custom"] == 2.0):
            correct_available_resources = True
            break
    if not correct_available_resources:
        assert False, "Did not get correct available resources."
@pytest.mark.skipif(sys.platform == "win32", reason="Fails on windows")
def test_background_tasks_with_max_calls(shutdown_only):
    """Workers exiting via max_calls=1 must stay alive until their child tasks finish."""
    ray.init(
        # TODO (Alex): We need to fix
        # https://github.com/ray-project/ray/issues/20203 to remove this flag.
        num_cpus=2,
        _system_config={"worker_cap_initial_backoff_delay_ms": 0})
    @ray.remote
    def g():
        time.sleep(.1)
        return 0
    @ray.remote(max_calls=1, max_retries=0)
    def f():
        return [g.remote()]
    nested = ray.get([f.remote() for _ in range(10)])
    # Should still be able to retrieve these objects, since f's workers will
    # wait for g to finish before exiting.
    ray.get([x[0] for x in nested])
    # Second variant also reports the worker pid so its exit can be observed.
    @ray.remote(max_calls=1, max_retries=0)
    def f():
        return os.getpid(), g.remote()
    nested = ray.get([f.remote() for _ in range(10)])
    while nested:
        pid, g_id = nested.pop(0)
        assert ray.get(g_id) == 0
        del g_id
        # Necessary to dereference the object via GC, so the worker can exit.
        gc.collect()
        wait_for_pid_to_exit(pid)
@pytest.mark.skipif(sys.platform == "win32", reason="Hangs on windows")
def test_fair_queueing(shutdown_only):
    """1000 three-level nested tasks on one CPU must all finish (no starvation deadlock)."""
    ray.init(
        num_cpus=1,
        _system_config={
            # Having parallel leases is slow in this case
            # because tasks are scheduled FIFO,
            # the more parallism we have,
            # the more workers we need to start to execute f and g tasks
            # before we can execute the first h task.
            "max_pending_lease_requests_per_scheduling_category": 1,
            "worker_cap_enabled": True,
        })
    @ray.remote
    def h():
        return 0
    @ray.remote
    def g():
        return ray.get(h.remote())
    @ray.remote
    def f():
        return ray.get(g.remote())
    # This will never finish without fair queueing of {f, g, h}:
    # https://github.com/ray-project/ray/issues/3644
    timeout = 510.0 if sys.platform == "win32" else 60.0
    ready, _ = ray.wait(
        [f.remote() for _ in range(1000)], timeout=timeout, num_returns=1000)
    assert len(ready) == 1000, len(ready)
@pytest.mark.skipif(sys.platform == "win32", reason="Hangs on windows")
def test_actor_killing(shutdown_only):
    # This is to test create and kill an actor immediately
    import ray
    ray.init(num_cpus=1)
    @ray.remote(num_cpus=1)
    class Actor:
        def foo(self):
            return None
    # Kill an actor right after creation; a fresh actor must still work on the
    # single available CPU.
    worker_1 = Actor.remote()
    ray.kill(worker_1)
    worker_2 = Actor.remote()
    assert ray.get(worker_2.foo.remote()) is None
    ray.kill(worker_2)
    # With max_restarts=1, killing with no_restart=False restarts the actor
    # once; the second kill consumes the single allowed restart.
    worker_1 = Actor.options(max_restarts=1).remote()
    ray.kill(worker_1, no_restart=False)
    assert ray.get(worker_1.foo.remote()) is None
    ray.kill(worker_1, no_restart=False)
    worker_2 = Actor.remote()
    assert ray.get(worker_2.foo.remote()) is None
def test_actor_scheduling(shutdown_only):
    """Calls queued after an actor self-exits must fail rather than hang."""
    ray.init()
    @ray.remote
    class A:
        def run_fail(self):
            ray.actor.exit_actor()
        def get(self):
            return 1
    a = A.remote()
    a.run_fail.remote()
    with pytest.raises(Exception):
        ray.get([a.get.remote()])
def test_worker_startup_count(ray_start_cluster):
    """Test that no extra workers started while no available cpu resources
    in cluster."""
    cluster = ray_start_cluster
    # Cluster total cpu resources is 4.
    cluster.add_node(
        num_cpus=4, _system_config={
            "debug_dump_period_milliseconds": 100,
        })
    ray.init(address=cluster.address)
    # A slow function never returns. It will hold cpu resources all the way.
    @ray.remote
    def slow_function():
        while True:
            time.sleep(1000)
    # Flood a large scale lease worker requests.
    for i in range(10000):
        # Use random cpu resources to make sure that all tasks are sent
        # to the raylet. Because core worker will cache tasks with the
        # same resource shape.
        num_cpus = 0.24 + np.random.uniform(0, 0.01)
        slow_function.options(num_cpus=num_cpus).remote()
    # Check "debug_state.txt" to ensure no extra workers were started.
    session_dir = ray.worker.global_worker.node.address_info["session_dir"]
    session_path = Path(session_dir)
    debug_state_path = session_path / "logs" / "debug_state.txt"
    def get_num_workers():
        """Parse the PYTHON worker count out of debug_state.txt, or None if absent."""
        with open(debug_state_path) as f:
            for line in f.readlines():
                num_workers_prefix = "- num PYTHON workers: "
                if num_workers_prefix in line:
                    num_workers = int(line[len(num_workers_prefix):])
                    return num_workers
        return None
    # Wait for "debug_state.txt" to be updated to reflect the started worker.
    timeout_limit = 15
    start = time.time()
    # 16 = the expected steady-state worker count asserted below; see the
    # assert in the loop. TODO confirm how 16 is derived from 4 CPUs / 0.24 CPU tasks.
    wait_for_condition(lambda: get_num_workers() == 16, timeout=timeout_limit)
    time_waited = time.time() - start
    print(f"Waited {time_waited} for debug_state.txt to be updated")
    # Check that no more workers started for a while.
    for i in range(100):
        # Sometimes the debug state file can be empty. Retry if needed.
        for _ in range(3):
            num = get_num_workers()
            if num is None:
                print("Retrying parse debug_state.txt")
                time.sleep(0.05)
            else:
                break
        assert num == 16
        time.sleep(0.1)
@pytest.mark.skipif(sys.platform == "win32", reason="Hangs on windows")
def test_function_unique_export(ray_start_regular):
    """Re-invoking a remote function must not re-export its definition to redis."""
    @ray.remote
    def f():
        pass
    @ray.remote
    def g():
        ray.get(f.remote())
    ray.get(g.remote())
    # Snapshot the export count after the first full invocation...
    num_exports = ray.worker.global_worker.redis_client.llen("Exports")
    ray.get([g.remote() for _ in range(5)])
    # ...and verify five more invocations added no new exports.
    assert ray.worker.global_worker.redis_client.llen("Exports") == num_exports
@pytest.mark.skipif(
    sys.platform not in ["win32", "darwin"],
    reason="Only listen on localhost by default on mac and windows.")
@pytest.mark.parametrize("start_ray", ["ray_start_regular", "call_ray_start"])
def test_listen_on_localhost(start_ray, request):
    """All ray processes should listen on localhost by default
    on mac and windows to prevent security popups.
    """
    request.getfixturevalue(start_ray)
    process_infos = []
    for proc in psutil.process_iter(["name", "cmdline"]):
        try:
            process_infos.append((proc, proc.name(), proc.cmdline()))
        except psutil.Error:
            # Processes can exit (or deny access) between iteration and query.
            pass
    for keyword, filter_by_cmd in RAY_PROCESSES:
        for candidate in process_infos:
            proc, proc_cmd, proc_cmdline = candidate
            # Match either the executable name or the full command line,
            # depending on how this ray process is identified.
            corpus = (proc_cmd if filter_by_cmd else
                      subprocess.list2cmdline(proc_cmdline))
            if keyword not in corpus:
                continue
            for connection in proc.connections():
                if connection.status != psutil.CONN_LISTEN:
                    continue
                # ip can be 127.0.0.1 or ::127.0.0.1
                assert "127.0.0.1" in connection.laddr.ip
def test_job_id_consistency(ray_start_regular):
    """Tasks, puts and actors created inside a job must all carry the driver's job ID."""
    @ray.remote
    def foo():
        return "bar"
    @ray.remote
    class Foo:
        def ping(self):
            return "pong"
    @ray.remote
    def verify_job_id(job_id, new_thread):
        def verify():
            # Every ID produced inside this worker must match the driver job.
            current_task_id = ray.runtime_context.get_runtime_context().task_id
            assert job_id == current_task_id.job_id()
            obj1 = foo.remote()
            assert job_id == obj1.job_id()
            obj2 = ray.put(1)
            assert job_id == obj2.job_id()
            a = Foo.remote()
            assert job_id == a._actor_id.job_id
            obj3 = a.ping.remote()
            assert job_id == obj3.job_id()
        if not new_thread:
            verify()
        else:
            # Run the checks on a secondary thread and re-raise any failure on
            # the main thread so pytest sees it.
            exc = []
            def run():
                try:
                    verify()
                except BaseException as e:
                    exc.append(e)
            import threading
            t = threading.Thread(target=run)
            t.start()
            t.join()
            if len(exc) > 0:
                raise exc[0]
    job_id = ray.runtime_context.get_runtime_context().job_id
    ray.get(verify_job_id.remote(job_id, False))
    ray.get(verify_job_id.remote(job_id, True))
if __name__ == "__main__":
    # Allow running this test module directly; forward pytest's exit status.
    sys.exit(pytest.main(["-v", __file__]))
|
connection.py | #!/usr/bin/env python3
import sys
import threading
from .packet import \
SetCompression, LoginSuccess, JoinGame, \
OutgoingPluginMessage, IncomingPacket, Disconnect
from .crypto import CryptoState
from .keepalive import KeepAlive
from .player import Player
from .net import ProtocolError, IllegalData
from .types import mc_varint, mc_string, States
from .version import APP_NAME, APP_VERSION
class MCConnection:
    """One Minecraft client connection, serviced by its own daemon reader thread."""
    # Flipped once by close(); __bool__ reports liveness as `not closed`.
    closed = False
    # Protocol version placeholder until the client's handshake supplies it.
    version = mc_varint(-1)
    def __init__(self, server, conn_info):
        # conn_info is the (socket, address) pair from the server's accept().
        self.server = server
        self.config = server.config
        self._sock, self.addr = conn_info
        # Default to a 15-second socket timeout when none is configured.
        self._sock.settimeout(self.config.get("timeout") or 15)
        self.crypto = CryptoState(self)
        # All packet I/O goes through the crypto wrapper's socket.
        self.sock = self.crypto.sock
        self.keepalive = KeepAlive(self)
        self.player = None
        self.compression = -1  # -1: compression not negotiated yet
        self.state = States.HANDSHAKING
        self.thread = threading.Thread(target=self._worker, daemon=True)
        self.thread.start()
    def assign_player(self, username):
        """Create this connection's Player and register it (and its entity) server-side."""
        self.player = Player(self, username, resolve_uuid=True)
        self.server.players.append(self.player)
        self.server.entities.append(self.player.entity)
    def join_game(self):
        """Finish login: send the join packet sequence, enter PLAY and spawn the player."""
        SetCompression(self).send()
        LoginSuccess(self).send()
        self.state = States.PLAY
        JoinGame(self).send()
        # Advertise the server implementation via the MC|Brand plugin channel.
        impl_name = mc_string("{}/{}".format(APP_NAME, APP_VERSION)).bytes()
        OutgoingPluginMessage(self, "MC|Brand", impl_name).send()
        self.player.spawn()
    def _worker(self):
        """Packet read loop: handshake/login phase first, then PLAY with keepalives."""
        try:
            while self.state != States.PLAY:
                if self.closed:
                    return
                pkt = IncomingPacket.from_connection(self)
                pkt.recv()
            self.keepalive.start()
            while True:
                if self.closed:
                    return
                pkt = IncomingPacket.from_connection(self)
                pkt.recv()
                self.keepalive.check()
        except IllegalData as e:
            # Malformed client data: tell the client why before dropping it.
            print(e, file=sys.stderr)
            pkt = Disconnect(self, str(e))
            pkt.send()
        except ProtocolError as e:
            print(e, file=sys.stderr)
        finally:
            self.close()
    def close(self):
        """Tear the connection down once and prune dead entries from the server lists."""
        if not self.closed:
            self.closed = True
            # Truthiness filtering drops closed connections (via __bool__);
            # Player presumably follows the same convention -- TODO confirm.
            self.server.connections = [s for s in self.server.connections if s]
            self.server.players = [p for p in self.server.players if p]
            print("term <{}:{}>: ({} left)".format(self.addr[0], self.addr[1], len(self.server.connections)))
            self._sock.close()
    def __bool__(self):
        # Truthy while the connection is still open.
        return not self.closed
|
run-tests.py | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import logging
from argparse import ArgumentParser
import os
import re
import shutil
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
import uuid
if sys.version < '3':
import Queue
else:
import queue as Queue
from multiprocessing import Manager
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../dev/"))
from sparktestsupport import SPARK_HOME # noqa (suppress pep8 warnings)
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
from sparktestsupport.modules import all_modules, pyspark_sql # noqa
python_modules = dict((m.name, m) for m in all_modules if m.python_test_goals if m.name != 'root')
def print_red(text):
    """Print *text* to stdout wrapped in ANSI red (SGR 31) escape codes."""
    red, reset = '\033[31m', '\033[0m'
    print("%s%s%s" % (red, text, reset))
# Process-shared dict: child processes record skipped-test lines here.
SKIPPED_TESTS = Manager().dict()
LOG_FILE = os.path.join(SPARK_HOME, "python/unit-tests.log")
# Serializes failure reporting (LOG_FILE writes + console dump) across threads.
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
# Find out where the assembly jars are located.
# TODO: revisit for Scala 2.13
for scala in ["2.12"]:
    build_dir = os.path.join(SPARK_HOME, "assembly", "target", "scala-" + scala)
    if os.path.isdir(build_dir):
        SPARK_DIST_CLASSPATH = os.path.join(build_dir, "jars", "*")
        break
else:
    # for/else: reached only when no scala build directory matched above.
    raise Exception("Cannot find assembly build directory, please build Spark first.")
def run_individual_python_test(target_dir, test_name, pyspark_python):
    """Run one PySpark test suite in a subprocess and report its outcome.

    Output is captured to a temp file; on failure it is appended to LOG_FILE,
    echoed to the console, and the whole runner exits immediately via
    os._exit. On success, skipped-test lines are harvested into SKIPPED_TESTS.
    """
    env = dict(os.environ)
    env.update({
        'SPARK_DIST_CLASSPATH': SPARK_DIST_CLASSPATH,
        'SPARK_TESTING': '1',
        'SPARK_PREPEND_CLASSES': '1',
        'PYSPARK_PYTHON': which(pyspark_python),
        'PYSPARK_DRIVER_PYTHON': which(pyspark_python)
    })
    # Create a unique temp directory under 'target/' for each run. The TMPDIR variable is
    # recognized by the tempfile module to override the default system temp directory.
    tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
    while os.path.isdir(tmp_dir):
        tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
    os.mkdir(tmp_dir)
    env["TMPDIR"] = tmp_dir
    # Also override the JVM's temp directory by setting driver and executor options.
    spark_args = [
        "--conf", "spark.driver.extraJavaOptions=-Djava.io.tmpdir={0}".format(tmp_dir),
        "--conf", "spark.executor.extraJavaOptions=-Djava.io.tmpdir={0}".format(tmp_dir),
        "pyspark-shell"
    ]
    env["PYSPARK_SUBMIT_ARGS"] = " ".join(spark_args)
    LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
    start_time = time.time()
    try:
        per_test_output = tempfile.TemporaryFile()
        retcode = subprocess.Popen(
            [os.path.join(SPARK_HOME, "bin/pyspark")] + test_name.split(),
            stderr=per_test_output, stdout=per_test_output, env=env).wait()
        shutil.rmtree(tmp_dir, ignore_errors=True)
    # Bare except is deliberate: any launch failure aborts the entire runner.
    except:
        LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
        # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
        # this code is invoked from a thread other than the main thread.
        os._exit(1)
    duration = time.time() - start_time
    # Exit on the first failure.
    if retcode != 0:
        try:
            with FAILURE_REPORTING_LOCK:
                with open(LOG_FILE, 'ab') as log_file:
                    per_test_output.seek(0)
                    log_file.writelines(per_test_output)
                per_test_output.seek(0)
                for line in per_test_output:
                    decoded_line = line.decode()
                    # Suppress lines that are pure progress counters.
                    if not re.match('[0-9]+', decoded_line):
                        print(decoded_line, end='')
                per_test_output.close()
        except:
            LOGGER.exception("Got an exception while trying to print failed test output")
        finally:
            print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
            # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
            # this code is invoked from a thread other than the main thread.
            os._exit(-1)
    else:
        skipped_counts = 0
        try:
            per_test_output.seek(0)
            # Here expects skipped test output from unittest when verbosity level is
            # 2 (or --verbose option is enabled).
            decoded_lines = map(lambda line: line.decode(), iter(per_test_output))
            skipped_tests = list(filter(
                lambda line: re.search(r'test_.* \(pyspark\..*\) ... (skip|SKIP)', line),
                decoded_lines))
            skipped_counts = len(skipped_tests)
            if skipped_counts > 0:
                key = (pyspark_python, test_name)
                SKIPPED_TESTS[key] = skipped_tests
            per_test_output.close()
        except:
            import traceback
            print_red("\nGot an exception while trying to store "
                      "skipped test output:\n%s" % traceback.format_exc())
            # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
            # this code is invoked from a thread other than the main thread.
            os._exit(-1)
        if skipped_counts != 0:
            LOGGER.info(
                "Finished test(%s): %s (%is) ... %s tests were skipped", pyspark_python, test_name,
                duration, skipped_counts)
        else:
            LOGGER.info(
                "Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
    """Return the default list of Python executables to test against.

    Only executables actually present on PATH are kept; if python2.7 is
    missing, plain `python` is used as a fallback (with a warning).
    """
    candidates = ["python2.7", "python3.6", "pypy"]
    found = [exe for exe in candidates if which(exe)]
    if "python2.7" not in found:
        LOGGER.warning("Not testing against `python2.7` because it could not be found; falling"
                       " back to `python` instead")
        found.insert(0, "python")
    return found
def parse_opts():
    """Build, parse, and validate the command-line options for the runner.

    Returns the parsed namespace; exits with an argparse error on unknown
    arguments or a parallelism below 1.
    """
    default_pythons = ','.join(get_default_python_executables())
    default_modules = ",".join(sorted(python_modules.keys()))

    arg_parser = ArgumentParser(prog="run-tests")
    arg_parser.add_argument(
        "--python-executables", type=str, default=default_pythons,
        help="A comma-separated list of Python executables to test against (default: %(default)s)"
    )
    arg_parser.add_argument(
        "--modules", type=str, default=default_modules,
        help="A comma-separated list of Python modules to test (default: %(default)s)"
    )
    arg_parser.add_argument(
        "-p", "--parallelism", type=int, default=4,
        help="The number of suites to test in parallel (default %(default)d)"
    )
    arg_parser.add_argument(
        "--verbose", action="store_true",
        help="Enable additional debug logging"
    )
    dev_group = arg_parser.add_argument_group("Developer Options")
    dev_group.add_argument(
        "--testnames", type=str,
        default=None,
        help=(
            "A comma-separated list of specific modules, classes and functions of doctest "
            "or unittest to test. "
            "For example, 'pyspark.sql.foo' to run the module as unittests or doctests, "
            "'pyspark.sql.tests FooTests' to run the specific class of unittests, "
            "'pyspark.sql.tests FooTests.test_foo' to run the specific unittest in the class. "
            "'--modules' option is ignored if they are given.")
    )

    args, unknown = arg_parser.parse_known_args()
    if unknown:
        arg_parser.error("Unsupported arguments: %s" % ' '.join(unknown))
    if args.parallelism < 1:
        arg_parser.error("Parallelism cannot be less than 1")
    return args
def _check_coverage(python_exec):
    """Exit the run early if `python_exec` cannot import the coverage package.

    Called when the 'COVERAGE_PROCESS_START' environment variable is set, so
    a missing coverage install fails fast instead of mid-test-run.

    Fixes vs. original: the devnull handle is now closed via a context manager
    (it was leaked), and the bare `except:` is narrowed to `except Exception`
    so KeyboardInterrupt / SystemExit still propagate.
    """
    try:
        with open(os.devnull, 'w') as devnull:
            subprocess_check_output(
                [python_exec, "-c", "import coverage"],
                stderr=devnull)
    except Exception:
        print_red("Coverage is not installed in Python executable '%s' "
                  "but 'COVERAGE_PROCESS_START' environment variable is set, "
                  "exiting." % python_exec)
        sys.exit(-1)
def main():
    """Entry point: parse options, queue up (python_exec, test) tasks, and run
    them on a pool of daemon worker threads, writing output to LOG_FILE.

    Failure handling lives in run_individual_python_test(), which calls
    os._exit() on the first failing suite, terminating the whole process.
    """
    opts = parse_opts()
    if opts.verbose:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    # --testnames, when given, takes precedence over --modules.
    should_test_modules = opts.testnames is None
    logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
    LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
    if os.path.exists(LOG_FILE):
        os.remove(LOG_FILE)
    python_execs = opts.python_executables.split(',')
    LOGGER.info("Will test against the following Python executables: %s", python_execs)
    if should_test_modules:
        # Resolve each requested module name against the registry; unknown
        # names abort the run.
        modules_to_test = []
        for module_name in opts.modules.split(','):
            if module_name in python_modules:
                modules_to_test.append(python_modules[module_name])
            else:
                print("Error: unrecognized module '%s'. Supported modules: %s" %
                      (module_name, ", ".join(python_modules)))
                sys.exit(-1)
        LOGGER.info("Will test the following Python modules: %s", [x.name for x in modules_to_test])
    else:
        testnames_to_test = opts.testnames.split(',')
        LOGGER.info("Will test the following Python tests: %s", testnames_to_test)
    task_queue = Queue.PriorityQueue()
    for python_exec in python_execs:
        # Check if the python executable has coverage installed when 'COVERAGE_PROCESS_START'
        # environmental variable is set.
        if "COVERAGE_PROCESS_START" in os.environ:
            _check_coverage(python_exec)
        python_implementation = subprocess_check_output(
            [python_exec, "-c", "import platform; print(platform.python_implementation())"],
            universal_newlines=True).strip()
        LOGGER.debug("%s python_implementation is %s", python_exec, python_implementation)
        LOGGER.debug("%s version is: %s", python_exec, subprocess_check_output(
            [python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
        if should_test_modules:
            for module in modules_to_test:
                if python_implementation not in module.blacklisted_python_implementations:
                    for test_goal in module.python_test_goals:
                        # Known slow suites get priority 0 so they start first,
                        # minimizing overall wall-clock time.
                        heavy_tests = ['pyspark.streaming.tests', 'pyspark.mllib.tests',
                                       'pyspark.tests', 'pyspark.sql.tests', 'pyspark.ml.tests']
                        if any(map(lambda prefix: test_goal.startswith(prefix), heavy_tests)):
                            priority = 0
                        else:
                            priority = 100
                        task_queue.put((priority, (python_exec, test_goal)))
        else:
            for test_goal in testnames_to_test:
                task_queue.put((0, (python_exec, test_goal)))
    # Create the target directory before starting tasks to avoid races.
    target_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'target'))
    if not os.path.isdir(target_dir):
        os.mkdir(target_dir)

    def process_queue(task_queue):
        # Worker loop: drain the queue until it is empty, marking every task
        # done even if the test function raises.
        while True:
            try:
                (priority, (python_exec, test_goal)) = task_queue.get_nowait()
            except Queue.Empty:
                break
            try:
                run_individual_python_test(target_dir, test_goal, python_exec)
            finally:
                task_queue.task_done()

    start_time = time.time()
    for _ in range(opts.parallelism):
        worker = Thread(target=process_queue, args=(task_queue,))
        worker.daemon = True
        worker.start()
    try:
        task_queue.join()
    except (KeyboardInterrupt, SystemExit):
        print_red("Exiting due to interrupt")
        sys.exit(-1)
    total_duration = time.time() - start_time
    LOGGER.info("Tests passed in %i seconds", total_duration)
    # Report any skipped tests that the workers recorded in SKIPPED_TESTS.
    for key, lines in sorted(SKIPPED_TESTS.items()):
        pyspark_python, test_name = key
        LOGGER.info("\nSkipped tests in %s with %s:" % (test_name, pyspark_python))
        for line in lines:
            LOGGER.info(" %s" % line.rstrip())
# Script entry point: run the test driver only when executed directly.
if __name__ == "__main__":
    main()
|
test_itertools.py | import unittest
from test import support
from itertools import *
import weakref
from decimal import Decimal
from fractions import Fraction
import operator
import random
import copy
import pickle
from functools import reduce
import sys
import struct
import threading
# Platform extremes of Py_ssize_t, used to probe count()/islice() behaviour
# around the native integer boundary.
maxsize = support.MAX_Py_ssize_t
minsize = -maxsize-1
def lzip(*args):
    """Like zip(), but eagerly materialized into a list of tuples."""
    return [pair for pair in zip(*args)]
def onearg(x):
    """Test helper: a unary function (doubles its argument)."""
    doubled = 2 * x
    return doubled
def errfunc(*args):
    """Test helper: unconditionally raises ValueError."""
    raise ValueError()
def gen3():
    """Non-restartable source sequence yielding 0, 1, 2 exactly once."""
    yield from range(3)
def isEven(x):
    """Test predicate: True when x is even."""
    remainder = x % 2
    return remainder == 0
def isOdd(x):
    """Test predicate: True when x is odd."""
    remainder = x % 2
    return remainder == 1
def tupleize(*args):
    """Collect all positional arguments into a tuple and return it."""
    return args
def irange(n):
    """Generator yielding the integers 0 .. n-1, one at a time."""
    yield from range(n)
class StopNow:
    """Iterator that behaves as an empty iterable: next() raises immediately."""

    def __iter__(self):
        # The object is its own iterator.
        return self

    def __next__(self):
        raise StopIteration()
def take(n, seq):
    """Return the first n items of seq as a list.

    Convenience helper for partially consuming a long or infinite iterable.
    """
    return [item for item in islice(seq, n)]
def prod(iterable):
    """Return the product of all items; the empty product is 1."""
    result = 1
    for item in iterable:
        result = result * item
    return result
def fact(n):
    """Return n! (factorial); fact(0) == 1."""
    result = 1
    for k in range(2, n + 1):
        result = result * k
    return result
# root level methods for pickling ability
def testR(r):
return r[0]
def testR2(r):
return r[2]
def underten(x):
    """Predicate: True when x is strictly less than ten."""
    return x < 10
# One pickle-round-trip function per supported pickle protocol.  The
# `proto=proto` default argument binds the loop variable at definition time,
# avoiding the late-binding-closure pitfall (otherwise every lambda would use
# the final protocol number).
picklecopiers = [lambda s, proto=proto: pickle.loads(pickle.dumps(s, proto))
                 for proto in range(pickle.HIGHEST_PROTOCOL + 1)]
class TestBasicOps(unittest.TestCase):
def pickletest(self, protocol, it, stop=4, take=1, compare=None):
    """Test that an iterator is the same after pickling, also when part-consumed.

    NOTE: the `take` parameter shadows the module-level take() helper; inside
    this method it is the number of items to consume before re-pickling.
    """
    def expand(it, i=0):
        # Recursively expand iterables, within sensible bounds
        if i > 10:
            raise RuntimeError("infinite recursion encountered")
        if isinstance(it, str):
            return it
        try:
            l = list(islice(it, stop))
        except TypeError:
            return it  # can't expand it
        return [expand(e, i+1) for e in l]
    # Test the initial copy against the original
    dump = pickle.dumps(it, protocol)
    i2 = pickle.loads(dump)
    self.assertEqual(type(it), type(i2))
    a, b = expand(it), expand(i2)
    self.assertEqual(a, b)
    if compare:
        c = expand(compare)
        self.assertEqual(a, c)
    # Take from the copy, and create another copy and compare them.
    i3 = pickle.loads(dump)
    took = 0
    try:
        for i in range(take):
            next(i3)
            took += 1
    except StopIteration:
        pass  # in case there is less data than 'take'
    dump = pickle.dumps(i3, protocol)
    i4 = pickle.loads(dump)
    a, b = expand(i3), expand(i4)
    self.assertEqual(a, b)
    if compare:
        # Compare against the reference sequence minus the consumed prefix.
        c = expand(compare[took:])
        self.assertEqual(a, c);
def test_accumulate(self):
    """accumulate(): running totals, alternate binary ops, errors, pickling."""
    self.assertEqual(list(accumulate(range(10))),               # one positional arg
                     [0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
    self.assertEqual(list(accumulate(iterable=range(10))),      # kw arg
                     [0, 1, 3, 6, 10, 15, 21, 28, 36, 45])
    for typ in int, complex, Decimal, Fraction:                 # multiple types
        self.assertEqual(
            list(accumulate(map(typ, range(10)))),
            list(map(typ, [0, 1, 3, 6, 10, 15, 21, 28, 36, 45])))
    self.assertEqual(list(accumulate('abc')), ['a', 'ab', 'abc'])   # works with non-numeric
    self.assertEqual(list(accumulate([])), [])                  # empty iterable
    self.assertEqual(list(accumulate([7])), [7])                # iterable of length one
    self.assertRaises(TypeError, accumulate, range(10), 5, 6)   # too many args
    self.assertRaises(TypeError, accumulate)                    # too few args
    self.assertRaises(TypeError, accumulate, x=range(10))       # unexpected kwd arg
    self.assertRaises(TypeError, list, accumulate([1, []]))     # args that don't add
    # Custom binary functions: running min, running max, running product.
    s = [2, 8, 9, 5, 7, 0, 3, 4, 1, 6]
    self.assertEqual(list(accumulate(s, min)),
                     [2, 2, 2, 2, 2, 0, 0, 0, 0, 0])
    self.assertEqual(list(accumulate(s, max)),
                     [2, 8, 9, 9, 9, 9, 9, 9, 9, 9])
    self.assertEqual(list(accumulate(s, operator.mul)),
                     [2, 16, 144, 720, 5040, 0, 0, 0, 0, 0])
    with self.assertRaises(TypeError):
        list(accumulate(s, chr))                                # unary-operation
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        self.pickletest(proto, accumulate(range(10)))           # test pickling
def test_chain(self):
    """chain(): run the C implementation and a pure-Python equivalent
    through the same assertions."""
    def chain2(*iterables):
        'Pure python version in the docs'
        for it in iterables:
            for element in it:
                yield element
    for c in (chain, chain2):
        self.assertEqual(list(c('abc', 'def')), list('abcdef'))
        self.assertEqual(list(c('abc')), list('abc'))
        self.assertEqual(list(c('')), [])
        self.assertEqual(take(4, c('abc', 'def')), list('abcd'))
        # Non-iterable arguments fail lazily, when iterated.
        self.assertRaises(TypeError, list, c(2, 3))
def test_chain_from_iterable(self):
    """chain.from_iterable(): same contract as chain(*iterables), but the
    iterables themselves come from a (possibly lazy) iterable."""
    self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef'))
    self.assertEqual(list(chain.from_iterable(['abc'])), list('abc'))
    self.assertEqual(list(chain.from_iterable([''])), [])
    self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd'))
    self.assertRaises(TypeError, list, chain.from_iterable([2, 3]))
def test_chain_reducible(self):
    """chain(): copy.deepcopy and pickle round-trips preserve the iterator
    state, including a partially-consumed iterator."""
    for oper in [copy.deepcopy] + picklecopiers:
        it = chain('abc', 'def')
        self.assertEqual(list(oper(it)), list('abcdef'))
        self.assertEqual(next(it), 'a')
        # The copy resumes from the same position as the original.
        self.assertEqual(list(oper(it)), list('bcdef'))
        self.assertEqual(list(oper(chain(''))), [])
        self.assertEqual(take(4, oper(chain('abc', 'def'))), list('abcd'))
        self.assertRaises(TypeError, list, oper(chain(2, 3)))
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        self.pickletest(proto, chain('abc', 'def'), compare=list('abcdef'))
@support.impl_detail("XXX chain.__setstate__ does not do all checks"
                     " on PyPy, will just complain later (but could"
                     " be fixed if important)")
def test_chain_setstate(self):
    """chain.__setstate__(): reject malformed state, accept a 1- or 2-tuple
    of (source_iterator[, active_iterator])."""
    self.assertRaises(TypeError, chain().__setstate__, ())
    self.assertRaises(TypeError, chain().__setstate__, [])
    self.assertRaises(TypeError, chain().__setstate__, 0)
    self.assertRaises(TypeError, chain().__setstate__, ([],))
    self.assertRaises(TypeError, chain().__setstate__, (iter([]), []))
    it = chain()
    it.__setstate__((iter(['abc', 'def']),))
    self.assertEqual(list(it), ['a', 'b', 'c', 'd', 'e', 'f'])
    it = chain()
    # With a second element, the active iterator is drained before the source.
    it.__setstate__((iter(['abc', 'def']), iter(['ghi'])))
    self.assertEqual(list(it), ['ghi', 'a', 'b', 'c', 'd', 'e', 'f'])
def test_combinations(self):
    """combinations(): argument validation, exact outputs, invariants over
    all small (n, r), and agreement with three pure-Python references."""
    self.assertRaises(TypeError, combinations, 'abc')       # missing r argument
    self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments
    self.assertRaises(TypeError, combinations, None)        # pool is not iterable
    self.assertRaises(ValueError, combinations, 'abc', -2)  # r is negative

    # Each assertion is run on the plain iterator and on pickle round-trips.
    for op in [lambda a:a] + picklecopiers:
        self.assertEqual(list(op(combinations('abc', 32))), [])     # r > n
        self.assertEqual(list(op(combinations('ABCD', 2))),
                         [('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
        testIntermediate = combinations('ABCD', 2)
        next(testIntermediate)
        self.assertEqual(list(op(testIntermediate)),
                         [('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
        self.assertEqual(list(op(combinations(range(4), 3))),
                         [(0,1,2), (0,1,3), (0,2,3), (1,2,3)])
        testIntermediate = combinations(range(4), 3)
        next(testIntermediate)
        self.assertEqual(list(op(testIntermediate)),
                         [(0,1,3), (0,2,3), (1,2,3)])

    def combinations1(iterable, r):
        'Pure python version shown in the docs'
        pool = tuple(iterable)
        n = len(pool)
        if r > n:
            return
        indices = list(range(r))
        yield tuple(pool[i] for i in indices)
        while 1:
            for i in reversed(range(r)):
                if indices[i] != i + n - r:
                    break
            else:
                return
            indices[i] += 1
            for j in range(i+1, r):
                indices[j] = indices[j-1] + 1
            yield tuple(pool[i] for i in indices)

    def combinations2(iterable, r):
        'Pure python version shown in the docs'
        pool = tuple(iterable)
        n = len(pool)
        for indices in permutations(range(n), r):
            if sorted(indices) == list(indices):
                yield tuple(pool[i] for i in indices)

    def combinations3(iterable, r):
        'Pure python version from cwr()'
        pool = tuple(iterable)
        n = len(pool)
        for indices in combinations_with_replacement(range(n), r):
            if len(set(indices)) == r:
                yield tuple(pool[i] for i in indices)

    for n in range(7):
        values = [5*x-12 for x in range(n)]
        for r in range(n+2):
            result = list(combinations(values, r))
            self.assertEqual(len(result), 0 if r>n else fact(n) / fact(r) / fact(n-r)) # right number of combs
            self.assertEqual(len(result), len(set(result)))         # no repeats
            self.assertEqual(result, sorted(result))                # lexicographic order
            for c in result:
                self.assertEqual(len(c), r)                         # r-length combinations
                self.assertEqual(len(set(c)), r)                    # no duplicate elements
                self.assertEqual(list(c), sorted(c))                # keep original ordering
                self.assertTrue(all(e in values for e in c))        # elements taken from input iterable
                self.assertEqual(list(c),
                                 [e for e in values if e in c])     # comb is a subsequence of the input iterable
            self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version
            self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version
            self.assertEqual(result, list(combinations3(values, r))) # matches second pure python version
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                self.pickletest(proto, combinations(values, r))     # test pickling
@support.bigaddrspacetest
def test_combinations_overflow(self):
    """A pathologically large r must raise cleanly, not crash the process."""
    self.assertRaises((OverflowError, MemoryError),
                      combinations, "AA", 2**29)
# Test implementation detail: tuple re-use
@support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_tuple_reuse(self):
    """When consumed lazily, CPython reuses one result tuple; when the
    results are stored in a list, distinct tuples must be produced."""
    lazy_ids = set(map(id, combinations('abcde', 3)))
    self.assertEqual(len(lazy_ids), 1)
    stored_ids = set(map(id, list(combinations('abcde', 3))))
    self.assertNotEqual(len(stored_ids), 1)
def test_combinations_with_replacement(self):
    """combinations_with_replacement(): argument validation, exact outputs,
    invariants over all small (n, r), and two pure-Python references."""
    cwr = combinations_with_replacement
    self.assertRaises(TypeError, cwr, 'abc')        # missing r argument
    self.assertRaises(TypeError, cwr, 'abc', 2, 1)  # too many arguments
    self.assertRaises(TypeError, cwr, None)         # pool is not iterable
    self.assertRaises(ValueError, cwr, 'abc', -2)   # r is negative

    # Each assertion is run on the plain iterator and on pickle round-trips.
    for op in [lambda a:a] + picklecopiers:
        self.assertEqual(list(op(cwr('ABC', 2))),
                         [('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])
        testIntermediate = cwr('ABC', 2)
        next(testIntermediate)
        self.assertEqual(list(op(testIntermediate)),
                         [('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])

    def cwr1(iterable, r):
        'Pure python version shown in the docs'
        # number items returned:  (n+r-1)! / r! / (n-1)! when n>0
        pool = tuple(iterable)
        n = len(pool)
        if not n and r:
            return
        indices = [0] * r
        yield tuple(pool[i] for i in indices)
        while 1:
            for i in reversed(range(r)):
                if indices[i] != n - 1:
                    break
            else:
                return
            indices[i:] = [indices[i] + 1] * (r - i)
            yield tuple(pool[i] for i in indices)

    def cwr2(iterable, r):
        'Pure python version shown in the docs'
        pool = tuple(iterable)
        n = len(pool)
        for indices in product(range(n), repeat=r):
            if sorted(indices) == list(indices):
                yield tuple(pool[i] for i in indices)

    def numcombs(n, r):
        # Closed-form count: (n+r-1)! / r! / (n-1)!, with the n == 0 edge case.
        if not n:
            return 0 if r else 1
        return fact(n+r-1) / fact(r)/ fact(n-1)

    for n in range(7):
        values = [5*x-12 for x in range(n)]
        for r in range(n+2):
            result = list(cwr(values, r))
            self.assertEqual(len(result), numcombs(n, r))           # right number of combs
            self.assertEqual(len(result), len(set(result)))         # no repeats
            self.assertEqual(result, sorted(result))                # lexicographic order
            regular_combs = list(combinations(values, r))           # compare to combs without replacement
            if n == 0 or r <= 1:
                self.assertEqual(result, regular_combs)             # cases that should be identical
            else:
                self.assertTrue(set(result) >= set(regular_combs))  # rest should be supersets of regular combs
            for c in result:
                self.assertEqual(len(c), r)                         # r-length combinations
                noruns = [k for k,v in groupby(c)]                  # combo without consecutive repeats
                self.assertEqual(len(noruns), len(set(noruns)))     # no repeats other than consecutive
                self.assertEqual(list(c), sorted(c))                # keep original ordering
                self.assertTrue(all(e in values for e in c))        # elements taken from input iterable
                self.assertEqual(noruns,
                                 [e for e in values if e in c])     # comb is a subsequence of the input iterable
            self.assertEqual(result, list(cwr1(values, r)))         # matches first pure python version
            self.assertEqual(result, list(cwr2(values, r)))         # matches second pure python version
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                self.pickletest(proto, cwr(values,r))               # test pickling
@support.bigaddrspacetest
def test_combinations_with_replacement_overflow(self):
    """A pathologically large r must raise cleanly, not crash the process."""
    self.assertRaises((OverflowError, MemoryError),
                      combinations_with_replacement, "AA", 2**30)
# Test implementation detail: tuple re-use
@support.impl_detail("tuple reuse is specific to CPython")
def test_combinations_with_replacement_tuple_reuse(self):
    """When consumed lazily, CPython reuses one result tuple; when the
    results are stored in a list, distinct tuples must be produced."""
    lazy_ids = set(map(id, combinations_with_replacement('abcde', 3)))
    self.assertEqual(len(lazy_ids), 1)
    stored_ids = set(map(id, list(combinations_with_replacement('abcde', 3))))
    self.assertNotEqual(len(stored_ids), 1)
def test_permutations(self):
    """permutations(): argument validation, exact outputs, invariants over
    all small (n, r), default r, and two pure-Python references."""
    self.assertRaises(TypeError, permutations)              # too few arguments
    self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments
    self.assertRaises(TypeError, permutations, None)        # pool is not iterable
    self.assertRaises(ValueError, permutations, 'abc', -2)  # r is negative
    self.assertEqual(list(permutations('abc', 32)), [])     # r > n
    self.assertRaises(TypeError, permutations, 'abc', 's')  # r is not an int or None
    self.assertEqual(list(permutations(range(3), 2)),
                     [(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)])

    def permutations1(iterable, r=None):
        'Pure python version shown in the docs'
        pool = tuple(iterable)
        n = len(pool)
        r = n if r is None else r
        if r > n:
            return
        indices = list(range(n))
        cycles = list(range(n-r+1, n+1))[::-1]
        yield tuple(pool[i] for i in indices[:r])
        while n:
            for i in reversed(range(r)):
                cycles[i] -= 1
                if cycles[i] == 0:
                    indices[i:] = indices[i+1:] + indices[i:i+1]
                    cycles[i] = n - i
                else:
                    j = cycles[i]
                    indices[i], indices[-j] = indices[-j], indices[i]
                    yield tuple(pool[i] for i in indices[:r])
                    break
            else:
                return

    def permutations2(iterable, r=None):
        'Pure python version shown in the docs'
        pool = tuple(iterable)
        n = len(pool)
        r = n if r is None else r
        for indices in product(range(n), repeat=r):
            if len(set(indices)) == r:
                yield tuple(pool[i] for i in indices)

    for n in range(7):
        values = [5*x-12 for x in range(n)]
        for r in range(n+2):
            result = list(permutations(values, r))
            self.assertEqual(len(result), 0 if r>n else fact(n) / fact(n-r))      # right number of perms
            self.assertEqual(len(result), len(set(result)))         # no repeats
            self.assertEqual(result, sorted(result))                # lexicographic order
            for p in result:
                self.assertEqual(len(p), r)                         # r-length permutations
                self.assertEqual(len(set(p)), r)                    # no duplicate elements
                self.assertTrue(all(e in values for e in p))        # elements taken from input iterable
            self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version
            self.assertEqual(result, list(permutations2(values, r))) # matches second pure python version
            if r == n:
                self.assertEqual(result, list(permutations(values, None))) # test r as None
                self.assertEqual(result, list(permutations(values)))       # test default r
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                self.pickletest(proto, permutations(values, r))     # test pickling
@support.bigaddrspacetest
def test_permutations_overflow(self):
    """A pathologically large r must raise cleanly, not crash the process."""
    self.assertRaises((OverflowError, MemoryError),
                      permutations, "A", 2**30)
@support.impl_detail("tuple reuse is specific to CPython")
def test_permutations_tuple_reuse(self):
    """When consumed lazily, CPython reuses one result tuple; when the
    results are stored in a list, distinct tuples must be produced."""
    lazy_ids = set(map(id, permutations('abcde', 3)))
    self.assertEqual(len(lazy_ids), 1)
    stored_ids = set(map(id, list(permutations('abcde', 3))))
    self.assertNotEqual(len(stored_ids), 1)
def test_combinatorics(self):
    # Test relationships between product(), permutations(),
    # combinations() and combinations_with_replacement().
    """Cross-check the four combinatoric generators against each other:
    counts, ordering, and the subset/filter relationships among them."""
    for n in range(6):
        s = 'ABCDEFG'[:n]
        for r in range(8):
            prod = list(product(s, repeat=r))
            cwr = list(combinations_with_replacement(s, r))
            perm = list(permutations(s, r))
            comb = list(combinations(s, r))
            # Check size
            self.assertEqual(len(prod), n**r)
            self.assertEqual(len(cwr), (fact(n+r-1) / fact(r)/ fact(n-1)) if n else (not r))
            self.assertEqual(len(perm), 0 if r>n else fact(n) / fact(n-r))
            self.assertEqual(len(comb), 0 if r>n else fact(n) / fact(r) / fact(n-r))
            # Check lexicographic order without repeated tuples
            self.assertEqual(prod, sorted(set(prod)))
            self.assertEqual(cwr, sorted(set(cwr)))
            self.assertEqual(perm, sorted(set(perm)))
            self.assertEqual(comb, sorted(set(comb)))
            # Check interrelationships
            self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)])      # cwr: prods which are sorted
            self.assertEqual(perm, [t for t in prod if len(set(t))==r])         # perm: prods with no dups
            self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)])     # comb: perms that are sorted
            self.assertEqual(comb, [t for t in cwr if len(set(t))==r])          # comb: cwrs without dups
            self.assertEqual(comb, list(filter(set(cwr).__contains__, perm)))   # comb: perm that is a cwr
            self.assertEqual(comb, list(filter(set(perm).__contains__, cwr)))   # comb: cwr that is a perm
            self.assertEqual(comb, sorted(set(cwr) & set(perm)))                # comb: both a cwr and a perm
def test_compress(self):
    """compress(): selector filtering, length mismatch, infinite selectors,
    argument errors, and copy/deepcopy/pickle round-trips."""
    self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF'))
    self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))
    self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list(''))
    self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF'))
    # Output stops at the shorter of data / selectors.
    self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC'))
    self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC'))
    # Works with infinite data and an infinite alternating selector stream.
    n = 10000
    data = chain.from_iterable(repeat(range(6), n))
    selectors = chain.from_iterable(repeat((0, 1)))
    self.assertEqual(list(compress(data, selectors)), [1,3,5] * n)
    self.assertRaises(TypeError, compress, None, range(6))      # 1st arg not iterable
    self.assertRaises(TypeError, compress, range(6), None)      # 2nd arg not iterable
    self.assertRaises(TypeError, compress, range(6))            # too few args
    self.assertRaises(TypeError, compress, range(6), None)      # too many args

    # check copy, deepcopy, pickle
    for op in [lambda a:copy.copy(a), lambda a:copy.deepcopy(a)] + picklecopiers:
        for data, selectors, result1, result2 in [
            ('ABCDEF', [1,0,1,0,1,1], 'ACEF', 'CEF'),
            ('ABCDEF', [0,0,0,0,0,0], '', ''),
            ('ABCDEF', [1,1,1,1,1,1], 'ABCDEF', 'BCDEF'),
            ('ABCDEF', [1,0,1], 'AC', 'C'),
            ('ABC', [0,1,1,1,1,1], 'BC', 'C'),
            ]:
            self.assertEqual(list(op(compress(data=data, selectors=selectors))), list(result1))
            self.assertEqual(list(op(compress(data, selectors))), list(result1))
            testIntermediate = compress(data, selectors)
            if result1:
                next(testIntermediate)
                # result2 is result1 with its first element already consumed.
                self.assertEqual(list(op(testIntermediate)), list(result2))
def test_count(self):
    """count(): default start/step, numeric-type support, behaviour around
    sys.maxsize, repr, and copy/deepcopy/pickle round-trips."""
    self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
    self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
    self.assertEqual(take(2, lzip('abc',count(3))), [('a', 3), ('b', 4)])
    self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)])
    self.assertEqual(take(2, zip('abc',count(-3))), [('a', -3), ('b', -2)])
    self.assertRaises(TypeError, count, 2, 3, 4)
    self.assertRaises(TypeError, count, 'a')
    # Crossing the Py_ssize_t boundary must transition to big ints cleanly.
    self.assertEqual(take(10, count(maxsize-5)),
                     list(range(maxsize-5, maxsize+5)))
    self.assertEqual(take(10, count(-maxsize-5)),
                     list(range(-maxsize-5, -maxsize+5)))
    # Non-integer numeric types are supported.
    self.assertEqual(take(3, count(3.25)), [3.25, 4.25, 5.25])
    self.assertEqual(take(3, count(3.25-4j)), [3.25-4j, 4.25-4j, 5.25-4j])
    self.assertEqual(take(3, count(Decimal('1.1'))),
                     [Decimal('1.1'), Decimal('2.1'), Decimal('3.1')])
    self.assertEqual(take(3, count(Fraction(2, 3))),
                     [Fraction(2, 3), Fraction(5, 3), Fraction(8, 3)])
    BIGINT = 1<<1000
    self.assertEqual(take(3, count(BIGINT)), [BIGINT, BIGINT+1, BIGINT+2])
    # repr() reflects the current position, not the starting value.
    c = count(3)
    self.assertEqual(repr(c), 'count(3)')
    next(c)
    self.assertEqual(repr(c), 'count(4)')
    c = count(-9)
    self.assertEqual(repr(c), 'count(-9)')
    next(c)
    self.assertEqual(next(c), -8)
    self.assertEqual(repr(count(10.25)), 'count(10.25)')
    self.assertEqual(repr(count(10.0)), 'count(10.0)')
    self.assertEqual(type(next(count(10.0))), float)
    for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
        # Test repr
        r1 = repr(count(i))
        r2 = 'count(%r)'.__mod__(i)
        self.assertEqual(r1, r2)

    # check copy, deepcopy, pickle
    for value in -3, 3, maxsize-5, maxsize+5:
        c = count(value)
        self.assertEqual(next(copy.copy(c)), value)
        self.assertEqual(next(copy.deepcopy(c)), value)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, count(value))

    # check proper internal error handling for large "step' sizes
    count(1, maxsize+5); sys.exc_info()
def test_count_with_stride(self):
    """count() with an explicit step: positive/zero/negative strides, mixed
    numeric types, repr (including step suppression), and pickling."""
    self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
    self.assertEqual(lzip('abc',count(start=2,step=3)),
                     [('a', 2), ('b', 5), ('c', 8)])
    self.assertEqual(lzip('abc',count(step=-1)),
                     [('a', 0), ('b', -1), ('c', -2)])
    self.assertRaises(TypeError, count, 'a', 'b')
    self.assertEqual(lzip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)])
    self.assertEqual(lzip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)])
    self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)])
    # Strides across the Py_ssize_t boundary must stay exact.
    self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3)))
    self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3)))
    self.assertEqual(take(3, count(10, maxsize+5)),
                     list(range(10, 10+3*(maxsize+5), maxsize+5)))
    self.assertEqual(take(3, count(2, 1.25)), [2, 3.25, 4.5])
    self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j])
    self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))),
                     [Decimal('1.1'), Decimal('1.2'), Decimal('1.3')])
    self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))),
                     [Fraction(2,3), Fraction(17,21), Fraction(20,21)])
    BIGINT = 1<<1000
    self.assertEqual(take(3, count(step=BIGINT)), [0, BIGINT, 2*BIGINT])
    self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0]))
    # repr() reflects the current position and shows the step when != 1.
    c = count(3, 5)
    self.assertEqual(repr(c), 'count(3, 5)')
    next(c)
    self.assertEqual(repr(c), 'count(8, 5)')
    c = count(-9, 0)
    self.assertEqual(repr(c), 'count(-9, 0)')
    next(c)
    self.assertEqual(repr(c), 'count(-9, 0)')
    c = count(-9, -3)
    self.assertEqual(repr(c), 'count(-9, -3)')
    next(c)
    self.assertEqual(repr(c), 'count(-12, -3)')
    self.assertEqual(repr(c), 'count(-12, -3)')
    self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)')
    self.assertEqual(repr(count(10.5, 1)), 'count(10.5)')           # suppress step=1 when it's an int
    self.assertEqual(repr(count(10.5, 1.00)), 'count(10.5, 1.0)')   # do show float values like 1.0
    self.assertEqual(repr(count(10, 1.00)), 'count(10, 1.0)')
    # An int start with a float step flips to float after the first item.
    c = count(10, 1.0)
    self.assertEqual(type(next(c)), int)
    self.assertEqual(type(next(c)), float)
    for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5):
        for j in  (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 1, 10, sys.maxsize-5, sys.maxsize+5):
            # Test repr
            r1 = repr(count(i, j))
            if j == 1:
                r2 = ('count(%r)' % i)
            else:
                r2 = ('count(%r, %r)' % (i, j))
            self.assertEqual(r1, r2)
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                self.pickletest(proto, count(i, j))
def test_cycle(self):
    """cycle(): basic repetition, argument errors, and deepcopy/pickle
    round-trips of both partly and fully consumed input iterators."""
    self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
    self.assertEqual(list(cycle('')), [])
    self.assertRaises(TypeError, cycle)
    self.assertRaises(TypeError, cycle, 5)
    self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])

    # check copy, deepcopy, pickle
    c = cycle('abc')
    self.assertEqual(next(c), 'a')
    # simple copy currently not supported, because __reduce__ returns
    # an internal iterator
    # self.assertEqual(take(10, copy.copy(c)), list('bcabcabcab'))
    self.assertEqual(take(10, copy.deepcopy(c)), list('bcabcabcab'))
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        self.assertEqual(take(10, pickle.loads(pickle.dumps(c, proto))),
                         list('bcabcabcab'))
        next(c)
        self.assertEqual(take(10, pickle.loads(pickle.dumps(c, proto))),
                         list('cabcabcabc'))
        next(c)
        next(c)
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        self.pickletest(proto, cycle('abc'))

    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        # test with partial consumed input iterable
        it = iter('abcde')
        c = cycle(it)
        _ = [next(c) for i in range(2)]      # consume 2 of 5 inputs
        p = pickle.dumps(c, proto)
        d = pickle.loads(p)                  # rebuild the cycle object
        self.assertEqual(take(20, d), list('cdeabcdeabcdeabcdeab'))

        # test with completely consumed input iterable
        it = iter('abcde')
        c = cycle(it)
        _ = [next(c) for i in range(7)]      # consume 7 of 5 inputs
        p = pickle.dumps(c, proto)
        d = pickle.loads(p)                  # rebuild the cycle object
        self.assertEqual(take(20, d), list('cdeabcdeabcdeabcdeab'))
@support.impl_detail("XXX cycle.__reduce__ and __setstate__ differ"
                     " on PyPy (but could be fixed if important)")
def test_cycle_setstate(self):
    """cycle.__setstate__(): both restore modes plus state validation."""
    # Verify both modes for restoring state

    # Mode 0 is efficient.  It uses an incompletely consumed input
    # iterator to build a cycle object and then passes in state with
    # a list of previously consumed values.  There is no data
    # overlap between the two.
    c = cycle('defg')
    c.__setstate__((list('abc'), 0))
    self.assertEqual(take(20, c), list('defgabcdefgabcdefgab'))

    # Mode 1 is inefficient.  It starts with a cycle object built
    # from an iterator over the remaining elements in a partial
    # cycle and then passes in state with all of the previously
    # seen values (this overlaps values included in the iterator).
    c = cycle('defg')
    c.__setstate__((list('abcdefg'), 1))
    self.assertEqual(take(20, c), list('defgabcdefgabcdefgab'))

    # The first argument to setstate needs to be a tuple
    with self.assertRaises(TypeError):
        cycle('defg').__setstate__([list('abcdefg'), 0])

    # The first argument in the setstate tuple must be a list
    with self.assertRaises(TypeError):
        c = cycle('defg')
        c.__setstate__((tuple('defg'), 0))
    take(20, c)

    # The second argument in the setstate tuple must be an int
    with self.assertRaises(TypeError):
        cycle('defg').__setstate__((list('abcdefg'), 'x'))

    self.assertRaises(TypeError, cycle('').__setstate__, ())
    self.assertRaises(TypeError, cycle('').__setstate__, ([],))
def test_groupby(self):
    """groupby(): argument handling, normal and nested grouping, pickle
    round-trips, partially consumed groups, and error propagation from
    the source iterator, __eq__, and the key function."""
    # Check whether it accepts arguments correctly
    self.assertEqual([], list(groupby([])))
    self.assertEqual([], list(groupby([], key=id)))
    self.assertRaises(TypeError, list, groupby('abc', []))
    self.assertRaises(TypeError, groupby, None)
    self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)

    # Check normal input
    s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
         (2,15,22), (3,16,23), (3,17,23)]
    dup = []
    for k, g in groupby(s, lambda r:r[0]):
        for elem in g:
            self.assertEqual(k, elem[0])
            dup.append(elem)
    self.assertEqual(s, dup)

    # Check normal pickled
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        dup = []
        for k, g in pickle.loads(pickle.dumps(groupby(s, testR), proto)):
            for elem in g:
                self.assertEqual(k, elem[0])
                dup.append(elem)
        self.assertEqual(s, dup)

    # Check nested case
    dup = []
    for k, g in groupby(s, testR):
        for ik, ig in groupby(g, testR2):
            for elem in ig:
                self.assertEqual(k, elem[0])
                self.assertEqual(ik, elem[2])
                dup.append(elem)
    self.assertEqual(s, dup)

    # Check nested and pickled
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        dup = []
        for k, g in pickle.loads(pickle.dumps(groupby(s, testR), proto)):
            for ik, ig in pickle.loads(pickle.dumps(groupby(g, testR2), proto)):
                for elem in ig:
                    self.assertEqual(k, elem[0])
                    self.assertEqual(ik, elem[2])
                    dup.append(elem)
        self.assertEqual(s, dup)

    # Check case where inner iterator is not used
    keys = [k for k, g in groupby(s, testR)]
    expectedkeys = set([r[0] for r in s])
    self.assertEqual(set(keys), expectedkeys)
    self.assertEqual(len(keys), len(expectedkeys))

    # Check case where inner iterator is used after advancing the groupby
    # iterator
    s = list(zip('AABBBAAAA', range(9)))
    it = groupby(s, testR)
    _, g1 = next(it)
    _, g2 = next(it)
    _, g3 = next(it)
    # Earlier group iterators become empty once the groupby has advanced.
    self.assertEqual(list(g1), [])
    self.assertEqual(list(g2), [])
    self.assertEqual(next(g3), ('A', 5))
    list(it)  # exhaust the groupby iterator
    self.assertEqual(list(g3), [])

    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        it = groupby(s, testR)
        _, g = next(it)
        next(it)
        next(it)
        self.assertEqual(list(pickle.loads(pickle.dumps(g, proto))), [])

    # Exercise pipes and filters style
    s = 'abracadabra'
    # sort s | uniq
    r = [k for k, g in groupby(sorted(s))]
    self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
    # sort s | uniq -d
    r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
    self.assertEqual(r, ['a', 'b', 'r'])
    # sort s | uniq -c
    r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
    self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
    # sort s | uniq -c | sort -rn | head -3
    r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
    self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])

    # iter.__next__ failure
    class ExpectedError(Exception):
        pass
    def delayed_raise(n=0):
        # Yield n items, then raise ExpectedError.
        for i in range(n):
            yield 'yo'
        raise ExpectedError
    def gulp(iterable, keyp=None, func=list):
        # Fully consume every group produced by groupby().
        return [func(g) for k, g in groupby(iterable, keyp)]

    # iter.__next__ failure on outer object
    self.assertRaises(ExpectedError, gulp, delayed_raise(0))
    # iter.__next__ failure on inner object
    self.assertRaises(ExpectedError, gulp, delayed_raise(1))

    # __eq__ failure
    class DummyCmp:
        def __eq__(self, dst):
            raise ExpectedError
    s = [DummyCmp(), DummyCmp(), None]

    # __eq__ failure on outer object
    self.assertRaises(ExpectedError, gulp, s, func=id)
    # __eq__ failure on inner object
    self.assertRaises(ExpectedError, gulp, s)

    # keyfunc failure
    def keyfunc(obj):
        if keyfunc.skip > 0:
            keyfunc.skip -= 1
            return obj
        else:
            raise ExpectedError

    # keyfunc failure on outer object
    keyfunc.skip = 0
    self.assertRaises(ExpectedError, gulp, [None], keyfunc)
    keyfunc.skip = 1
    self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
def test_filter(self):
    """filter(): truth filtering with callable/None/bool predicates,
    argument errors, and copy/deepcopy/pickle support."""
    self.assertEqual(list(filter(isEven, range(6))), [0,2,4])
    self.assertEqual(list(filter(None, [0,1,0,2,0])), [1,2])
    self.assertEqual(list(filter(bool, [0,1,0,2,0])), [1,2])
    self.assertEqual(take(4, filter(isEven, count())), [0,2,4,6])
    self.assertRaises(TypeError, filter)
    self.assertRaises(TypeError, filter, lambda x:x)
    self.assertRaises(TypeError, filter, lambda x:x, range(6), 7)
    self.assertRaises(TypeError, filter, isEven, 3)
    self.assertRaises(TypeError, next, filter(range(6), range(6)))

    # check copy, deepcopy, pickle
    ans = [0,2,4]

    c = filter(isEven, range(6))
    self.assertEqual(list(copy.copy(c)), ans)
    c = filter(isEven, range(6))
    self.assertEqual(list(copy.deepcopy(c)), ans)
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        c = filter(isEven, range(6))
        self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans)
        # After consuming one item, a pickled copy resumes mid-stream.
        next(c)
        self.assertEqual(list(pickle.loads(pickle.dumps(c, proto))), ans[1:])
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        c = filter(isEven, range(6))
        self.pickletest(proto, c)
def test_filterfalse(self):
    """filterfalse() keeps exactly the items the predicate rejects."""
    # Finite inputs with callable, None, and bool predicates.
    cases = [
        (isEven, range(6), [1, 3, 5]),
        (None, [0, 1, 0, 2, 0], [0, 0, 0]),
        (bool, [0, 1, 0, 2, 0], [0, 0, 0]),
    ]
    for pred, data, expected in cases:
        self.assertEqual(list(filterfalse(pred, data)), expected)
    # Works with an infinite source when consumption is bounded.
    self.assertEqual(take(4, filterfalse(isEven, count())), [1, 3, 5, 7])
    # Argument errors: wrong arity or a non-iterable data argument.
    self.assertRaises(TypeError, filterfalse)
    self.assertRaises(TypeError, filterfalse, lambda x: x)
    self.assertRaises(TypeError, filterfalse, lambda x: x, range(6), 7)
    self.assertRaises(TypeError, filterfalse, isEven, 3)
    self.assertRaises(TypeError, next, filterfalse(range(6), range(6)))
    # Pickle round-trips at every protocol.
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        self.pickletest(proto, filterfalse(isEven, range(6)))
def test_zip(self):
    """zip(): truncation to the shortest input, zero/one-argument forms,
    and argument errors."""
    # XXX This is rather silly now that builtin zip() calls zip()...
    ans = [(x,y) for x, y in zip('abc',count())]
    self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
    self.assertEqual(list(zip('abc', range(6))), lzip('abc', range(6)))
    self.assertEqual(list(zip('abcdef', range(3))), lzip('abcdef', range(3)))
    self.assertEqual(take(3,zip('abcdef', count())), lzip('abcdef', range(3)))
    self.assertEqual(list(zip('abcdef')), lzip('abcdef'))
    self.assertEqual(list(zip()), lzip())
    self.assertRaises(TypeError, zip, 3)
    self.assertRaises(TypeError, zip, range(3), 3)
    self.assertEqual([tuple(list(pair)) for pair in zip('abc', 'def')],
                     lzip('abc', 'def'))
    self.assertEqual([pair for pair in zip('abc', 'def')],
                     lzip('abc', 'def'))
@support.impl_detail("tuple reuse is specific to CPython")
def test_zip_tuple_reuse(self):
    """CPython detail: zip() reuses its result tuple when the consumer
    drops each one immediately; also check copy/deepcopy/pickle of zip
    iterators."""
    ids = list(map(id, zip('abc', 'def')))
    self.assertEqual(min(ids), max(ids))  # one tuple object recycled
    ids = list(map(id, list(zip('abc', 'def'))))
    self.assertEqual(len(dict.fromkeys(ids)), len(ids))  # all distinct when kept alive

    # check copy, deepcopy, pickle
    ans = [(x,y) for x, y in copy.copy(zip('abc',count()))]
    self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])

    ans = [(x,y) for x, y in copy.deepcopy(zip('abc',count()))]
    self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])

    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        ans = [(x,y) for x, y in pickle.loads(pickle.dumps(zip('abc',count()), proto))]
        self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])

    # A partially consumed zip resumes mid-stream after unpickling.
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        testIntermediate = zip('abc',count())
        next(testIntermediate)
        ans = [(x,y) for x, y in pickle.loads(pickle.dumps(testIntermediate, proto))]
        self.assertEqual(ans, [('b', 1), ('c', 2)])

    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        self.pickletest(proto, zip('abc', count()))
def test_ziplongest(self):
    """zip_longest(): agreement with a computed target, fillvalue
    handling, keyword validation, and equivalence with zip() on
    equal-length inputs.

    Fix: the failure message previously read 'Did not raise Type in: '
    (typo for TypeError).
    """
    for args in [
            ['abc', range(6)],
            [range(6), 'abc'],
            [range(1000), range(2000,2100), range(3000,3050)],
            [range(1000), range(0), range(3000,3050), range(1200), range(1500)],
            [range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)],
        ]:
        # Expected result: pad the shorter inputs with None.
        target = [tuple([arg[i] if i < len(arg) else None for arg in args])
                  for i in range(max(map(len, args)))]
        self.assertEqual(list(zip_longest(*args)), target)
        self.assertEqual(list(zip_longest(*args, **{})), target)
        target = [tuple((e is None and 'X' or e) for e in t) for t in target]   # Replace None fills with 'X'
        self.assertEqual(list(zip_longest(*args, **dict(fillvalue='X'))), target)

    self.assertEqual(take(3,zip_longest('abcdef', count())), list(zip('abcdef', range(3)))) # take 3 from infinite input

    self.assertEqual(list(zip_longest()), list(zip()))
    self.assertEqual(list(zip_longest([])), list(zip([])))
    self.assertEqual(list(zip_longest('abcdef')), list(zip('abcdef')))

    self.assertEqual(list(zip_longest('abc', 'defg', **{})),
                     list(zip(list('abc')+[None], 'defg'))) # empty keyword dict
    self.assertRaises(TypeError, zip_longest, 3)
    self.assertRaises(TypeError, zip_longest, range(3), 3)

    # Unknown keyword arguments must raise TypeError.
    for stmt in [
        "zip_longest('abc', fv=1)",
        "zip_longest('abc', fillvalue=1, bogus_keyword=None)",
    ]:
        try:
            eval(stmt, globals(), locals())
        except TypeError:
            pass
        else:
            self.fail('Did not raise TypeError in: ' + stmt)

    self.assertEqual([tuple(list(pair)) for pair in zip_longest('abc', 'def')],
                     list(zip('abc', 'def')))
    self.assertEqual([pair for pair in zip_longest('abc', 'def')],
                     list(zip('abc', 'def')))
@support.impl_detail("tuple reuse is specific to CPython")
def test_zip_longest_tuple_reuse(self):
ids = list(map(id, zip_longest('abc', 'def')))
self.assertEqual(min(ids), max(ids))
ids = list(map(id, list(zip_longest('abc', 'def'))))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_zip_longest_pickling(self):
    """zip_longest iterators survive pickling at every protocol."""
    samples = [
        ("abc", "def", {}),
        ("abc", "defgh", {}),
        ("abc", "defgh", {"fillvalue": 1}),
        ("", "defgh", {}),
    ]
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        for left, right, kwargs in samples:
            self.pickletest(proto, zip_longest(left, right, **kwargs))
def test_bug_7244(self):
    """Regression test for issue 7244: zip_longest must not lose a
    non-StopIteration exception raised by one of its inputs."""
    class Repeater:
        # this class is similar to itertools.repeat
        def __init__(self, o, t, e):
            self.o = o          # object to yield
            self.t = int(t)     # remaining number of yields
            self.e = e          # exception type raised when exhausted
        def __iter__(self): # its iterator is itself
            return self
        def __next__(self):
            if self.t > 0:
                self.t -= 1
                return self.o
            else:
                raise self.e

    # Formerly this code would fail in debug mode
    # with Undetected Error and Stop Iteration
    r1 = Repeater(1, 3, StopIteration)
    r2 = Repeater(2, 4, StopIteration)
    def run(r1, r2):
        result = []
        for i, j in zip_longest(r1, r2, fillvalue=0):
            with support.captured_output('stdout'):
                print((i, j))
            result.append((i, j))
        return result
    self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)])

    # Formerly, the RuntimeError would be lost
    # and StopIteration would stop as expected
    r1 = Repeater(1, 3, RuntimeError)
    r2 = Repeater(2, 4, StopIteration)
    it = zip_longest(r1, r2, fillvalue=0)
    self.assertEqual(next(it), (1, 2))
    self.assertEqual(next(it), (1, 2))
    self.assertEqual(next(it), (1, 2))
    self.assertRaises(RuntimeError, next, it)
def test_product(self):
    """product(): known results, the repeat keyword, and agreement with
    two pure-python reference implementations on random inputs."""
    for args, result in [
        ([], [()]),                     # zero iterables
        (['ab'], [('a',), ('b',)]),     # one iterable
        ([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
        ([range(0), range(2), range(3)], []),           # first iterable with zero length
        ([range(2), range(0), range(3)], []),           # middle iterable with zero length
        ([range(2), range(3), range(0)], []),           # last iterable with zero length
        ]:
        self.assertEqual(list(product(*args)), result)
        # repeat=r must be equivalent to passing the argument list r times.
        for r in range(4):
            self.assertEqual(list(product(*(args*r))),
                             list(product(*args, **dict(repeat=r))))
    self.assertEqual(len(list(product(*[range(7)]*6))), 7**6)
    self.assertRaises(TypeError, product, range(6), None)

    def product1(*args, **kwds):
        # Reference implementation using an explicit odometer of indices.
        pools = list(map(tuple, args)) * kwds.get('repeat', 1)
        n = len(pools)
        if n == 0:
            yield ()
            return
        if any(len(pool) == 0 for pool in pools):
            return
        indices = [0] * n
        yield tuple(pool[i] for pool, i in zip(pools, indices))
        while 1:
            for i in reversed(range(n)):  # right to left
                if indices[i] == len(pools[i]) - 1:
                    continue
                indices[i] += 1
                for j in range(i+1, n):
                    indices[j] = 0
                yield tuple(pool[i] for pool, i in zip(pools, indices))
                break
            else:
                return

    def product2(*args, **kwds):
        'Pure python version used in docs'
        pools = list(map(tuple, args)) * kwds.get('repeat', 1)
        result = [[]]
        for pool in pools:
            result = [x+[y] for x in result for y in pool]
        for prod in result:
            yield tuple(prod)

    # Random cross-checks against both reference implementations.
    argtypes = ['', 'abc', '', range(0), range(4), dict(a=1, b=2, c=3),
                set('abcdefg'), range(11), tuple(range(13))]
    for i in range(100):
        args = [random.choice(argtypes) for j in range(random.randrange(5))]
        expected_len = prod(map(len, args))
        self.assertEqual(len(list(product(*args))), expected_len)
        self.assertEqual(list(product(*args)), list(product1(*args)))
        self.assertEqual(list(product(*args)), list(product2(*args)))
        # Also works when the inputs are plain (non-sequence) iterators.
        args = map(iter, args)
        self.assertEqual(len(list(product(*args))), expected_len)
@support.bigaddrspacetest
def test_product_overflow(self):
    # A result of astronomical total length must fail cleanly with
    # OverflowError or MemoryError rather than crashing.
    with self.assertRaises((OverflowError, MemoryError)):
        product(*(['ab']*2**5), repeat=2**25)
@support.impl_detail("tuple reuse is specific to CPython")
def test_product_tuple_reuse(self):
self.assertEqual(len(set(map(id, product('abc', 'def')))), 1)
self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1)
def test_product_pickling(self):
    """product(): copy, deepcopy, and pickle over the same edge cases
    exercised in test_product."""
    # check copy, deepcopy, pickle
    for args, result in [
        ([], [()]),                     # zero iterables
        (['ab'], [('a',), ('b',)]),     # one iterable
        ([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables
        ([range(0), range(2), range(3)], []),           # first iterable with zero length
        ([range(2), range(0), range(3)], []),           # middle iterable with zero length
        ([range(2), range(3), range(0)], []),           # last iterable with zero length
        ]:
        self.assertEqual(list(copy.copy(product(*args))), result)
        self.assertEqual(list(copy.deepcopy(product(*args))), result)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, product(*args))
def test_product_issue_25021(self):
    """Issue #25021: __setstate__ must clamp out-of-range indices."""
    # test that indices are properly clamped to the length of the tuples
    p = product((1, 2),(3,))
    p.__setstate__((0, 0x1000))  # will access tuple element 1 if not clamped
    self.assertEqual(next(p), (2, 3))
    # test that empty tuple in the list will result in an
    # immediate StopIteration
    p = product((1, 2), (), (3,))
    p.__setstate__((0, 0, 0x1000))  # will access tuple element 1 if not clamped
    self.assertRaises(StopIteration, next, p)
def test_repeat(self):
    """repeat(): finite and infinite forms, repr() behaviour, argument
    errors, and copy/deepcopy/pickle."""
    self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a'])
    self.assertEqual(lzip(range(3),repeat('a')),
                     [(0, 'a'), (1, 'a'), (2, 'a')])
    self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
    self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
    self.assertEqual(list(repeat('a', 0)), [])
    self.assertEqual(list(repeat('a', -3)), [])
    self.assertRaises(TypeError, repeat)
    self.assertRaises(TypeError, repeat, None, 3, 4)
    self.assertRaises(TypeError, repeat, None, 'a')
    r = repeat(1+0j)
    self.assertEqual(repr(r), 'repeat((1+0j))')
    r = repeat(1+0j, 5)
    self.assertEqual(repr(r), 'repeat((1+0j), 5)')
    list(r)
    # The repr reflects the remaining (not the original) count.
    self.assertEqual(repr(r), 'repeat((1+0j), 0)')

    # check copy, deepcopy, pickle
    c = repeat(object='a', times=10)
    self.assertEqual(next(c), 'a')
    self.assertEqual(take(2, copy.copy(c)), list('a' * 2))
    self.assertEqual(take(2, copy.deepcopy(c)), list('a' * 2))
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        self.pickletest(proto, repeat(object='a', times=10))
def test_repeat_with_negative_times(self):
self.assertEqual(repr(repeat('a', -1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', -2)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-1)), "repeat('a', 0)")
self.assertEqual(repr(repeat('a', times=-2)), "repeat('a', 0)")
def test_map(self):
    """map(): multiple iterables, infinite inputs, argument errors, and
    copy/deepcopy/pickle."""
    self.assertEqual(list(map(operator.pow, range(3), range(1,7))),
                     [0**1, 1**2, 2**3])
    self.assertEqual(list(map(tupleize, 'abc', range(5))),
                     [('a',0),('b',1),('c',2)])
    self.assertEqual(list(map(tupleize, 'abc', count())),
                     [('a',0),('b',1),('c',2)])
    self.assertEqual(take(2,map(tupleize, 'abc', count())),
                     [('a',0),('b',1)])
    self.assertEqual(list(map(operator.pow, [])), [])
    self.assertRaises(TypeError, map)
    self.assertRaises(TypeError, list, map(None, range(3), range(3)))
    self.assertRaises(TypeError, map, operator.neg)
    self.assertRaises(TypeError, next, map(10, range(5)))
    self.assertRaises(ValueError, next, map(errfunc, [4], [5]))
    self.assertRaises(TypeError, next, map(onearg, [4], [5]))

    # check copy, deepcopy, pickle
    ans = [('a',0),('b',1),('c',2)]

    c = map(tupleize, 'abc', count())
    self.assertEqual(list(copy.copy(c)), ans)

    c = map(tupleize, 'abc', count())
    self.assertEqual(list(copy.deepcopy(c)), ans)

    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        c = map(tupleize, 'abc', count())
        self.pickletest(proto, c)
def test_starmap(self):
    """starmap(): unpacking of argument tuples, iterator arguments,
    argument errors, and copy/deepcopy/pickle."""
    self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
                     [0**1, 1**2, 2**3])
    self.assertEqual(take(3, starmap(operator.pow, zip(count(), count(1)))),
                     [0**1, 1**2, 2**3])
    self.assertEqual(list(starmap(operator.pow, [])), [])
    # An iterator (not just a tuple) is accepted as an argument pack.
    self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5])
    self.assertRaises(TypeError, list, starmap(operator.pow, [None]))
    self.assertRaises(TypeError, starmap)
    self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
    self.assertRaises(TypeError, next, starmap(10, [(4,5)]))
    self.assertRaises(ValueError, next, starmap(errfunc, [(4,5)]))
    self.assertRaises(TypeError, next, starmap(onearg, [(4,5)]))

    # check copy, deepcopy, pickle
    ans = [0**1, 1**2, 2**3]

    c = starmap(operator.pow, zip(range(3), range(1,7)))
    self.assertEqual(list(copy.copy(c)), ans)

    c = starmap(operator.pow, zip(range(3), range(1,7)))
    self.assertEqual(list(copy.deepcopy(c)), ans)

    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        c = starmap(operator.pow, zip(range(3), range(1,7)))
        self.pickletest(proto, c)
def test_islice(self):
    """islice(): agreement with range(), argument validation, source
    state after use, copy/deepcopy/pickle, source release on exhaustion
    (issue #21321), and __index__ support (issue #30537)."""
    for args in [          # islice(args) should agree with range(args)
            (10, 20, 3),
            (10, 3, 20),
            (10, 20),
            (10, 10),
            (10, 3),
            (20,)
            ]:
        self.assertEqual(list(islice(range(100), *args)),
                         list(range(*args)))

    for args, tgtargs in [  # Stop when seqn is exhausted
            ((10, 110, 3), ((10, 100, 3))),
            ((10, 110), ((10, 100))),
            ((110,), (100,))
            ]:
        self.assertEqual(list(islice(range(100), *args)),
                         list(range(*tgtargs)))

    # Test stop=None
    self.assertEqual(list(islice(range(10), None)), list(range(10)))
    self.assertEqual(list(islice(range(10), None, None)), list(range(10)))
    self.assertEqual(list(islice(range(10), None, None, None)), list(range(10)))
    self.assertEqual(list(islice(range(10), 2, None)), list(range(2, 10)))
    self.assertEqual(list(islice(range(10), 1, None, 2)), list(range(1, 10, 2)))

    # Test number of items consumed     SF #1171417
    it = iter(range(10))
    self.assertEqual(list(islice(it, 3)), list(range(3)))
    self.assertEqual(list(it), list(range(3, 10)))

    it = iter(range(10))
    self.assertEqual(list(islice(it, 3, 3)), [])
    self.assertEqual(list(it), list(range(3, 10)))

    # Test invalid arguments
    ra = range(10)
    self.assertRaises(TypeError, islice, ra)
    self.assertRaises(TypeError, islice, ra, 1, 2, 3, 4)
    self.assertRaises(ValueError, islice, ra, -5, 10, 1)
    self.assertRaises(ValueError, islice, ra, 1, -5, -1)
    self.assertRaises(ValueError, islice, ra, 1, 10, -1)
    self.assertRaises(ValueError, islice, ra, 1, 10, 0)
    self.assertRaises(ValueError, islice, ra, 'a')
    self.assertRaises(ValueError, islice, ra, 'a', 1)
    self.assertRaises(ValueError, islice, ra, 1, 'a')
    self.assertRaises(ValueError, islice, ra, 'a', 1, 1)
    self.assertRaises(ValueError, islice, ra, 1, 'a', 1)
    self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1)

    # Issue #10323: islice leaves the source in a predictable state.
    c = count()
    self.assertEqual(list(islice(c, 1, 3, 50)), [1])
    self.assertEqual(next(c), 3)

    # check copy, deepcopy, pickle
    for args in [          # islice(args) should agree with range(args)
            (10, 20, 3),
            (10, 3, 20),
            (10, 20),
            (10, 3),
            (20,)
            ]:
        self.assertEqual(list(copy.copy(islice(range(100), *args))),
                         list(range(*args)))
        self.assertEqual(list(copy.deepcopy(islice(range(100), *args))),
                         list(range(*args)))
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.pickletest(proto, islice(range(100), *args))

    # Issue #21321: check source iterator is not referenced
    # from islice() after the latter has been exhausted
    it = (x for x in (1, 2))
    wr = weakref.ref(it)
    it = islice(it, 1)
    self.assertIsNotNone(wr())
    list(it) # exhaust the iterator
    support.gc_collect()
    self.assertIsNone(wr())

    # Issue #30537: islice can accept integer-like objects as
    # arguments
    class IntLike(object):
        def __init__(self, val):
            self.val = val
        def __index__(self):
            return self.val
    self.assertEqual(list(islice(range(100), IntLike(10))), list(range(10)))
    self.assertEqual(list(islice(range(100), IntLike(10), IntLike(50))),
                     list(range(10, 50)))
    self.assertEqual(list(islice(range(100), IntLike(10), IntLike(50), IntLike(5))),
                     list(range(10,50,5)))
def test_takewhile(self):
    """takewhile(): truncation at the first failing element, argument
    errors, exhaustion, and copy/deepcopy/pickle."""
    data = [1, 3, 5, 20, 2, 4, 6, 8]
    self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
    self.assertEqual(list(takewhile(underten, [])), [])
    self.assertRaises(TypeError, takewhile)
    self.assertRaises(TypeError, takewhile, operator.pow)
    self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
    self.assertRaises(TypeError, next, takewhile(10, [(4,5)]))
    self.assertRaises(ValueError, next, takewhile(errfunc, [(4,5)]))
    t = takewhile(bool, [1, 1, 1, 0, 0, 0])
    self.assertEqual(list(t), [1, 1, 1])
    # Once stopped, the iterator stays stopped.
    self.assertRaises(StopIteration, next, t)

    # check copy, deepcopy, pickle
    self.assertEqual(list(copy.copy(takewhile(underten, data))), [1, 3, 5])
    self.assertEqual(list(copy.deepcopy(takewhile(underten, data))),
                     [1, 3, 5])
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        self.pickletest(proto, takewhile(underten, data))
def test_dropwhile(self):
    """dropwhile(): skip the leading run of matching elements, argument
    errors, and copy/deepcopy/pickle."""
    data = [1, 3, 5, 20, 2, 4, 6, 8]
    self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
    self.assertEqual(list(dropwhile(underten, [])), [])
    self.assertRaises(TypeError, dropwhile)
    self.assertRaises(TypeError, dropwhile, operator.pow)
    self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
    self.assertRaises(TypeError, next, dropwhile(10, [(4,5)]))
    self.assertRaises(ValueError, next, dropwhile(errfunc, [(4,5)]))

    # check copy, deepcopy, pickle
    self.assertEqual(list(copy.copy(dropwhile(underten, data))), [20, 2, 4, 6, 8])
    self.assertEqual(list(copy.deepcopy(dropwhile(underten, data))),
                     [20, 2, 4, 6, 8])
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        self.pickletest(proto, dropwhile(underten, data))
def test_tee(self):
    """tee(): interleaving patterns, dealloc of either half, n-way
    splits, re-instantiation from the tee type, pass-through on already
    copyable iterators, weak references, and copy/deepcopy/pickle."""
    n = 200

    a, b = tee([])        # test empty iterator
    self.assertEqual(list(a), [])
    self.assertEqual(list(b), [])

    a, b = tee(irange(n)) # test 100% interleaved
    self.assertEqual(lzip(a,b), lzip(range(n), range(n)))

    a, b = tee(irange(n)) # test 0% interleaved
    self.assertEqual(list(a), list(range(n)))
    self.assertEqual(list(b), list(range(n)))

    a, b = tee(irange(n)) # test dealloc of leading iterator
    for i in range(100):
        self.assertEqual(next(a), i)
    del a
    self.assertEqual(list(b), list(range(n)))

    a, b = tee(irange(n)) # test dealloc of trailing iterator
    for i in range(100):
        self.assertEqual(next(a), i)
    del b
    self.assertEqual(list(a), list(range(100, n)))

    for j in range(5):   # test randomly interleaved
        order = [0]*n + [1]*n
        random.shuffle(order)
        lists = ([], [])
        its = tee(irange(n))
        for i in order:
            value = next(its[i])
            lists[i].append(value)
        self.assertEqual(lists[0], list(range(n)))
        self.assertEqual(lists[1], list(range(n)))

    # test argument format checking
    self.assertRaises(TypeError, tee)
    self.assertRaises(TypeError, tee, 3)
    self.assertRaises(TypeError, tee, [1,2], 'x')
    self.assertRaises(TypeError, tee, [1,2], 3, 'x')

    # tee object should be instantiable
    a, b = tee('abc')
    c = type(a)('def')
    self.assertEqual(list(c), list('def'))

    # test long-lagged and multi-way split
    a, b, c = tee(range(2000), 3)
    for i in range(100):
        self.assertEqual(next(a), i)
    self.assertEqual(list(b), list(range(2000)))
    self.assertEqual([next(c), next(c)], list(range(2)))
    self.assertEqual(list(a), list(range(100,2000)))
    self.assertEqual(list(c), list(range(2,2000)))

    # test values of n
    self.assertRaises(TypeError, tee, 'abc', 'invalid')
    self.assertRaises(ValueError, tee, [], -1)
    for n in range(5):
        result = tee('abc', n)
        self.assertEqual(type(result), tuple)
        self.assertEqual(len(result), n)
        self.assertEqual([list(x) for x in result], [list('abc')]*n)

    # tee pass-through to copyable iterator
    a, b = tee('abc')
    c, d = tee(a)
    self.assertTrue(a is c)

    # test tee_new
    t1, t2 = tee('abc')
    tnew = type(t1)
    self.assertRaises(TypeError, tnew)
    self.assertRaises(TypeError, tnew, 10)
    t3 = tnew(t1)
    self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc'))

    # test that tee objects are weak referencable
    a, b = tee(range(10))
    p = weakref.proxy(a)
    self.assertEqual(getattr(p, '__class__'), type(b))
    del a
    support.gc_collect()
    self.assertRaises(ReferenceError, getattr, p, '__class__')

    ans = list('abc')
    long_ans = list(range(10000))

    # check copy
    a, b = tee('abc')
    self.assertEqual(list(copy.copy(a)), ans)
    self.assertEqual(list(copy.copy(b)), ans)
    a, b = tee(list(range(10000)))
    self.assertEqual(list(copy.copy(a)), long_ans)
    self.assertEqual(list(copy.copy(b)), long_ans)

    # check partially consumed copy
    a, b = tee('abc')
    take(2, a)
    take(1, b)
    self.assertEqual(list(copy.copy(a)), ans[2:])
    self.assertEqual(list(copy.copy(b)), ans[1:])
    self.assertEqual(list(a), ans[2:])
    self.assertEqual(list(b), ans[1:])
    a, b = tee(range(10000))
    take(100, a)
    take(60, b)
    self.assertEqual(list(copy.copy(a)), long_ans[100:])
    self.assertEqual(list(copy.copy(b)), long_ans[60:])
    self.assertEqual(list(a), long_ans[100:])
    self.assertEqual(list(b), long_ans[60:])

    # check deepcopy
    a, b = tee('abc')
    self.assertEqual(list(copy.deepcopy(a)), ans)
    self.assertEqual(list(copy.deepcopy(b)), ans)
    self.assertEqual(list(a), ans)
    self.assertEqual(list(b), ans)
    a, b = tee(range(10000))
    self.assertEqual(list(copy.deepcopy(a)), long_ans)
    self.assertEqual(list(copy.deepcopy(b)), long_ans)
    self.assertEqual(list(a), long_ans)
    self.assertEqual(list(b), long_ans)

    # check partially consumed deepcopy
    a, b = tee('abc')
    take(2, a)
    take(1, b)
    self.assertEqual(list(copy.deepcopy(a)), ans[2:])
    self.assertEqual(list(copy.deepcopy(b)), ans[1:])
    self.assertEqual(list(a), ans[2:])
    self.assertEqual(list(b), ans[1:])
    a, b = tee(range(10000))
    take(100, a)
    take(60, b)
    self.assertEqual(list(copy.deepcopy(a)), long_ans[100:])
    self.assertEqual(list(copy.deepcopy(b)), long_ans[60:])
    self.assertEqual(list(a), long_ans[100:])
    self.assertEqual(list(b), long_ans[60:])

    # check pickle
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        self.pickletest(proto, iter(tee('abc')))
        a, b = tee('abc')
        self.pickletest(proto, a, compare=ans)
        self.pickletest(proto, b, compare=ans)
# Issue 13454: Crash when deleting backward iterator from tee()
def test_tee_del_backward(self):
    """Deleting the lagging tee iterator after the leading one has been
    fully consumed must not crash (issue 13454)."""
    forward, backward = tee(repeat(None, 20000000))
    try:
        any(forward)  # exhaust the iterator
        del backward
    except:
        # Drop both halves before re-raising (e.g. on MemoryError).
        del forward, backward
        raise
def test_tee_reenter(self):
    """Re-entering a tee pair from its own source iterator must raise
    RuntimeError (mentioning "tee") instead of misbehaving."""
    class I:
        first = True
        def __iter__(self):
            return self
        def __next__(self):
            first = self.first
            self.first = False
            if first:
                # Re-enters the tee pair currently being advanced.
                return next(b)

    a, b = tee(I())
    with self.assertRaisesRegex(RuntimeError, "tee"):
        next(a)
def test_tee_concurrent(self):
    """Advancing both halves of a tee pair from two threads at once must
    raise RuntimeError (mentioning "tee") rather than corrupt state."""
    start = threading.Event()
    finish = threading.Event()
    class I:
        def __iter__(self):
            return self
        def __next__(self):
            # Block inside __next__ so the other thread's next(b)
            # overlaps with this in-progress call.
            start.set()
            finish.wait()

    a, b = tee(I())
    thread = threading.Thread(target=next, args=[a])
    thread.start()
    try:
        start.wait()
        with self.assertRaisesRegex(RuntimeError, "tee"):
            next(b)
    finally:
        finish.set()
        thread.join()
def test_StopIteration(self):
    """All itertools iterators raise StopIteration when exhausted, both
    for empty inputs and for sources that stop immediately."""
    self.assertRaises(StopIteration, next, zip())

    for f in (chain, cycle, zip, groupby):
        self.assertRaises(StopIteration, next, f([]))
        self.assertRaises(StopIteration, next, f(StopNow()))

    self.assertRaises(StopIteration, next, islice([], None))
    self.assertRaises(StopIteration, next, islice(StopNow(), None))

    p, q = tee([])
    self.assertRaises(StopIteration, next, p)
    self.assertRaises(StopIteration, next, q)
    p, q = tee(StopNow())
    self.assertRaises(StopIteration, next, p)
    self.assertRaises(StopIteration, next, q)

    self.assertRaises(StopIteration, next, repeat(None, 0))

    # Tools that take (callable, iterable) in that order.
    for f in (filter, filterfalse, map, takewhile, dropwhile, starmap):
        self.assertRaises(StopIteration, next, f(lambda x:x, []))
        self.assertRaises(StopIteration, next, f(lambda x:x, StopNow()))
class TestExamples(unittest.TestCase):
    """Smoke tests mirroring the examples in the itertools documentation.

    Fix: the method previously named ``test_stapmap`` is renamed to
    ``test_starmap`` so its name matches the function under test
    (discovery by the ``test_`` prefix is unaffected).
    """

    def test_accumulate(self):
        self.assertEqual(list(accumulate([1,2,3,4,5])), [1, 3, 6, 10, 15])

    def test_accumulate_reducible(self):
        # check copy, deepcopy, pickle
        data = [1, 2, 3, 4, 5]
        accumulated = [1, 3, 6, 10, 15]

        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            it = accumulate(data)
            self.assertEqual(list(pickle.loads(pickle.dumps(it, proto))), accumulated[:])
            self.assertEqual(next(it), 1)
            self.assertEqual(list(pickle.loads(pickle.dumps(it, proto))), accumulated[1:])
        it = accumulate(data)
        self.assertEqual(next(it), 1)
        self.assertEqual(list(copy.deepcopy(it)), accumulated[1:])
        self.assertEqual(list(copy.copy(it)), accumulated[1:])

    def test_accumulate_reducible_none(self):
        # Issue #25718: total is None
        it = accumulate([None, None, None], operator.is_)
        self.assertEqual(next(it), None)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            it_copy = pickle.loads(pickle.dumps(it, proto))
            self.assertEqual(list(it_copy), [True, False])
        self.assertEqual(list(copy.deepcopy(it)), [True, False])
        self.assertEqual(list(copy.copy(it)), [True, False])

    def test_chain(self):
        self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF')

    def test_chain_from_iterable(self):
        self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF')

    def test_combinations(self):
        self.assertEqual(list(combinations('ABCD', 2)),
                         [('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')])
        self.assertEqual(list(combinations(range(4), 3)),
                         [(0,1,2), (0,1,3), (0,2,3), (1,2,3)])

    def test_combinations_with_replacement(self):
        self.assertEqual(list(combinations_with_replacement('ABC', 2)),
                         [('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')])

    def test_compress(self):
        self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF'))

    def test_count(self):
        self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14])

    def test_cycle(self):
        self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD'))

    def test_dropwhile(self):
        self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1])

    def test_groupby(self):
        self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')],
                         list('ABCDAB'))
        self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')],
                         [list('AAAA'), list('BBB'), list('CC'), list('D')])

    def test_filter(self):
        self.assertEqual(list(filter(lambda x: x%2, range(10))), [1,3,5,7,9])

    def test_filterfalse(self):
        self.assertEqual(list(filterfalse(lambda x: x%2, range(10))), [0,2,4,6,8])

    def test_map(self):
        self.assertEqual(list(map(pow, (2,3,10), (5,2,3))), [32, 9, 1000])

    def test_islice(self):
        self.assertEqual(list(islice('ABCDEFG', 2)), list('AB'))
        self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD'))
        self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG'))
        self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG'))

    def test_zip(self):
        self.assertEqual(list(zip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')])

    def test_zip_longest(self):
        self.assertEqual(list(zip_longest('ABCD', 'xy', fillvalue='-')),
                         [('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')])

    def test_permutations(self):
        self.assertEqual(list(permutations('ABCD', 2)),
                         list(map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split())))
        self.assertEqual(list(permutations(range(3))),
                         [(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)])

    def test_product(self):
        self.assertEqual(list(product('ABCD', 'xy')),
                         list(map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split())))
        self.assertEqual(list(product(range(2), repeat=3)),
                         [(0,0,0), (0,0,1), (0,1,0), (0,1,1),
                          (1,0,0), (1,0,1), (1,1,0), (1,1,1)])

    def test_repeat(self):
        self.assertEqual(list(repeat(10, 3)), [10, 10, 10])

    def test_starmap(self):
        # Renamed from the misspelled 'test_stapmap'.
        self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])),
                         [32, 9, 1000])

    def test_takewhile(self):
        self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4])
class TestPurePythonRoughEquivalents(unittest.TestCase):
    """Check the pure-python rough equivalents from the itertools docs
    against the C implementations."""

    @staticmethod
    def islice(iterable, *args):
        """Pure-python rough equivalent of itertools.islice().

        Fix: slice defaults are now taken with ``is None`` tests instead
        of ``or``, so explicit zero arguments work — previously
        ``islice(it, 0)`` treated stop=0 as missing (falsy) and yielded
        the entire iterable instead of nothing.
        """
        s = slice(*args)
        start = 0 if s.start is None else s.start
        stop = sys.maxsize if s.stop is None else s.stop
        step = 1 if s.step is None else s.step
        it = iter(range(start, stop, step))
        try:
            nexti = next(it)
        except StopIteration:
            # Consume *iterable* up to the *start* position.
            for i, element in zip(range(start), iterable):
                pass
            return
        try:
            for i, element in enumerate(iterable):
                if i == nexti:
                    yield element
                    nexti = next(it)
        except StopIteration:
            # Consume to *stop*.
            for i, element in zip(range(i + 1, stop), iterable):
                pass

    def test_islice_recipe(self):
        self.assertEqual(list(self.islice('ABCDEFG', 2)), list('AB'))
        self.assertEqual(list(self.islice('ABCDEFG', 2, 4)), list('CD'))
        self.assertEqual(list(self.islice('ABCDEFG', 2, None)), list('CDEFG'))
        self.assertEqual(list(self.islice('ABCDEFG', 0, None, 2)), list('ACEG'))
        # Regression check: an explicit stop of 0 yields nothing.
        self.assertEqual(list(self.islice('ABCDEFG', 0)), [])
        # Test items consumed.
        it = iter(range(10))
        self.assertEqual(list(self.islice(it, 3)), list(range(3)))
        self.assertEqual(list(it), list(range(3, 10)))
        it = iter(range(10))
        self.assertEqual(list(self.islice(it, 3, 3)), [])
        self.assertEqual(list(it), list(range(3, 10)))
        # Test that slice finishes in predictable state.
        c = count()
        self.assertEqual(list(self.islice(c, 1, 3, 50)), [1])
        self.assertEqual(next(c), 3)
class TestGC(unittest.TestCase):
    """Verify that every itertools iterator participates in cyclic GC.

    Each test threads a reference cycle through the iterator
    (container -> iterator -> container); if the iterator type did not
    support garbage collection the pair would leak.
    """

    def makecycle(self, iterator, container):
        # Close the cycle, start the iterator so it holds live state,
        # then drop both local refs; only cyclic GC can reclaim them now.
        container.append(iterator)
        next(iterator)
        del container, iterator

    def test_accumulate(self):
        a = []
        self.makecycle(accumulate([1,2,a,3]), a)

    def test_chain(self):
        a = []
        self.makecycle(chain(a), a)

    def test_chain_from_iterable(self):
        a = []
        self.makecycle(chain.from_iterable([a]), a)

    def test_combinations(self):
        a = []
        self.makecycle(combinations([1,2,a,3], 3), a)

    def test_combinations_with_replacement(self):
        a = []
        self.makecycle(combinations_with_replacement([1,2,a,3], 3), a)

    def test_compress(self):
        a = []
        self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a)

    def test_count(self):
        a = []
        # count() holds int-subclass instances whose class dict closes the cycle.
        Int = type('Int', (int,), dict(x=a))
        self.makecycle(count(Int(0), Int(1)), a)

    def test_cycle(self):
        a = []
        self.makecycle(cycle([a]*2), a)

    def test_dropwhile(self):
        a = []
        self.makecycle(dropwhile(bool, [0, a, a]), a)

    def test_groupby(self):
        a = []
        self.makecycle(groupby([a]*2, lambda x:x), a)

    def test_issue2246(self):
        # Issue 2246 -- the _grouper iterator was not included in GC
        n = 10
        keyfunc = lambda x: x
        for i, j in groupby(range(n), key=keyfunc):
            keyfunc.__dict__.setdefault('x',[]).append(j)

    def test_filter(self):
        a = []
        self.makecycle(filter(lambda x:True, [a]*2), a)

    def test_filterfalse(self):
        a = []
        self.makecycle(filterfalse(lambda x:False, a), a)

    def test_zip(self):
        a = []
        self.makecycle(zip([a]*2, [a]*3), a)

    def test_zip_longest(self):
        a = []
        self.makecycle(zip_longest([a]*2, [a]*3), a)
        b = [a, None]
        self.makecycle(zip_longest([a]*2, [a]*3, fillvalue=b), a)

    def test_map(self):
        a = []
        self.makecycle(map(lambda x:x, [a]*2), a)

    def test_islice(self):
        a = []
        self.makecycle(islice([a]*2, None), a)

    def test_permutations(self):
        a = []
        self.makecycle(permutations([1,2,a,3], 3), a)

    def test_product(self):
        a = []
        self.makecycle(product([1,2,a,3], repeat=3), a)

    def test_repeat(self):
        a = []
        self.makecycle(repeat(a), a)

    def test_starmap(self):
        a = []
        self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)

    def test_takewhile(self):
        a = []
        self.makecycle(takewhile(bool, [1, 0, a, a]), a)
def R(seqn):
    'Regular generator'
    # Delegate the whole iteration to the underlying sequence.
    yield from seqn
class G:
    'Sequence using __getitem__'

    def __init__(self, seqn):
        self.seqn = seqn

    def __getitem__(self, i):
        # Supports the legacy iteration protocol: indexing from 0 until
        # the underlying sequence raises IndexError.
        return self.seqn[i]
class I:
    'Sequence using iterator protocol'

    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0

    def __iter__(self):
        return self

    def __next__(self):
        # Guard clause: past the end, stop permanently.
        if self.i >= len(self.seqn):
            raise StopIteration
        item = self.seqn[self.i]
        self.i += 1
        return item
class Ig:
    'Sequence using iterator protocol defined with a generator'

    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0

    def __iter__(self):
        # Each call returns a fresh generator over the stored sequence.
        yield from self.seqn
class X:
    'Missing __getitem__ and __iter__'

    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0

    def __next__(self):
        # Has __next__ but is deliberately NOT iterable (no __iter__,
        # no __getitem__) so iter(X(...)) raises TypeError.
        if self.i >= len(self.seqn):
            raise StopIteration
        item = self.seqn[self.i]
        self.i += 1
        return item
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
    'Test propagation of exceptions'

    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0

    def __iter__(self):
        return self

    def __next__(self):
        # Every advance raises ZeroDivisionError; consumers must propagate it.
        3 // 0
class S:
    'Test immediate stop'

    def __init__(self, seqn):
        # The sequence argument is accepted for interface parity but ignored.
        pass

    def __iter__(self):
        return self

    def __next__(self):
        raise StopIteration
def L(seqn):
    'Test multiple tiers of iterators'
    # Stack generator -> generator-iter class -> __getitem__ class, then
    # wrap in identity map() and chain() to add two more layers.
    layered = R(Ig(G(seqn)))
    return chain(map(lambda x: x, layered))
class TestVariousIteratorArgs(unittest.TestCase):
    """Feed each itertool every fixture iterable kind:

    G/I/Ig/L/R  -- well-behaved sequences/iterators (correct output expected),
    S           -- immediately-empty iterator,
    X/N         -- broken protocol (TypeError expected),
    E           -- raising iterator (ZeroDivisionError must propagate).
    """

    def test_accumulate(self):
        s = [1,2,3,4,5]
        r = [1,3,6,10,15]
        n = len(s)
        for g in (G, I, Ig, L, R):
            self.assertEqual(list(accumulate(g(s))), r)
        self.assertEqual(list(accumulate(S(s))), [])
        self.assertRaises(TypeError, accumulate, X(s))
        self.assertRaises(TypeError, accumulate, N(s))
        self.assertRaises(ZeroDivisionError, list, accumulate(E(s)))

    def test_chain(self):
        for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(chain(g(s))), list(g(s)))
                self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
            self.assertRaises(TypeError, list, chain(X(s)))
            self.assertRaises(TypeError, list, chain(N(s)))
            self.assertRaises(ZeroDivisionError, list, chain(E(s)))

    def test_compress(self):
        for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
            n = len(s)
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(compress(g(s), repeat(1))), list(g(s)))
            self.assertRaises(TypeError, compress, X(s), repeat(1))
            self.assertRaises(TypeError, compress, N(s), repeat(1))
            self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1)))

    def test_product(self):
        # Unlike the other tests, only the failure modes are exercised here.
        for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
            self.assertRaises(TypeError, product, X(s))
            self.assertRaises(TypeError, product, N(s))
            self.assertRaises(ZeroDivisionError, product, E(s))

    def test_cycle(self):
        for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                tgtlen = len(s) * 3
                expected = list(g(s))*3
                actual = list(islice(cycle(g(s)), tgtlen))
                self.assertEqual(actual, expected)
            self.assertRaises(TypeError, cycle, X(s))
            self.assertRaises(TypeError, cycle, N(s))
            self.assertRaises(ZeroDivisionError, list, cycle(E(s)))

    def test_groupby(self):
        for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
            self.assertRaises(TypeError, groupby, X(s))
            self.assertRaises(TypeError, groupby, N(s))
            self.assertRaises(ZeroDivisionError, list, groupby(E(s)))

    def test_filter(self):
        for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(filter(isEven, g(s))),
                                 [x for x in g(s) if isEven(x)])
            self.assertRaises(TypeError, filter, isEven, X(s))
            self.assertRaises(TypeError, filter, isEven, N(s))
            self.assertRaises(ZeroDivisionError, list, filter(isEven, E(s)))

    def test_filterfalse(self):
        for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(filterfalse(isEven, g(s))),
                                 [x for x in g(s) if isOdd(x)])
            self.assertRaises(TypeError, filterfalse, isEven, X(s))
            self.assertRaises(TypeError, filterfalse, isEven, N(s))
            self.assertRaises(ZeroDivisionError, list, filterfalse(isEven, E(s)))

    def test_zip(self):
        for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(zip(g(s))), lzip(g(s)))
                self.assertEqual(list(zip(g(s), g(s))), lzip(g(s), g(s)))
            self.assertRaises(TypeError, zip, X(s))
            self.assertRaises(TypeError, zip, N(s))
            self.assertRaises(ZeroDivisionError, list, zip(E(s)))

    def test_ziplongest(self):
        for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(zip_longest(g(s))), list(zip(g(s))))
                self.assertEqual(list(zip_longest(g(s), g(s))), list(zip(g(s), g(s))))
            self.assertRaises(TypeError, zip_longest, X(s))
            self.assertRaises(TypeError, zip_longest, N(s))
            self.assertRaises(ZeroDivisionError, list, zip_longest(E(s)))

    def test_map(self):
        for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(map(onearg, g(s))),
                                 [onearg(x) for x in g(s)])
                self.assertEqual(list(map(operator.pow, g(s), g(s))),
                                 [x**x for x in g(s)])
            self.assertRaises(TypeError, map, onearg, X(s))
            self.assertRaises(TypeError, map, onearg, N(s))
            self.assertRaises(ZeroDivisionError, list, map(onearg, E(s)))

    def test_islice(self):
        for s in ("12345", "", range(1000), ('do', 1.2), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
            self.assertRaises(TypeError, islice, X(s), 10)
            self.assertRaises(TypeError, islice, N(s), 10)
            self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))

    def test_starmap(self):
        for s in (range(10), range(0), range(100), (7,11), range(20,50,5)):
            for g in (G, I, Ig, S, L, R):
                ss = lzip(s, s)
                self.assertEqual(list(starmap(operator.pow, g(ss))),
                                 [x**x for x in g(s)])
            self.assertRaises(TypeError, starmap, operator.pow, X(ss))
            self.assertRaises(TypeError, starmap, operator.pow, N(ss))
            self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))

    def test_takewhile(self):
        for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                tgt = []
                for elem in g(s):
                    if not isEven(elem): break
                    tgt.append(elem)
                self.assertEqual(list(takewhile(isEven, g(s))), tgt)
            self.assertRaises(TypeError, takewhile, isEven, X(s))
            self.assertRaises(TypeError, takewhile, isEven, N(s))
            self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))

    def test_dropwhile(self):
        for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                tgt = []
                for elem in g(s):
                    if not tgt and isOdd(elem): continue
                    tgt.append(elem)
                self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
            self.assertRaises(TypeError, dropwhile, isOdd, X(s))
            self.assertRaises(TypeError, dropwhile, isOdd, N(s))
            self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))

    def test_tee(self):
        for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
            for g in (G, I, Ig, S, L, R):
                it1, it2 = tee(g(s))
                self.assertEqual(list(it1), list(g(s)))
                self.assertEqual(list(it2), list(g(s)))
            self.assertRaises(TypeError, tee, X(s))
            self.assertRaises(TypeError, tee, N(s))
            self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
class LengthTransparency(unittest.TestCase):
    """repeat() with a count should expose it through __length_hint__()."""

    @support.impl_detail("__length_hint__() API is undocumented")
    def test_repeat(self):
        self.assertEqual(operator.length_hint(repeat(None, 50)), 50)
        self.assertEqual(operator.length_hint(repeat(None, 0)), 0)
        # An unbounded repeat() has no hint -> falls back to the default (12).
        self.assertEqual(operator.length_hint(repeat(None), 12), 12)

    def test_repeat_with_negative_times(self):
        # Negative counts behave as an empty iterator, so the hint is 0.
        self.assertEqual(operator.length_hint(repeat(None, -1)), 0)
        self.assertEqual(operator.length_hint(repeat(None, -2)), 0)
        self.assertEqual(operator.length_hint(repeat(None, times=-1)), 0)
        self.assertEqual(operator.length_hint(repeat(None, times=-2)), 0)
class RegressionTests(unittest.TestCase):
    """Regression tests for historical itertools bugs (SF / bpo issue numbers).

    These tests deliberately abuse mutation-during-iteration and re-entrant
    iteration; the exact statement order matters.
    """

    def test_sf_793826(self):
        # Fix Armin Rigo's successful efforts to wreak havoc
        def mutatingtuple(tuple1, f, tuple2):
            # this builds a tuple t which is a copy of tuple1,
            # then calls f(t), then mutates t to be equal to tuple2
            # (needs len(tuple1) == len(tuple2)).
            def g(value, first=[1]):
                if first:
                    del first[:]
                    f(next(z))
                return value
            items = list(tuple2)
            items[1:1] = list(tuple1)
            gen = map(g, items)
            z = zip(*[gen]*len(tuple1))
            next(z)

        def f(t):
            global T
            T = t
            first[:] = list(T)

        first = []
        mutatingtuple((1,2,3), f, (4,5,6))
        second = list(T)
        self.assertEqual(first, second)

    def test_sf_950057(self):
        # Make sure that chain() and cycle() catch exceptions immediately
        # rather than when shifting between input sources
        def gen1():
            hist.append(0)
            yield 1
            hist.append(1)
            raise AssertionError
            hist.append(2)

        def gen2(x):
            hist.append(3)
            yield 2
            hist.append(4)

        hist = []
        self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
        self.assertEqual(hist, [0,1])

        hist = []
        self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
        self.assertEqual(hist, [0,1])

        hist = []
        self.assertRaises(AssertionError, list, cycle(gen1()))
        self.assertEqual(hist, [0,1])

    def test_long_chain_of_empty_iterables(self):
        # Make sure itertools.chain doesn't run into recursion limits when
        # dealing with long chains of empty iterables. Even with a high
        # number this would probably only fail in Py_DEBUG mode.
        it = chain.from_iterable(() for unused in range(10000000))
        with self.assertRaises(StopIteration):
            next(it)

    def test_issue30347_1(self):
        # Re-entrant consumption of the current group must not crash groupby.
        def f(n):
            if n == 5:
                list(b)
            return n != 6
        for (k, b) in groupby(range(10), f):
            list(b)  # shouldn't crash

    def test_issue30347_2(self):
        # A key __eq__ that advances the group iterator must not crash groupby.
        class K:
            def __init__(self, v):
                pass
            def __eq__(self, other):
                nonlocal i
                i += 1
                if i == 1:
                    next(g, None)
                return True
        i = 0
        g = next(groupby(range(10), K))[1]
        for j in range(2):
            next(g, None)  # shouldn't crash
class SubclassWithKwargsTest(unittest.TestCase):
    """Subclasses of the itertools types must be constructible with keyword
    arguments in __init__ without tripping a spurious C-level error."""

    def test_keywords_in_subclass(self):
        # count is not subclassable...
        for cls in (repeat, zip, filter, filterfalse, chain, map,
                    starmap, islice, takewhile, dropwhile, cycle, compress):
            class Subclass(cls):
                def __init__(self, newarg=None, *args):
                    cls.__init__(self, *args)
            try:
                Subclass(newarg=1)
            except TypeError as err:
                # we expect type errors because of wrong argument count
                self.assertNotIn("keyword arguments", err.args[0])
@support.cpython_only
class SizeofTest(unittest.TestCase):
    """Pin the C-level object sizes of the combinatoric iterators.

    Sizes are base object size plus one Py_ssize_t slot per input pool
    (or per result position for the combination types).
    """

    def setUp(self):
        # 'n' is the struct code for Py_ssize_t.
        self.ssize_t = struct.calcsize('n')

    check_sizeof = support.check_sizeof

    def test_product_sizeof(self):
        basesize = support.calcobjsize('3Pi')
        check = self.check_sizeof
        check(product('ab', '12'), basesize + 2 * self.ssize_t)
        check(product(*(('abc',) * 10)), basesize + 10 * self.ssize_t)

    def test_combinations_sizeof(self):
        basesize = support.calcobjsize('3Pni')
        check = self.check_sizeof
        check(combinations('abcd', 3), basesize + 3 * self.ssize_t)
        check(combinations(range(10), 4), basesize + 4 * self.ssize_t)

    def test_combinations_with_replacement_sizeof(self):
        cwr = combinations_with_replacement
        basesize = support.calcobjsize('3Pni')
        check = self.check_sizeof
        check(cwr('abcd', 3), basesize + 3 * self.ssize_t)
        check(cwr(range(10), 4), basesize + 4 * self.ssize_t)

    def test_permutations_sizeof(self):
        # permutations stores both the pool indices and the cycle counters.
        basesize = support.calcobjsize('4Pni')
        check = self.check_sizeof
        check(permutations('abcd'),
              basesize + 4 * self.ssize_t + 4 * self.ssize_t)
        check(permutations('abcd', 3),
              basesize + 4 * self.ssize_t + 3 * self.ssize_t)
        check(permutations('abcde', 3),
              basesize + 5 * self.ssize_t + 3 * self.ssize_t)
        check(permutations(range(10), 4),
              basesize + 10 * self.ssize_t + 4 * self.ssize_t)
libreftest = """ Doctest for examples in the library reference: libitertools.tex
>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in zip(count(1200), amounts):
... print('Check %d is for $%.2f' % (checknum, amount))
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14
>>> import operator
>>> for cube in map(operator.pow, range(1,4), repeat(3)):
... print(cube)
...
1
8
27
>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
... print(name.title())
...
Alex
Laura
Martin
Walter
Samuele
>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.items()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
... print(k, list(map(itemgetter(0), g)))
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']
# Find runs of consecutive numbers using groupby. The key to the solution
# is differencing with a range so that consecutive numbers all appear in
# same group.
>>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda t:t[0]-t[1]):
... print(list(map(operator.itemgetter(1), g)))
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]
>>> def take(n, iterable):
... "Return first n items of the iterable as a list"
... return list(islice(iterable, n))
>>> def prepend(value, iterator):
... "Prepend a single value in front of an iterator"
... # prepend(1, [2, 3, 4]) -> 1 2 3 4
... return chain([value], iterator)
>>> def enumerate(iterable, start=0):
... return zip(count(start), iterable)
>>> def tabulate(function, start=0):
... "Return function(0), function(1), ..."
... return map(function, count(start))
>>> import collections
>>> def consume(iterator, n=None):
... "Advance the iterator n-steps ahead. If n is None, consume entirely."
... # Use functions that consume iterators at C speed.
... if n is None:
... # feed the entire iterator into a zero-length deque
... collections.deque(iterator, maxlen=0)
... else:
... # advance to the empty slice starting at position n
... next(islice(iterator, n, n), None)
>>> def nth(iterable, n, default=None):
... "Returns the nth item or a default value"
... return next(islice(iterable, n, None), default)
>>> def all_equal(iterable):
... "Returns True if all the elements are equal to each other"
... g = groupby(iterable)
... return next(g, True) and not next(g, False)
>>> def quantify(iterable, pred=bool):
... "Count how many times the predicate is true"
... return sum(map(pred, iterable))
>>> def padnone(iterable):
... "Returns the sequence elements and then returns None indefinitely"
... return chain(iterable, repeat(None))
>>> def ncycles(iterable, n):
... "Returns the sequence elements n times"
... return chain(*repeat(iterable, n))
>>> def dotproduct(vec1, vec2):
... return sum(map(operator.mul, vec1, vec2))
>>> def flatten(listOfLists):
... return list(chain.from_iterable(listOfLists))
>>> def repeatfunc(func, times=None, *args):
... "Repeat calls to func with specified arguments."
... " Example: repeatfunc(random.random)"
... if times is None:
... return starmap(func, repeat(args))
... else:
... return starmap(func, repeat(args, times))
>>> def pairwise(iterable):
... "s -> (s0,s1), (s1,s2), (s2, s3), ..."
... a, b = tee(iterable)
... try:
... next(b)
... except StopIteration:
... pass
... return zip(a, b)
>>> def grouper(n, iterable, fillvalue=None):
... "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
... args = [iter(iterable)] * n
... return zip_longest(*args, fillvalue=fillvalue)
>>> def roundrobin(*iterables):
... "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
... # Recipe credited to George Sakkis
... pending = len(iterables)
... nexts = cycle(iter(it).__next__ for it in iterables)
... while pending:
... try:
... for next in nexts:
... yield next()
... except StopIteration:
... pending -= 1
... nexts = cycle(islice(nexts, pending))
>>> def powerset(iterable):
... "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
... s = list(iterable)
... return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
>>> def unique_everseen(iterable, key=None):
... "List unique elements, preserving order. Remember all elements ever seen."
... # unique_everseen('AAAABBBCCDAABBB') --> A B C D
... # unique_everseen('ABBCcAD', str.lower) --> A B C D
... seen = set()
... seen_add = seen.add
... if key is None:
... for element in iterable:
... if element not in seen:
... seen_add(element)
... yield element
... else:
... for element in iterable:
... k = key(element)
... if k not in seen:
... seen_add(k)
... yield element
>>> def unique_justseen(iterable, key=None):
... "List unique elements, preserving order. Remember only the element just seen."
... # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
... # unique_justseen('ABBCcAD', str.lower) --> A B C A D
... return map(next, map(itemgetter(1), groupby(iterable, key)))
>>> def first_true(iterable, default=False, pred=None):
... '''Returns the first true value in the iterable.
...
... If no true value is found, returns *default*
...
... If *pred* is not None, returns the first item
... for which pred(item) is true.
...
... '''
... # first_true([a,b,c], x) --> a or b or c or x
... # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x
... return next(filter(pred, iterable), default)
>>> def nth_combination(iterable, r, index):
... 'Equivalent to list(combinations(iterable, r))[index]'
... pool = tuple(iterable)
... n = len(pool)
... if r < 0 or r > n:
... raise ValueError
... c = 1
... k = min(r, n-r)
... for i in range(1, k+1):
... c = c * (n - k + i) // i
... if index < 0:
... index += c
... if index < 0 or index >= c:
... raise IndexError
... result = []
... while r:
... c, n, r = c*r//n, n-1, r-1
... while index >= c:
... index -= c
... c, n = c*(n-r)//n, n-1
... result.append(pool[-1-n])
... return tuple(result)
This is not part of the examples but it tests to make sure the definitions
perform as purported.
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(prepend(1, [2, 3, 4]))
[1, 2, 3, 4]
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
>>> it = iter(range(10))
>>> consume(it, 3)
>>> next(it)
3
>>> consume(it)
>>> next(it, 'Done')
'Done'
>>> nth('abcde', 3)
'd'
>>> nth('abcde', 9) is None
True
>>> [all_equal(s) for s in ('', 'A', 'AAAA', 'AAAB', 'AAABA')]
[True, True, True, False, False]
>>> quantify(range(99), lambda x: x%2==0)
50
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]
>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]
>>> import random
>>> take(5, map(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]
>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
>>> list(pairwise([]))
[]
>>> list(pairwise('a'))
[]
>>> list(islice(padnone('abc'), 0, 6))
['a', 'b', 'c', None, None, None]
>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> dotproduct([1,2,3], [4,5,6])
32
>>> list(grouper(3, 'abcdefg', 'x'))
[('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')]
>>> list(roundrobin('abc', 'd', 'ef'))
['a', 'd', 'e', 'b', 'f', 'c']
>>> list(powerset([1,2,3]))
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
>>> all(len(list(powerset(range(n)))) == 2**n for n in range(18))
True
>>> list(powerset('abcde')) == sorted(sorted(set(powerset('abcde'))), key=len)
True
>>> list(unique_everseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D']
>>> list(unique_everseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'D']
>>> list(unique_justseen('AAAABBBCCDAABBB'))
['A', 'B', 'C', 'D', 'A', 'B']
>>> list(unique_justseen('ABBCcAD', str.lower))
['A', 'B', 'C', 'A', 'D']
>>> first_true('ABC0DEF1', '9', str.isdigit)
'0'
>>> population = 'ABCDEFGH'
>>> for r in range(len(population) + 1):
... seq = list(combinations(population, r))
... for i in range(len(seq)):
... assert nth_combination(population, r, i) == seq[i]
... for i in range(-len(seq), 0):
... assert nth_combination(population, r, i) == seq[i]
"""
# Expose the doctest block above to regrtest's doctest collection protocol.
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
    """Run every test class; when verbose on a debug build, also run the
    suite repeatedly while tracking total refcounts to detect leaks."""
    test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
                    RegressionTests, LengthTransparency,
                    SubclassWithKwargsTest, TestExamples,
                    TestPurePythonRoughEquivalents,
                    SizeofTest)
    support.run_unittest(*test_classes)

    # verify reference counting
    if verbose and hasattr(sys, "gettotalrefcount"):
        # sys.gettotalrefcount only exists on Py_DEBUG builds.
        import gc
        counts = [None] * 5
        for i in range(len(counts)):
            support.run_unittest(*test_classes)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        print(counts)

    # doctest the examples in the library reference
    support.run_doctest(sys.modules[__name__], verbose)
# Manual invocation runs with verbose output (and refcount checks on debug builds).
if __name__ == "__main__":
    test_main(verbose=True)
from typing import Any, Union, List, Tuple, Dict, Callable, Optional
from multiprocessing import Pipe, connection, get_context, Array
from collections import namedtuple
import logging
import platform
import time
import copy
import traceback
import numpy as np
import torch
import ctypes
import pickle
import cloudpickle
from easydict import EasyDict
from types import MethodType
from ding.utils import PropagatingThread, LockContextType, LockContext, ENV_MANAGER_REGISTRY
from .base_env_manager import BaseEnvManager, EnvState, retry_wrapper, timeout_wrapper
# Mapping from numpy scalar types to the matching ctypes type, used to size
# the multiprocessing.Array backing store of a shared-memory buffer.
_NTYPE_TO_CTYPE = {
    np.bool_: ctypes.c_bool,
    np.uint8: ctypes.c_uint8,
    np.uint16: ctypes.c_uint16,
    np.uint32: ctypes.c_uint32,
    np.uint64: ctypes.c_uint64,
    np.int8: ctypes.c_int8,
    np.int16: ctypes.c_int16,
    np.int32: ctypes.c_int32,
    np.int64: ctypes.c_int64,
    np.float32: ctypes.c_float,
    np.float64: ctypes.c_double,
}
class ShmBuffer():
    """
    Overview:
        Shared memory buffer to store numpy array, usable across processes.
    """

    def __init__(self, dtype: np.generic, shape: Tuple[int]) -> None:
        """
        Overview:
            Initialize the buffer.
        Arguments:
            - dtype (:obj:`np.generic`): dtype of the data to limit the size of the buffer.
            - shape (:obj:`Tuple[int]`): shape of the data to limit the size of the buffer.
        """
        # Flat ctypes array large enough for one array of the given shape.
        # multiprocessing.Array carries its own RLock for cross-process sync.
        self.buffer = Array(_NTYPE_TO_CTYPE[dtype.type], int(np.prod(shape)))
        self.dtype = dtype
        self.shape = shape

    def fill(self, src_arr: np.ndarray) -> None:
        """
        Overview:
            Fill the shared memory buffer with a numpy array. (Replace the original one.)
        Arguments:
            - src_arr (:obj:`np.ndarray`): array to fill the buffer.
        """
        assert isinstance(src_arr, np.ndarray), type(src_arr)
        # dst_arr is a zero-copy view onto the shared ctypes storage.
        dst_arr = np.frombuffer(self.buffer.get_obj(), dtype=self.dtype).reshape(self.shape)
        with self.buffer.get_lock():
            np.copyto(dst_arr, src_arr)

    def get(self) -> np.ndarray:
        """
        Overview:
            Get the array stored in the buffer.
        Return:
            - copy_data (:obj:`np.ndarray`): A copy of the data stored in the buffer.
        """
        # Fix: take the same lock that fill() holds, otherwise a reader in one
        # process can observe a torn (half-written) array from another process.
        with self.buffer.get_lock():
            arr = np.frombuffer(self.buffer.get_obj(), dtype=self.dtype).reshape(self.shape)
            return arr.copy()
class ShmBufferContainer(object):
    """
    Overview:
        Support multiple shared memory buffers. Each key-value is name-buffer.
    """

    def __init__(self, dtype: np.generic, shape: Union[Dict[Any, tuple], tuple]) -> None:
        """
        Overview:
            Initialize the buffer container.
        Arguments:
            - dtype (:obj:`np.generic`): dtype of the data to limit the size of the buffer.
            - shape (:obj:`Union[Dict[Any, tuple], tuple]`): If `Dict[Any, tuple]`, use a dict to manage \
                multiple buffers; If `tuple`, use single buffer.
        """
        if isinstance(shape, dict):
            # Recurse so arbitrarily nested dict shapes are supported.
            self._data = {
                key: ShmBufferContainer(dtype, sub_shape)
                for key, sub_shape in shape.items()
            }
        elif isinstance(shape, (tuple, list)):
            self._data = ShmBuffer(dtype, shape)
        else:
            raise RuntimeError("not support shape: {}".format(shape))
        self._shape = shape

    def fill(self, src_arr: Union[Dict[Any, np.ndarray], np.ndarray]) -> None:
        """
        Overview:
            Fill the one or many shared memory buffer.
        Arguments:
            - src_arr (:obj:`Union[Dict[Any, np.ndarray], np.ndarray]`): array to fill the buffer.
        """
        if isinstance(self._shape, dict):
            for key in self._shape.keys():
                self._data[key].fill(src_arr[key])
        elif isinstance(self._shape, (tuple, list)):
            self._data.fill(src_arr)

    def get(self) -> Union[Dict[Any, np.ndarray], np.ndarray]:
        """
        Overview:
            Get the one or many arrays stored in the buffer.
        Return:
            - data (:obj:`np.ndarray`): The array(s) stored in the buffer.
        """
        if isinstance(self._shape, dict):
            return {key: self._data[key].get() for key in self._shape.keys()}
        elif isinstance(self._shape, (tuple, list)):
            return self._data.get()
class CloudPickleWrapper:
    """
    Overview:
        CloudPickleWrapper can be able to pickle more python object(e.g: an object with lambda expression)
    """

    def __init__(self, data: Any) -> None:
        self.data = data

    def __getstate__(self) -> bytes:
        return cloudpickle.dumps(self.data)

    def __setstate__(self, data: bytes) -> None:
        # Plain containers/arrays round-trip faster through the stdlib pickle;
        # anything else (lambdas, locals) needs cloudpickle.
        use_stdlib_pickle = isinstance(data, (tuple, list, np.ndarray))
        loader = pickle.loads if use_stdlib_pickle else cloudpickle.loads
        self.data = loader(data)
@ENV_MANAGER_REGISTRY.register('async_subprocess')
class AsyncSubprocessEnvManager(BaseEnvManager):
"""
Overview:
Create an AsyncSubprocessEnvManager to manage multiple environments.
Each Environment is run by a respective subprocess.
Interfaces:
seed, launch, ready_obs, step, reset, env_info,active_env
"""
# Default config for AsyncSubprocessEnvManager; merged with the user cfg
# (presumably by the BaseEnvManager constructor -- defined elsewhere).
config = dict(
    episode_num=float("inf"),
    max_retry=5,
    step_timeout=60,
    auto_reset=True,
    reset_timeout=60,
    retry_waiting_time=0.1,
    # subprocess specified args
    shared_memory=True,
    # 'fork' is unavailable on Windows, so fall back to 'spawn' there.
    context='spawn' if platform.system().lower() == 'windows' else 'fork',
    wait_num=2,
    step_wait_timeout=0.01,
    connect_timeout=60,
)
def __init__(
        self,
        env_fn: List[Callable],
        cfg: EasyDict = EasyDict({}),
) -> None:
    """
    Overview:
        Initialize the AsyncSubprocessEnvManager.
    Arguments:
        - env_fn (:obj:`List[Callable]`): The function to create environment
        - cfg (:obj:`EasyDict`): Config
    .. note::
        - wait_num: for each time the minimum number of env return to gather
        - step_wait_timeout: timeout (seconds) of each wait for stepped envs
    """
    super().__init__(env_fn, cfg)
    self._shared_memory = self._cfg.shared_memory
    self._context = self._cfg.context
    self._wait_num = self._cfg.wait_num
    self._step_wait_timeout = self._cfg.step_wait_timeout

    self._lock = LockContext(LockContextType.THREAD_LOCK)
    self._connect_timeout = self._cfg.connect_timeout
    # The connect timeout must cover a full step/reset plus a small margin,
    # otherwise the pipe would be declared dead while the env is still busy.
    # (builtin max over three scalars replaces np.max([...]) -- no array needed)
    self._connect_timeout = max(self._connect_timeout, self._step_timeout + 0.5, self._reset_timeout + 0.5)

    self._async_args = {
        'step': {
            'wait_num': min(self._wait_num, self._env_num),
            'timeout': self._step_wait_timeout
        }
    }
def _create_state(self) -> None:
    r"""
    Overview:
        Fork/spawn sub-processes(Call ``_create_env_subprocess``) and create pipes to transfer the data.
    """
    self._env_episode_count = {env_id: 0 for env_id in range(self.env_num)}
    self._ready_obs = {env_id: None for env_id in range(self.env_num)}
    # Reference env living in the main process; used only to query metadata
    # (observation space) below, never stepped.
    self._env_ref = self._env_fn[0]()
    self._reset_param = {i: {} for i in range(self.env_num)}
    if self._shared_memory:
        obs_space = self._env_ref.info().obs_space
        shape = obs_space.shape
        # Fall back to float32 when the env does not advertise a dtype.
        dtype = np.dtype(obs_space.value['dtype']) if obs_space.value is not None else np.dtype(np.float32)
        self._obs_buffers = {env_id: ShmBufferContainer(dtype, shape) for env_id in range(self.env_num)}
    else:
        self._obs_buffers = {env_id: None for env_id in range(self.env_num)}
    self._pipe_parents, self._pipe_children = {}, {}
    self._subprocesses = {}
    for env_id in range(self.env_num):
        self._create_env_subprocess(env_id)
    self._waiting_env = {'step': set()}
    self._closed = False
def _create_env_subprocess(self, env_id):
    """Spawn (or respawn) the worker process and the pipe for one env slot."""
    # start a new one
    self._pipe_parents[env_id], self._pipe_children[env_id] = Pipe()
    ctx = get_context(self._context)
    self._subprocesses[env_id] = ctx.Process(
        # target=self.worker_fn,
        # NOTE(review): worker_fn_robust is defined elsewhere in this class.
        target=self.worker_fn_robust,
        args=(
            self._pipe_parents[env_id],
            self._pipe_children[env_id],
            # CloudPickleWrapper lets lambda/closure env factories cross the
            # process boundary.
            CloudPickleWrapper(self._env_fn[env_id]),
            self._obs_buffers[env_id],
            self.method_name_list,
            self._reset_timeout,
            self._step_timeout,
            self._max_retry,
        ),
        daemon=True,
        name='subprocess_env_manager{}_{}'.format(env_id, time.time())
    )
    self._subprocesses[env_id].start()
    # The child now owns its pipe end; closing the parent's copy of it lets
    # the parent see EOF if the child dies.
    self._pipe_children[env_id].close()
    self._env_states[env_id] = EnvState.INIT
    if self._env_replay_path is not None:
        self._pipe_parents[env_id].send(['enable_save_replay', [self._env_replay_path[env_id]], {}])
        self._pipe_parents[env_id].recv()
@property
def ready_env(self) -> List[int]:
    # Active envs that are not currently awaiting a step result.
    waiting = self._waiting_env['step']
    return [env_id for env_id in self.active_env if env_id not in waiting]
@property
def ready_obs(self) -> Dict[int, Any]:
    """
    Overview:
        Get the next observations.
    Return:
        A dictionary with observations and their environment IDs.
    Note:
        The observations are returned in np.ndarray.
    Example:
        >>> obs_dict = env_manager.ready_obs
        >>> actions_dict = {env_id: model.forward(obs) for env_id, obs in obs_dict.items())}
    """
    no_done_env_idx = [i for i, s in self._env_states.items() if s != EnvState.DONE]
    sleep_count = 0
    # Busy-wait (1 ms polls) until at least one not-done env is RUNning;
    # warn roughly once per second so a stuck reset is visible in logs.
    while not any([self._env_states[i] == EnvState.RUN for i in no_done_env_idx]):
        if sleep_count % 1000 == 0:
            logging.warning(
                'VEC_ENV_MANAGER: all the not done envs are resetting, sleep {} times'.format(sleep_count)
            )
        time.sleep(0.001)
        sleep_count += 1
    return {i: self._ready_obs[i] for i in self.ready_env}
def launch(self, reset_param: Optional[Dict] = None) -> None:
    """
    Overview:
        Set up the environments and their parameters.
    Arguments:
        - reset_param (:obj:`Optional[Dict]`): Dict of reset parameters for each environment, key is the env_id, \
            value is the corresponding reset parameters.
    """
    # launch() is only legal on a closed manager (fresh instance or after close()).
    assert self._closed, "please first close the env manager"
    if reset_param is not None:
        assert len(reset_param) == len(self._env_fn)
    self._create_state()
    self.reset(reset_param)
    def reset(self, reset_param: Optional[Dict] = None) -> None:
        """
        Overview:
            Reset the environments their parameters.
        Arguments:
            - reset_param (:obj:`Optional[Dict]`): Dict of reset parameters for each environment, key is the env_id, \
                value is the corresponding reset parameters.
        """
        self._check_closed()
        # clear previous info: drain step replies still in flight so every pipe is empty
        # before new commands are sent.
        for env_id in self._waiting_env['step']:
            self._pipe_parents[env_id].recv()
        self._waiting_env['step'].clear()
        if reset_param is None:
            # No params given: reset every env with its previously stored reset params.
            reset_env_list = [env_id for env_id in range(self._env_num)]
        else:
            # Only reset the envs named in reset_param, remembering their params for reuse.
            reset_env_list = reset_param.keys()
            for env_id in reset_param:
                self._reset_param[env_id] = reset_param[env_id]
        sleep_count = 0
        # Busy-wait until any reset already running in a background (auto-reset) thread finishes,
        # so two resets of the same env never overlap.
        while any([self._env_states[i] == EnvState.RESET for i in reset_env_list]):
            if sleep_count % 1000 == 0:
                logging.warning(
                    'VEC_ENV_MANAGER: not all the envs finish resetting, sleep {} times'.format(sleep_count)
                )
            time.sleep(0.001)
            sleep_count += 1
        # reset env
        reset_thread_list = []
        for i, env_id in enumerate(reset_env_list):
            self._env_states[env_id] = EnvState.RESET
            # set seed: a seed is sent to the subprocess once, then cleared so later resets
            # do not re-seed the env.
            if self._env_seed[env_id] is not None:
                try:
                    if self._env_dynamic_seed is not None:
                        self._pipe_parents[env_id].send(['seed', [self._env_seed[env_id], self._env_dynamic_seed], {}])
                    else:
                        self._pipe_parents[env_id].send(['seed', [self._env_seed[env_id]], {}])
                    ret = self._pipe_parents[env_id].recv()
                    self._check_data({env_id: ret})
                    self._env_seed[env_id] = None  # seed only use once
                except Exception as e:
                    # Seeding is best-effort; a failure here must not abort the reset.
                    logging.warning("subprocess reset set seed failed, ignore and continue...")
            # One thread per env so slow resets overlap instead of running serially.
            reset_thread = PropagatingThread(target=self._reset, args=(env_id, ))
            reset_thread.daemon = True
            reset_thread_list.append(reset_thread)
        for t in reset_thread_list:
            t.start()
        for t in reset_thread_list:
            t.join()
    def _reset(self, env_id: int) -> None:
        # Reset one subprocess env; retried up to self._max_retry times by retry_wrapper.

        @retry_wrapper(max_retry=self._max_retry, waiting_time=self._retry_waiting_time)
        def reset_fn():
            # A clean pipe is required: unread data means an earlier exchange went wrong.
            if self._pipe_parents[env_id].poll():
                recv_data = self._pipe_parents[env_id].recv()
                raise Exception("unread data left before sending to the pipe: {}".format(repr(recv_data)))
            # if self._reset_param[env_id] is None, just reset specific env, not pass reset param
            if self._reset_param[env_id] is not None:
                assert isinstance(self._reset_param[env_id], dict), type(self._reset_param[env_id])
                self._pipe_parents[env_id].send(['reset', [], self._reset_param[env_id]])
            else:
                self._pipe_parents[env_id].send(['reset', [], {}])
            # No reply within connect_timeout: assume the worker is wedged and rebuild it.
            if not self._pipe_parents[env_id].poll(self._connect_timeout):
                # terminate the old subprocess
                self._pipe_parents[env_id].close()
                if self._subprocesses[env_id].is_alive():
                    self._subprocesses[env_id].terminate()
                # reset the subprocess
                self._create_env_subprocess(env_id)
                raise Exception("env reset timeout") # Leave it to retry_wrapper to try again
            obs = self._pipe_parents[env_id].recv()
            self._check_data({env_id: obs}, close=False)
            if self._shared_memory:
                # With shared memory the worker replies None; the real obs lives in the buffer.
                obs = self._obs_buffers[env_id].get()
            # Because each thread updates the corresponding env_id value, they won't lead to a thread-safe problem.
            self._env_states[env_id] = EnvState.RUN
            self._ready_obs[env_id] = obs

        try:
            reset_fn()
        except Exception as e:
            logging.error('VEC_ENV_MANAGER: env {} reset error'.format(env_id))
            logging.error('\nEnv Process Reset Exception:\n' + ''.join(traceback.format_tb(e.__traceback__)) + repr(e))
            if self._closed: # exception cased by main thread closing parent_remote
                return
            else:
                self.close()
            raise e
    def step(self, actions: Dict[int, Any]) -> Dict[int, namedtuple]:
        """
        Overview:
            Step all environments. Reset an env if done.
        Arguments:
            - actions (:obj:`Dict[int, Any]`): {env_id: action}
        Returns:
            - timesteps (:obj:`Dict[int, namedtuple]`): {env_id: timestep}. Timestep is a \
                ``BaseEnvTimestep`` tuple with observation, reward, done, env_info.
        Example:
            >>> actions_dict = {env_id: model.forward(obs) for env_id, obs in obs_dict.items()}
            >>> timesteps = env_manager.step(actions_dict):
            >>> for env_id, timestep in timesteps.items():
            >>>     pass
        .. note:
            - The env_id that appears in ``actions`` will also be returned in ``timesteps``.
            - Each environment is run by a subprocess separately. Once an environment is done, it is reset immediately.
            - Async subprocess env manager use ``connection.wait`` to poll.
        """
        self._check_closed()
        env_ids = list(actions.keys())
        assert all([self._env_states[env_id] == EnvState.RUN for env_id in env_ids]
                   ), 'current env state are: {}, please check whether the requested env is in reset or done'.format(
                       {env_id: self._env_states[env_id]
                        for env_id in env_ids}
                   )
        for env_id, act in actions.items():
            self._pipe_parents[env_id].send(['step', [act], {}])
        timesteps = {}
        step_args = self._async_args['step']
        wait_num, timeout = min(step_args['wait_num'], len(env_ids)), step_args['timeout']
        # Poll both the envs stepped just now and those still pending from a previous call.
        rest_env_ids = list(set(env_ids).union(self._waiting_env['step']))
        ready_env_ids = []
        cur_rest_env_ids = copy.deepcopy(rest_env_ids)
        while True:
            rest_conn = [self._pipe_parents[env_id] for env_id in cur_rest_env_ids]
            ready_conn, ready_ids = AsyncSubprocessEnvManager.wait(rest_conn, min(wait_num, len(rest_conn)), timeout)
            cur_ready_env_ids = [cur_rest_env_ids[env_id] for env_id in ready_ids]
            assert len(cur_ready_env_ids) == len(ready_conn)
            timesteps.update({env_id: p.recv() for env_id, p in zip(cur_ready_env_ids, ready_conn)})
            self._check_data(timesteps)
            ready_env_ids += cur_ready_env_ids
            cur_rest_env_ids = list(set(cur_rest_env_ids).difference(set(cur_ready_env_ids)))
            # At least one not done env timestep, or all envs' steps are finished
            if any([not t.done for t in timesteps.values()]) or len(ready_conn) == len(rest_conn):
                break
        self._waiting_env['step']: set
        # Envs whose replies did not arrive stay in the waiting set and are collected on a
        # later step() call; ones that did arrive are removed from it.
        for env_id in rest_env_ids:
            if env_id in ready_env_ids:
                if env_id in self._waiting_env['step']:
                    self._waiting_env['step'].remove(env_id)
            else:
                self._waiting_env['step'].add(env_id)
        if self._shared_memory:
            # Observations travel through shared memory; patch them back into the timesteps.
            for i, (env_id, timestep) in enumerate(timesteps.items()):
                timesteps[env_id] = timestep._replace(obs=self._obs_buffers[env_id].get())
        for env_id, timestep in timesteps.items():
            if timestep.info.get('abnormal', False):
                self._env_states[env_id] = EnvState.ERROR
                continue
            if timestep.done:
                self._env_episode_count[env_id] += 1
                if self._env_episode_count[env_id] < self._episode_num and self._auto_reset:
                    # Auto-reset in a background thread so other envs keep stepping.
                    self._env_states[env_id] = EnvState.RESET
                    reset_thread = PropagatingThread(target=self._reset, args=(env_id, ), name='regular_reset')
                    reset_thread.daemon = True
                    reset_thread.start()
                else:
                    self._env_states[env_id] = EnvState.DONE
            else:
                self._ready_obs[env_id] = timestep.obs
        return timesteps
    # This method must be staticmethod, otherwise there will be some resource conflicts(e.g. port or file)
    # Env must be created in worker, which is a trick of avoiding env pickle errors.
    # A more robust version is used by default. But this one is also preserved.
    @staticmethod
    def worker_fn(
            p: connection.Connection, c: connection.Connection, env_fn_wrapper: 'CloudPickleWrapper',
            obs_buffer: ShmBuffer, method_name_list: list
    ) -> None: # noqa
        """
        Overview:
            Subprocess's target function to run.
        """
        torch.set_num_threads(1)
        env_fn = env_fn_wrapper.data
        env = env_fn()
        # The parent end of the pipe belongs to the main process; close this copy of it.
        p.close()
        try:
            while True:
                try:
                    cmd, args, kwargs = c.recv()
                except EOFError: # for the case when the pipe has been closed
                    c.close()
                    break
                try:
                    if cmd == 'getattr':
                        ret = getattr(env, args[0])
                    elif cmd in method_name_list:
                        if cmd == 'step':
                            timestep = env.step(*args, **kwargs)
                            if timestep.info.get('abnormal', False):
                                ret = timestep
                            else:
                                # Ship the obs through shared memory when enabled and strip
                                # it from the pickled reply.
                                if obs_buffer is not None:
                                    obs_buffer.fill(timestep.obs)
                                    timestep = timestep._replace(obs=None)
                                ret = timestep
                        elif cmd == 'reset':
                            ret = env.reset(*args, **kwargs) # obs
                            if obs_buffer is not None:
                                obs_buffer.fill(ret)
                                ret = None
                        elif args is None and kwargs is None:
                            ret = getattr(env, cmd)()
                        else:
                            ret = getattr(env, cmd)(*args, **kwargs)
                    else:
                        raise KeyError("not support env cmd: {}".format(cmd))
                    c.send(ret)
                except Exception as e:
                    # when there are some errors in env, worker_fn will send the errors to env manager
                    # directly send error to another process will lose the stack trace, so we create a new Exception
                    c.send(
                        e.__class__(
                            '\nEnv Process Exception:\n' + ''.join(traceback.format_tb(e.__traceback__)) + repr(e)
                        )
                    )
                if cmd == 'close':
                    c.close()
                    break
        except KeyboardInterrupt:
            c.close()
    @staticmethod
    def worker_fn_robust(
            parent,
            child,
            env_fn_wrapper,
            obs_buffer,
            method_name_list,
            reset_timeout=60,
            step_timeout=60,
            max_retry=1
    ) -> None:
        """
        Overview:
            A more robust version of subprocess's target function to run. Used by default.
        """
        torch.set_num_threads(1)
        env_fn = env_fn_wrapper.data
        env = env_fn()
        # The parent end of the pipe belongs to the main process; close this copy of it.
        parent.close()

        @retry_wrapper(max_retry=max_retry)
        @timeout_wrapper(timeout=step_timeout)
        def step_fn(*args, **kwargs):
            # Step once; on a normal transition move the obs into shared memory (if enabled)
            # so only the lightweight remainder of the timestep is pickled back.
            timestep = env.step(*args, **kwargs)
            if timestep.info.get('abnormal', False):
                ret = timestep
            else:
                if obs_buffer is not None:
                    obs_buffer.fill(timestep.obs)
                    timestep = timestep._replace(obs=None)
                ret = timestep
            return ret

        # self._reset method has add retry_wrapper decorator
        @timeout_wrapper(timeout=reset_timeout)
        def reset_fn(*args, **kwargs):
            try:
                ret = env.reset(*args, **kwargs)
                if obs_buffer is not None:
                    obs_buffer.fill(ret)
                    ret = None
                return ret
            except Exception as e:
                # A failed reset leaves the env in an unknown state; close it before re-raising.
                env.close()
                raise e

        while True:
            try:
                cmd, args, kwargs = child.recv()
            except EOFError: # for the case when the pipe has been closed
                child.close()
                break
            try:
                if cmd == 'getattr':
                    ret = getattr(env, args[0])
                elif cmd in method_name_list:
                    if cmd == 'step':
                        ret = step_fn(*args, **kwargs)
                    elif cmd == 'reset':
                        ret = reset_fn(*args, **kwargs)
                    elif args is None and kwargs is None:
                        ret = getattr(env, cmd)()
                    else:
                        ret = getattr(env, cmd)(*args, **kwargs)
                else:
                    raise KeyError("not support env cmd: {}".format(cmd))
                child.send(ret)
            except Exception as e:
                # print("Sub env '{}' error when executing {}".format(str(env), cmd))
                # when there are some errors in env, worker_fn will send the errors to env manager
                # directly send error to another process will lose the stack trace, so we create a new Exception
                child.send(
                    e.__class__('\nEnv Process Exception:\n' + ''.join(traceback.format_tb(e.__traceback__)) + repr(e))
                )
            if cmd == 'close':
                child.close()
                break
def _check_data(self, data: Dict, close: bool = True) -> None:
exceptions = []
for i, d in data.items():
if isinstance(d, Exception):
self._env_states[i] = EnvState.ERROR
exceptions.append(d)
# when receiving env Exception, env manager will safely close and raise this Exception to caller
if len(exceptions) > 0:
if close:
self.close()
raise exceptions[0]
    # override
    def __getattr__(self, key: str) -> Any:
        # Broadcast an attribute read to every subprocess env and gather one reply per env.
        # Only reached for names missing on the manager itself (standard __getattr__ rules).
        self._check_closed()
        # we suppose that all the envs has the same attributes, if you need different envs, please
        # create different env managers.
        if not hasattr(self._env_ref, key):
            raise AttributeError("env `{}` doesn't have the attribute `{}`".format(type(self._env_ref), key))
        if isinstance(getattr(self._env_ref, key), MethodType) and key not in self.method_name_list:
            raise RuntimeError("env getattr doesn't supports method({}), please override method_name_list".format(key))
        for _, p in self._pipe_parents.items():
            p.send(['getattr', [key], {}])
        # Receive in the same dict order the requests were sent; each pipe yields one reply.
        data = {i: p.recv() for i, p in self._pipe_parents.items()}
        self._check_data(data)
        ret = [data[i] for i in self._pipe_parents.keys()]
        return ret
# override
def enable_save_replay(self, replay_path: Union[List[str], str]) -> None:
"""
Overview:
Set each env's replay save path.
Arguments:
- replay_path (:obj:`Union[List[str], str]`): List of paths for each environment; \
Or one path for all environments.
"""
if isinstance(replay_path, str):
replay_path = [replay_path] * self.env_num
self._env_replay_path = replay_path
    # override
    def close(self) -> None:
        """
        Overview:
            Close the env manager and release all related resources.
        """
        # Idempotent: a second close is a no-op.
        if self._closed:
            return
        self._closed = True
        self._env_ref.close()
        # Ask every worker to shut down, then wait for each to acknowledge with one reply.
        for _, p in self._pipe_parents.items():
            p.send(['close', None, None])
        for _, p in self._pipe_parents.items():
            p.recv()
        for i in range(self._env_num):
            self._env_states[i] = EnvState.VOID
        # disable process join for avoiding hang
        # for p in self._subprocesses:
        #     p.join()
        for _, p in self._subprocesses.items():
            p.terminate()
        for _, p in self._pipe_parents.items():
            p.close()
@staticmethod
def wait(rest_conn: list, wait_num: int, timeout: Optional[float] = None) -> Tuple[list, list]:
"""
Overview:
Wait at least enough(len(ready_conn) >= wait_num) connections within timeout constraint.
If timeout is None and wait_num == len(ready_conn), means sync mode;
If timeout is not None, will return when len(ready_conn) >= wait_num and
this method takes more than timeout seconds.
"""
assert 1 <= wait_num <= len(rest_conn
), 'please indicate proper wait_num: <wait_num: {}, rest_conn_num: {}>'.format(
wait_num, len(rest_conn)
)
rest_conn_set = set(rest_conn)
ready_conn = set()
start_time = time.time()
while len(rest_conn_set) > 0:
if len(ready_conn) >= wait_num and timeout:
if (time.time() - start_time) >= timeout:
break
finish_conn = set(connection.wait(rest_conn_set, timeout=timeout))
ready_conn = ready_conn.union(finish_conn)
rest_conn_set = rest_conn_set.difference(finish_conn)
ready_ids = [rest_conn.index(c) for c in ready_conn]
return list(ready_conn), ready_ids
@ENV_MANAGER_REGISTRY.register('subprocess')
class SyncSubprocessEnvManager(AsyncSubprocessEnvManager):
    """
    Synchronous variant of ``AsyncSubprocessEnvManager``: ``step`` blocks until every
    requested env has replied, instead of returning once ``wait_num`` replies arrive.
    """
    config = dict(
        episode_num=float("inf"),
        max_retry=5,
        step_timeout=60,
        auto_reset=True,
        reset_timeout=60,
        retry_waiting_time=0.1,
        # subprocess specified args
        shared_memory=True,
        context='spawn' if platform.system().lower() == 'windows' else 'fork',
        wait_num=float("inf"), # inf mean all the environments
        step_wait_timeout=None,
        connect_timeout=60,
        force_reproducibility=False,
    )

    def __init__(
            self,
            env_fn: List[Callable],
            cfg: EasyDict = EasyDict({}),
    ) -> None:
        super(SyncSubprocessEnvManager, self).__init__(env_fn, cfg)
        # When True, step() joins each auto-reset thread before returning, trading
        # throughput for a deterministic reset order.
        self._force_reproducibility = self._cfg.force_reproducibility

    def step(self, actions: Dict[int, Any]) -> Dict[int, namedtuple]:
        """
        Overview:
            Step all environments. Reset an env if done.
        Arguments:
            - actions (:obj:`Dict[int, Any]`): {env_id: action}
        Returns:
            - timesteps (:obj:`Dict[int, namedtuple]`): {env_id: timestep}. Timestep is a \
                ``BaseEnvTimestep`` tuple with observation, reward, done, env_info.
        Example:
            >>> actions_dict = {env_id: model.forward(obs) for env_id, obs in obs_dict.items()}
            >>> timesteps = env_manager.step(actions_dict):
            >>> for env_id, timestep in timesteps.items():
            >>>     pass
        .. note::
            - The env_id that appears in ``actions`` will also be returned in ``timesteps``.
            - Each environment is run by a subprocess separately. Once an environment is done, it is reset immediately.
        """
        self._check_closed()
        env_ids = list(actions.keys())
        assert all([self._env_states[env_id] == EnvState.RUN for env_id in env_ids]
                   ), 'current env state are: {}, please check whether the requested env is in reset or done'.format(
                       {env_id: self._env_states[env_id]
                        for env_id in env_ids}
                   )
        for env_id, act in actions.items():
            self._pipe_parents[env_id].send(['step', [act], {}])
        # === This part is different from async one. ===
        # === Because operate in this way is more efficient. ===
        timesteps = {}
        ready_conn = [self._pipe_parents[env_id] for env_id in env_ids]
        # Blocking recv from every stepped env, in request order (synchronous semantics).
        timesteps.update({env_id: p.recv() for env_id, p in zip(env_ids, ready_conn)})
        self._check_data(timesteps)
        # ======================================================
        if self._shared_memory:
            # Observations travel through shared memory; patch them back into the timesteps.
            for i, (env_id, timestep) in enumerate(timesteps.items()):
                timesteps[env_id] = timestep._replace(obs=self._obs_buffers[env_id].get())
        for env_id, timestep in timesteps.items():
            if timestep.info.get('abnormal', False):
                self._env_states[env_id] = EnvState.ERROR
                continue
            if timestep.done:
                self._env_episode_count[env_id] += 1
                if self._env_episode_count[env_id] < self._episode_num and self._auto_reset:
                    # Auto-reset in a background thread so other envs are not blocked.
                    self._env_states[env_id] = EnvState.RESET
                    reset_thread = PropagatingThread(target=self._reset, args=(env_id, ), name='regular_reset')
                    reset_thread.daemon = True
                    reset_thread.start()
                    if self._force_reproducibility:
                        # Wait for the reset to finish before returning, for reproducible runs.
                        reset_thread.join()
                else:
                    self._env_states[env_id] = EnvState.DONE
            else:
                self._ready_obs[env_id] = timestep.obs
        return timesteps
|
server.py | import hmac
import json
import urllib.parse
from .main import (
PullReqState,
parse_commands,
db_query,
IGNORE_BLOCK_END,
IGNORE_BLOCK_START,
INTERRUPTED_BY_HOMU_RE,
suppress_ignore_block,
suppress_pings,
synchronize,
LabelEvent,
)
from . import comments
from . import utils
from .utils import lazy_debug
import github3
import jinja2
import requests
import pkg_resources
from bottle import (
get,
post,
run,
request,
redirect,
abort,
response,
error,
)
from threading import Thread
import sys
import os
import traceback
from retrying import retry
import random
import string
import bottle
bottle.BaseRequest.MEMFILE_MAX = 1024 * 1024 * 10
# Bag of process-wide mutable state (config, repo states, DB handle, templates, logger);
# the single shared instance `g` is populated at startup and read by every request handler.
class G:
    pass
g = G()
# Human-readable labels for a pull request's rollup setting, keyed by the stored
# integer value; used when rendering the queue page.
ROLLUP_STR = {
    -2: 'never',
    -1: 'iffy',
    0: '',
    1: 'always',
}
def find_state(sha):
    """Return ``(state, repo_label)`` for the PR whose merge commit is *sha*.

    Raises ValueError when no tracked PR matches.
    """
    for repo_label, repo_states in g.states.items():
        match = next((s for s in repo_states.values() if s.merge_sha == sha), None)
        if match is not None:
            return match, repo_label
    raise ValueError('Invalid SHA')
def get_repo(repo_label, repo_cfg):
    """Return the github3 repository for *repo_label*, fetching and caching it on first use."""
    cached = g.repos[repo_label].gh
    if not cached:
        cached = g.gh.repository(repo_cfg['owner'], repo_cfg['name'])
        g.repos[repo_label].gh = cached
    # Sanity-check that the cached handle matches the configured repository.
    assert cached.owner.login == repo_cfg['owner']
    assert cached.name == repo_cfg['name']
    return cached
@get('/')
def index():
    """Render the landing page listing all configured repositories."""
    ordered_repos = [g.repos[label] for label in sorted(g.repos)]
    return g.tpls['index'].render(repos=ordered_repos)
@get('/results/<repo_label:path>/<pull:int>')
def result(repo_label, pull):
    """Render the per-builder build-result page for one pull request."""
    if repo_label not in g.states:
        abort(404, 'No such repository: {}'.format(repo_label))
    matching = [s for s in g.states[repo_label].values() if s.num == pull]
    if not matching:
        abort(404, 'No build results for pull request {}'.format(pull))
    state = matching[0]
    repo_url = 'https://github.com/{}/{}'.format(
        g.cfg['repo'][repo_label]['owner'],
        g.cfg['repo'][repo_label]['name'])
    builders = []
    for builder, data in state.build_res.items():
        # res is None while pending, truthy on success, falsy on failure.
        if data['res'] is None:
            outcome = "pending"
        elif data['res']:
            outcome = "success"
        else:
            outcome = "failed"
        entry = {
            'result': outcome,
            'name': builder,
        }
        if data['url']:
            entry['url'] = data['url']
        builders.append(entry)
    return g.tpls['build_res'].render(repo_label=repo_label, repo_url=repo_url,
                                      builders=builders, pull=pull)
@get('/queue/<repo_label:path>')
def queue(repo_label):
    # Render the merge-queue page for one repo, a '+'-joined set of repos, or 'all'.
    if repo_label not in g.cfg['repo'] and repo_label != 'all':
        abort(404)
    logger = g.logger.getChild('queue')
    lazy_debug(logger, lambda: 'repo_label: {}'.format(repo_label))
    single_repo_closed = None
    treeclosed_src = None
    if repo_label == 'all':
        labels = g.repos.keys()
        multiple = True
        repo_url = None
    else:
        labels = repo_label.split('+')
        multiple = len(labels) > 1
        # Tree-closed info is only surfaced in the single-repo view (treeclosed >= 0
        # means the tree is closed below that priority).
        if repo_label in g.repos and g.repos[repo_label].treeclosed >= 0:
            single_repo_closed = g.repos[repo_label].treeclosed
            treeclosed_src = g.repos[repo_label].treeclosed_src
        repo_url = 'https://github.com/{}/{}'.format(
            g.cfg['repo'][repo_label]['owner'],
            g.cfg['repo'][repo_label]['name'])
    states = []
    for label in labels:
        try:
            states += g.states[label].values()
        except KeyError:
            abort(404, 'No such repository: {}'.format(label))
    # PRs pre-checked via the ?prs=1,2,3 query parameter (used by rollup re-creation links).
    prechecked_prs = set()
    if request.query.get('prs'):
        prechecked_prs = set(request.query.get('prs').split(','))
    pull_states = sorted(states)
    rows = []
    for state in pull_states:
        # Grey a PR out when the tree is closed and its priority is below the threshold.
        # NOTE(review): with treeclosed == 0 `single_repo_closed` is falsy, so the
        # greyed flag is never set in that case — presumably intentional; confirm.
        treeclosed = (single_repo_closed and
                      state.priority < g.repos[state.repo_label].treeclosed)
        status_ext = ''
        if state.try_:
            status_ext += ' (try)'
        rows.append({
            'status': state.get_status(),
            'status_ext': status_ext,
            'priority': state.priority,
            'rollup': ROLLUP_STR.get(state.rollup, ''),
            'prechecked': str(state.num) in prechecked_prs,
            'url': 'https://github.com/{}/{}/pull/{}'.format(state.owner,
                                                             state.name,
                                                             state.num),
            'num': state.num,
            'approved_by': state.approved_by,
            'title': state.title,
            'head_ref': state.head_ref,
            'mergeable': ('yes' if state.mergeable is True else
                          'no' if state.mergeable is False else ''),
            'assignee': state.assignee,
            'repo_label': state.repo_label,
            'repo_url': 'https://github.com/{}/{}'.format(state.owner,
                                                          state.name),
            'greyed': "treeclosed" if treeclosed else "",
        })
    return g.tpls['queue'].render(
        repo_url=repo_url,
        repo_label=repo_label,
        treeclosed=single_repo_closed,
        treeclosed_src=treeclosed_src,
        states=rows,
        oauth_client_id=g.cfg['github']['app_client_id'],
        total=len(pull_states),
        approved=len([x for x in pull_states if x.approved_by]),
        rolled_up=len([x for x in pull_states if x.rollup > 0]),
        failed=len([x for x in pull_states if x.status == 'failure' or
                    x.status == 'error']),
        multiple=multiple,
    )
@get('/retry_log/<repo_label:path>')
def retry_log(repo_label):
    """Render the recorded retry-log entries for one repository, newest first."""
    if repo_label not in g.cfg['repo']:
        abort(404)
    logger = g.logger.getChild('retry_log')
    lazy_debug(logger, lambda: 'repo_label: {}'.format(repo_label))
    cfg_entry = g.cfg['repo'][repo_label]
    repo_url = 'https://github.com/{}/{}'.format(
        cfg_entry['owner'],
        cfg_entry['name'],
    )
    db_query(
        g.db,
        '''
            SELECT num, time, src, msg FROM retry_log
            WHERE repo = ? ORDER BY time DESC
        ''',
        [repo_label],
    )
    columns = ('num', 'time', 'src', 'msg')
    logs = [dict(zip(columns, row)) for row in g.db.fetchall()]
    return g.tpls['retry_log'].render(
        repo_url=repo_url,
        repo_label=repo_label,
        logs=logs,
    )
@get('/callback')
def callback():
    # GitHub OAuth callback: exchange the temporary `code` for an access token, then
    # dispatch to the command encoded in the opaque `state` query parameter.
    logger = g.logger.getChild('callback')
    response.content_type = 'text/plain'
    code = request.query.code
    state = json.loads(request.query.state)
    lazy_debug(logger, lambda: 'state: {}'.format(state))
    oauth_url = 'https://github.com/login/oauth/access_token'
    try:
        res = requests.post(oauth_url, data={
            'client_id': g.cfg['github']['app_client_id'],
            'client_secret': g.cfg['github']['app_client_secret'],
            'code': code,
        })
    except Exception as ex:
        logger.warn('/callback encountered an error '
                    'during github oauth callback')
        lazy_debug(
            logger,
            lambda ex=ex: 'github oauth callback err: {}'.format(ex),
        )
        abort(502, 'Bad Gateway')
    # The token endpoint replies with a urlencoded body (access_token=...&scope=...).
    args = urllib.parse.parse_qs(res.text)
    token = args['access_token'][0]
    repo_label = state['repo_label']
    repo_cfg = g.repo_cfgs[repo_label]
    repo = get_repo(repo_label, repo_cfg)
    # Act on behalf of the user who authorized, not the bot account.
    user_gh = github3.login(token=token)
    if state['cmd'] == 'rollup':
        return rollup(user_gh, state, repo_label, repo_cfg, repo)
    elif state['cmd'] == 'synch':
        return synch(user_gh, state, repo_label, repo_cfg, repo)
    else:
        abort(400, 'Invalid command')
def rollup(user_gh, state, repo_label, repo_cfg, repo):
    # Build a rollup branch on the user's fork, merge each candidate PR into it, and
    # open one combined PR against the base repository.
    user_repo = user_gh.repository(user_gh.user().login, repo.name)
    if user_repo is None:
        return 'You must have a fork of rust-lang/rust named rust under your user account.' # noqa
    base_repo = user_gh.repository(repo.owner.login, repo.name)
    # An explicit PR list (from the queue page) wins over "everything marked rollup".
    nums = state.get('nums', [])
    if nums:
        try:
            rollup_states = [g.states[repo_label][num] for num in nums]
        except KeyError as e:
            return 'Invalid PR number: {}'.format(e.args[0])
    else:
        rollup_states = [x for x in g.states[repo_label].values() if x.rollup]
    rollup_states = [x for x in rollup_states if x.approved_by]
    rollup_states.sort(key=lambda x: x.num)
    if not rollup_states:
        return 'No pull requests are marked as rollup'
    # All candidates must target the base ref of the first (lowest-numbered) PR.
    base_ref = rollup_states[0].base_ref
    base_sha = repo.ref('heads/' + base_ref).object.sha
    branch_name = 'rollup-' + ''.join(
        random.choice(string.digits + string.ascii_lowercase) for _ in range(7)
    )
    utils.github_set_ref(
        user_repo,
        'heads/' + branch_name,
        base_sha,
        force=True,
    )
    successes = []
    failures = []
    # NOTE(review): the loop below rebinds `state` (shadowing the OAuth state dict), and
    # base-ref mismatches append `state.num` (an int) while merge conflicts append the
    # state object — the failure formatting below assumes objects; confirm intent.
    for state in rollup_states:
        if base_ref != state.base_ref:
            failures.append(state.num)
            continue
        state.body = suppress_pings(state.body)
        state.body = suppress_ignore_block(state.body)
        merge_msg = 'Rollup merge of #{} - {}, r={}\n\n{}\n\n{}'.format(
            state.num,
            state.head_ref,
            state.approved_by,
            state.title,
            state.body,
        )
        try:
            user_repo.merge(branch_name, state.head_sha, merge_msg)
        except github3.models.GitHubError as e:
            # 409 means a merge conflict; anything else is unexpected and re-raised.
            if e.code != 409:
                raise
            failures.append(state)
        else:
            successes.append(state)
    title = 'Rollup of {} pull requests'.format(len(successes))
    body = 'Successful merges:\n\n'
    for x in successes:
        body += ' - #{} ({})\n'.format(x.num, x.title)
    body += '\nFailed merges:\n\n'
    for x in failures:
        body += ' - #{} ({})\n'.format(x.num, x.title)
    body += '\nr? @ghost\n@rustbot modify labels: rollup'
    # Set web.base_url in cfg to enable
    base_url = g.cfg['web'].get('base_url')
    if not base_url:
        # If web.base_url is not present, fall back to using web.canonical_url
        base_url = g.cfg['web'].get('canonical_url')
    if base_url:
        pr_list = ','.join(str(x.num) for x in successes)
        link = '{}/queue/{}?prs={}'.format(base_url, repo_label, pr_list)
        body += '\n'
        body += IGNORE_BLOCK_START
        body += '\n[Create a similar rollup]({})\n'.format(link)
        body += IGNORE_BLOCK_END
    try:
        # NOTE(review): `state` here is the last loop variable, so the rollup PR targets
        # the base ref of the last candidate — presumably equal to `base_ref`; confirm.
        pull = base_repo.create_pull(
            title,
            state.base_ref,
            user_repo.owner.login + ':' + branch_name,
            body,
        )
    except github3.models.GitHubError as e:
        return e.response.text
    else:
        redirect(pull.html_url)
@post('/github')
def github():
    # GitHub webhook endpoint: verify the HMAC signature, then dispatch on the
    # X-Github-Event type to update in-memory PR state and kick the merge queue.
    logger = g.logger.getChild('github')
    response.content_type = 'text/plain'
    payload = request.body.read()
    info = request.json
    lazy_debug(logger, lambda: 'info: {}'.format(utils.remove_url_keys_from_json(info))) # noqa
    owner_info = info['repository']['owner']
    owner = owner_info.get('login') or owner_info['name']
    repo_label = g.repo_labels[owner, info['repository']['name']]
    repo_cfg = g.repo_cfgs[repo_label]
    # Verify the webhook signature against the per-repo shared secret; the header is
    # of the form "<digest-name>=<hexdigest>".
    hmac_method, hmac_sig = request.headers['X-Hub-Signature'].split('=')
    if hmac_sig != hmac.new(
        repo_cfg['github']['secret'].encode('utf-8'),
        payload,
        hmac_method,
    ).hexdigest():
        abort(400, 'Invalid signature')
    event_type = request.headers['X-Github-Event']
    if event_type == 'pull_request_review_comment':
        action = info['action']
        original_commit_id = info['comment']['original_commit_id']
        head_sha = info['pull_request']['head']['sha']
        # Only react to new review comments attached to the current head commit.
        if action == 'created' and original_commit_id == head_sha:
            pull_num = info['pull_request']['number']
            body = info['comment']['body']
            username = info['sender']['login']
            user_id = info['sender']['id']
            state = g.states[repo_label].get(pull_num)
            if state:
                state.title = info['pull_request']['title']
                state.body = info['pull_request']['body']
                if parse_commands(
                    body,
                    username,
                    user_id,
                    repo_label,
                    repo_cfg,
                    state,
                    g.my_username,
                    g.db,
                    g.states,
                    realtime=True,
                    sha=original_commit_id,
                    command_src=info['comment']['html_url'],
                ):
                    state.save()
                    g.queue_handler()
    elif event_type == 'pull_request':
        action = info['action']
        pull_num = info['number']
        head_sha = info['pull_request']['head']['sha']
        if action == 'synchronize':
            # New commits were pushed to the PR branch.
            state = g.states[repo_label][pull_num]
            state.head_advanced(head_sha)
            state.save()
        elif action in ['opened', 'reopened']:
            # Build a fresh tracking state for the PR.
            state = PullReqState(pull_num, head_sha, '', g.db, repo_label,
                                 g.mergeable_que, g.gh,
                                 info['repository']['owner']['login'],
                                 info['repository']['name'],
                                 repo_cfg.get('labels', {}),
                                 g.repos,
                                 repo_cfg.get('test-on-fork'))
            state.title = info['pull_request']['title']
            state.body = info['pull_request']['body']
            state.head_ref = info['pull_request']['head']['repo']['owner']['login'] + ':' + info['pull_request']['head']['ref'] # noqa
            state.base_ref = info['pull_request']['base']['ref']
            state.set_mergeable(info['pull_request']['mergeable'])
            state.assignee = (info['pull_request']['assignee']['login'] if
                              info['pull_request']['assignee'] else '')
            found = False
            if action == 'reopened':
                # FIXME: Review comments are ignored here
                # Replay past issue comments so commands issued before the close still apply.
                for c in state.get_repo().issue(pull_num).iter_comments():
                    found = parse_commands(
                        c.body,
                        c.user.login,
                        c.user.id,
                        repo_label,
                        repo_cfg,
                        state,
                        g.my_username,
                        g.db,
                        g.states,
                        command_src=c.to_json()['html_url'],
                        # FIXME switch to `c.html_url`
                        # after updating github3 to 1.3.0+
                    ) or found
                status = ''
                # NOTE(review): this loop rebinds the webhook payload variable `info`;
                # no later code in this branch reads the original payload, so it is
                # harmless here — but fragile if code is added below.
                for info in utils.github_iter_statuses(state.get_repo(),
                                                       state.head_sha):
                    if info.context == 'homu':
                        status = info.state
                        break
                state.set_status(status)
            state.save()
            g.states[repo_label][pull_num] = state
            if found:
                g.queue_handler()
        elif action == 'closed':
            state = g.states[repo_label][pull_num]
            if hasattr(state, 'fake_merge_sha'):
                # Undo the artificial "fake merge" commit by force-resetting the base
                # branch back to the real merge sha, with retries.
                def inner():
                    utils.github_set_ref(
                        state.get_repo(),
                        'heads/' + state.base_ref,
                        state.merge_sha,
                        force=True,
                    )

                def fail(err):
                    state.add_comment(':boom: Failed to recover from the '
                                      'artificial commit. See {} for details.'
                                      ' ({})'.format(state.fake_merge_sha,
                                                     err))

                utils.retry_until(inner, fail, state)
            # Drop every trace of the PR from memory and the database.
            del g.states[repo_label][pull_num]
            db_query(g.db, 'DELETE FROM pull WHERE repo = ? AND num = ?',
                     [repo_label, pull_num])
            db_query(g.db, 'DELETE FROM build_res WHERE repo = ? AND num = ?',
                     [repo_label, pull_num])
            db_query(g.db, 'DELETE FROM mergeable WHERE repo = ? AND num = ?',
                     [repo_label, pull_num])
            g.queue_handler()
        elif action in ['assigned', 'unassigned']:
            state = g.states[repo_label][pull_num]
            state.assignee = (info['pull_request']['assignee']['login'] if
                              info['pull_request']['assignee'] else '')
            state.save()
        elif action == 'edited':
            state = g.states[repo_label][pull_num]
            base_ref = info['pull_request']['base']['ref']
            if state.base_ref != base_ref:
                state.base_ref = base_ref
                state.set_mergeable(None)
                # Remove PR approval when the branch changes, to prevent the PR
                # authors to merge the changes on other branches
                if state.get_status() != '':
                    state.approved_by = ''
                    state.set_status('')
                    state.change_labels(LabelEvent.PUSHED)
                    state.add_comment(
                        ':warning: The base branch changed to `{}`, and the '
                        'PR will need to be re-approved.\n\n'
                        '<!-- @{} r- -->'.format(base_ref, g.my_username)
                    )
            state.title = info['pull_request']['title']
            state.body = info['pull_request']['body']
            state.save()
        else:
            lazy_debug(logger, lambda: 'Invalid pull_request action: {}'.format(action)) # noqa
    elif event_type == 'push':
        ref = info['ref'][len('refs/heads/'):]
        for state in list(g.states[repo_label].values()):
            if state.base_ref == ref:
                # The target branch moved; mergeability must be recomputed.
                state.set_mergeable(None, cause={
                    'sha': info['head_commit']['id'],
                    'title': info['head_commit']['message'].splitlines()[0],
                })
            if state.head_sha == info['before']:
                # The PR branch itself advanced; invalidate any pending approval state.
                if state.status:
                    state.change_labels(LabelEvent.PUSHED)
                state.head_advanced(info['after'])
                state.save()
    elif event_type == 'issue_comment':
        action = info['action']
        body = info['comment']['body']
        username = info['comment']['user']['login']
        user_id = info['comment']['user']['id']
        pull_num = info['issue']['number']
        state = g.states[repo_label].get(pull_num)
        # Only new comments on issues that are pull requests we currently track.
        if action == 'created' and 'pull_request' in info['issue'] and state:
            state.title = info['issue']['title']
            state.body = info['issue']['body']
            if parse_commands(
                body,
                username,
                user_id,
                repo_label,
                repo_cfg,
                state,
                g.my_username,
                g.db,
                g.states,
                realtime=True,
                command_src=info['comment']['html_url'],
            ):
                state.save()
                g.queue_handler()
    elif event_type == 'status':
        try:
            state, repo_label = find_state(info['sha'])
        except ValueError:
            return 'OK'
        # Map the commit-status context back to a configured builder name.
        status_name = ""
        if 'status' in repo_cfg:
            for name, value in repo_cfg['status'].items():
                if 'context' in value and value['context'] == info['context']:
                    status_name = name
        if status_name == "":
            return 'OK'
        if info['state'] == 'pending':
            return 'OK'
        # Ignore statuses reported on the base branch rather than the test merge.
        for row in info['branches']:
            if row['name'] == state.base_ref:
                return 'OK'
        report_build_res(info['state'] == 'success', info['target_url'],
                         'status-' + status_name, state, logger, repo_cfg)
    elif event_type == 'check_run':
        try:
            state, repo_label = find_state(info['check_run']['head_sha'])
        except ValueError:
            return 'OK'
        current_run_name = info['check_run']['name']
        # Map the check-run name back to a configured builder, preferring the
        # try-specific name when the PR is in a try build.
        checks_name = None
        if 'checks' in repo_cfg:
            for name, value in repo_cfg['checks'].items():
                if state.try_ and 'try_name' in value:
                    if value['try_name'] == current_run_name:
                        checks_name = name
                elif 'name' in value and value['name'] == current_run_name:
                    checks_name = name
        if checks_name is None:
            return 'OK'
        if info['check_run']['status'] != 'completed':
            return 'OK'
        if info['check_run']['conclusion'] is None:
            return 'OK'
        # GHA marks jobs as skipped, if they are not run due to the job
        # condition. This prevents bors from failing because of these jobs.
        if info['check_run']['conclusion'] == 'skipped':
            return 'OK'
        report_build_res(
            info['check_run']['conclusion'] == 'success',
            info['check_run']['details_url'],
            'checks-' + checks_name,
            state, logger, repo_cfg,
        )
    return 'OK'
def report_build_res(succ, url, builder, state, logger, repo_cfg):
    """Record one builder's result on `state` and finish the build if done.

    succ     -- True if this builder reported success
    url      -- link to the builder output; reused in statuses and comments
    builder  -- builder key into state.build_res
    state    -- per-PR state object (project type; tracks results/labels)
    repo_cfg -- per-repository configuration dict

    On overall success of an approved (non-try) build, fast-forwards the
    base branch to the merge commit; on failure marks the PR failed.
    Always pokes the queue handler at the end.
    """
    lazy_debug(logger,
               lambda: 'build result {}: builder = {}, succ = {}, current build_res = {}' # noqa
                       .format(state, builder, succ,
                               state.build_res_summary()))

    state.set_build_res(builder, succ, url)

    if succ:
        # only conclude once every registered builder has reported success
        if all(x['res'] for x in state.build_res.values()):
            state.set_status('success')
            utils.github_create_status(
                state.get_repo(), state.head_sha,
                'success', url, "Test successful", context='homu'
            )

            if state.approved_by and not state.try_:
                # real (approved) build: announce, then fast-forward the base ref
                state.add_comment(comments.BuildCompleted(
                    approved_by=state.approved_by,
                    base_ref=state.base_ref,
                    builders={k: v["url"] for k, v in state.build_res.items()},
                    merge_sha=state.merge_sha,
                ))
                state.change_labels(LabelEvent.SUCCEED)

                def set_ref():
                    # move base_ref (and the fork mirror, if configured) to merge_sha
                    utils.github_set_ref(state.get_repo(), 'heads/' +
                                         state.base_ref, state.merge_sha)
                    if state.test_on_fork is not None:
                        utils.github_set_ref(state.get_test_on_fork_repo(),
                                             'heads/' + state.base_ref,
                                             state.merge_sha, force=True)
                try:
                    try:
                        set_ref()
                    except github3.models.GitHubError:
                        # branch protection may require a 'homu' status on the
                        # merge commit itself; create it and retry once
                        utils.github_create_status(
                            state.get_repo(),
                            state.merge_sha,
                            'success', '',
                            'Branch protection bypassed',
                            context='homu')
                        set_ref()

                    state.fake_merge(repo_cfg)
                except github3.models.GitHubError as e:
                    # tests passed but the push failed; surface the error
                    state.set_status('error')
                    desc = ('Test was successful, but fast-forwarding failed:'
                            ' {}'.format(e))
                    utils.github_create_status(state.get_repo(),
                                               state.head_sha, 'error', url,
                                               desc, context='homu')
                    state.add_comment(':eyes: ' + desc)
            else:
                # try build: report only, never push
                state.add_comment(comments.TryBuildCompleted(
                    builders={k: v["url"] for k, v in state.build_res.items()},
                    merge_sha=state.merge_sha,
                ))
                state.change_labels(LabelEvent.TRY_SUCCEED)
    else:
        # first failure wins; ignore late results once no longer pending
        if state.status == 'pending':
            state.set_status('failure')
            utils.github_create_status(
                state.get_repo(), state.head_sha,
                'failure', url, "Test failed", context='homu'
            )
            if state.try_:
                state.add_comment(comments.TryBuildFailed(
                    builder_url=url,
                    builder_name=builder,
                ))
                state.change_labels(LabelEvent.TRY_FAILED)
            else:
                state.add_comment(comments.BuildFailed(
                    builder_url=url,
                    builder_name=builder,
                ))
                state.change_labels(LabelEvent.FAILED)

    g.queue_handler()
@post('/buildbot')
def buildbot():
    """Webhook endpoint for Buildbot event packets.

    Accepts a form field `packets` (JSON list of events) plus a `secret`
    that must match the repository's configured buildbot secret.  Handles
    `buildFinished` (record the result, or detect a homu-initiated
    interruption) and `buildStarted` (mark the builder as running and free
    the buildbot slot).
    """
    logger = g.logger.getChild('buildbot')

    response.content_type = 'text/plain'

    for row in json.loads(request.forms.packets):
        if row['event'] == 'buildFinished':
            info = row['payload']['build']
            lazy_debug(logger, lambda: 'info: {}'.format(info))
            # Buildbot properties arrive as [name, value, source] triples
            props = dict(x[:2] for x in info['properties'])

            # retried builds will report again later; ignore this result
            if 'retry' in info['text']:
                continue

            if not props['revision']:
                continue

            try:
                state, repo_label = find_state(props['revision'])
            except ValueError:
                lazy_debug(logger,
                           lambda: 'Invalid commit ID from Buildbot: {}'.format(props['revision'])) # noqa
                continue

            lazy_debug(logger, lambda: 'state: {}, {}'.format(state, state.build_res_summary())) # noqa

            if info['builderName'] not in state.build_res:
                lazy_debug(logger,
                           lambda: 'Invalid builder from Buildbot: {}'.format(info['builderName'])) # noqa
                continue

            repo_cfg = g.repo_cfgs[repo_label]

            if request.forms.secret != repo_cfg['buildbot']['secret']:
                abort(400, 'Invalid secret')

            build_succ = 'successful' in info['text'] or info['results'] == 0

            url = '{}/builders/{}/builds/{}'.format(
                repo_cfg['buildbot']['url'],
                info['builderName'],
                props['buildnumber'],
            )

            if 'interrupted' in info['text']:
                # find the step that was interrupted to fetch its log
                step_name = ''
                for step in reversed(info['steps']):
                    if 'interrupted' in step.get('text', []):
                        step_name = step['name']
                        break

                if step_name:
                    try:
                        url = ('{}/builders/{}/builds/{}/steps/{}/logs/interrupt' # noqa
                               ).format(repo_cfg['buildbot']['url'],
                                        info['builderName'],
                                        props['buildnumber'],
                                        step_name,)
                        res = requests.get(url)
                    except Exception as ex:
                        logger.warn('/buildbot encountered an error during '
                                    'github logs request')
                        lazy_debug(
                            logger,
                            lambda ex=ex: 'buildbot logs err: {}'.format(ex),
                        )
                        abort(502, 'Bad Gateway')

                    # did homu itself interrupt this build (to reprioritize)?
                    mat = INTERRUPTED_BY_HOMU_RE.search(res.text)
                    if mat:
                        interrupt_token = mat.group(1)
                        if getattr(state, 'interrupt_token',
                                   '') != interrupt_token:
                            state.interrupt_token = interrupt_token

                            if state.status == 'pending':
                                state.set_status('')

                                desc = (':snowman: The build was interrupted '
                                        'to prioritize another pull request.')
                                state.add_comment(desc)
                                state.change_labels(LabelEvent.INTERRUPTED)
                                utils.github_create_status(state.get_repo(),
                                                           state.head_sha,
                                                           'error', url,
                                                           desc,
                                                           context='homu')

                            g.queue_handler()

                        # homu-initiated interruption: not a real result
                        continue
                else:
                    logger.error('Corrupt payload from Buildbot')

            report_build_res(build_succ, url, info['builderName'],
                             state, logger, repo_cfg)
        elif row['event'] == 'buildStarted':
            info = row['payload']['build']
            lazy_debug(logger, lambda: 'info: {}'.format(info))
            props = dict(x[:2] for x in info['properties'])

            if not props['revision']:
                continue

            try:
                state, repo_label = find_state(props['revision'])
            except ValueError:
                pass
            else:
                if info['builderName'] in state.build_res:
                    repo_cfg = g.repo_cfgs[repo_label]

                    if request.forms.secret != repo_cfg['buildbot']['secret']:
                        abort(400, 'Invalid secret')

                    url = '{}/builders/{}/builds/{}'.format(
                        repo_cfg['buildbot']['url'],
                        info['builderName'],
                        props['buildnumber'],
                    )
                    # None = in progress
                    state.set_build_res(info['builderName'], None, url)

            # free the slot once the queued revision actually starts
            if g.buildbot_slots[0] == props['revision']:
                g.buildbot_slots[0] = ''

                g.queue_handler()

    return 'OK'
@get('/assets/<file:path>')
def server_static(file):
    """Serve a static asset from the package-local `assets` directory."""
    asset_root = os.path.join(os.path.dirname(__file__), 'assets')
    return bottle.static_file(file, root=asset_root)
def synch(user_gh, state, repo_label, repo_cfg, repo):
    """Kick off a background resynchronization of one repository.

    Rejects the request (HTTP 400) unless the requesting GitHub user is a
    collaborator on the repository.  The actual work runs on a daemon-less
    Thread via `synchronize`.
    """
    try:
        if not repo.is_collaborator(user_gh.user().login):
            abort(400, 'You are not a collaborator')
    except github3.GitHubError as e:
        # 403 here means homu's own token lacks access, not the user
        if e.code == 403:
            abort(400, 'Homu does not have write access on the repository')
        raise e

    Thread(target=synchronize, args=[repo_label, repo_cfg, g.logger,
                                     g.gh, g.states, g.repos, g.db,
                                     g.mergeable_que, g.my_username,
                                     g.repo_labels]).start()

    return 'Synchronizing {}...'.format(repo_label)
def synch_all():
    """Synchronize every configured repository, one after another.

    Each repository is retried with exponential backoff (1 s doubling up
    to 10 min) until it succeeds; errors are printed and re-raised so the
    @retry decorator schedules another attempt.
    """
    @retry(wait_exponential_multiplier=1000, wait_exponential_max=600000)
    def sync_repo(repo_label, g):
        try:
            synchronize(repo_label, g.repo_cfgs[repo_label], g.logger, g.gh,
                        g.states, g.repos, g.db, g.mergeable_que,
                        g.my_username, g.repo_labels)
        except Exception:
            print('* Error while synchronizing {}'.format(repo_label))
            traceback.print_exc()
            raise

    for repo_label in g.repos:
        sync_repo(repo_label, g)

    print('* Done synchronizing all')
@post('/admin')
def admin():
    """Administrative JSON endpoint.

    Expects a JSON body with `secret` (must match the web secret) and
    `cmd`, one of: repo_new (register and synchronize a repository),
    repo_del (drop a repository and its DB rows), repo_edit (replace the
    config of an existing repository; owner/name must not change), or
    sync_all (resynchronize everything in the background).
    """
    if request.json['secret'] != g.cfg['web']['secret']:
        return 'Authentication failure'

    if request.json['cmd'] == 'repo_new':
        repo_label = request.json['repo_label']
        repo_cfg = request.json['repo_cfg']

        # register empty in-memory state; the repo handle is created lazily
        g.states[repo_label] = {}
        g.repos[repo_label] = None
        g.repo_cfgs[repo_label] = repo_cfg
        g.repo_labels[repo_cfg['owner'], repo_cfg['name']] = repo_label

        Thread(target=synchronize, args=[repo_label, repo_cfg, g.logger,
                                         g.gh, g.states, g.repos, g.db,
                                         g.mergeable_que, g.my_username,
                                         g.repo_labels]).start()
        return 'OK'

    elif request.json['cmd'] == 'repo_del':
        repo_label = request.json['repo_label']
        repo_cfg = g.repo_cfgs[repo_label]

        # purge every persisted row for this repository
        db_query(g.db, 'DELETE FROM pull WHERE repo = ?', [repo_label])
        db_query(g.db, 'DELETE FROM build_res WHERE repo = ?', [repo_label])
        db_query(g.db, 'DELETE FROM mergeable WHERE repo = ?', [repo_label])

        del g.states[repo_label]
        del g.repos[repo_label]
        del g.repo_cfgs[repo_label]
        del g.repo_labels[repo_cfg['owner'], repo_cfg['name']]
        return 'OK'

    elif request.json['cmd'] == 'repo_edit':
        repo_label = request.json['repo_label']
        repo_cfg = request.json['repo_cfg']

        # editing may not re-point the label at a different repository
        assert repo_cfg['owner'] == g.repo_cfgs[repo_label]['owner']
        assert repo_cfg['name'] == g.repo_cfgs[repo_label]['name']

        g.repo_cfgs[repo_label] = repo_cfg
        return 'OK'

    elif request.json['cmd'] == 'sync_all':
        Thread(target=synch_all).start()
        return 'OK'

    return 'Unrecognized command'
@get('/health')
def health():
    """Liveness probe; always returns OK (exempt from canonical redirects)."""
    return 'OK'
@error(404)
def not_found(error):
    """Render the custom 404 template for unknown routes."""
    return g.tpls['404'].render()
def redirect_to_canonical_host():
    """bottle before_request hook: 301-redirect to the canonical URL.

    Rewrites the scheme/host to `web.canonical_url` (if configured) and
    strips any prefixes listed in `web.remove_path_prefixes`; only issues
    a redirect when the rewritten URL actually differs.
    """
    request_url = urllib.parse.urlparse(request.url)
    redirect_url = request_url

    # Disable redirects on the health check endpoint.
    if request_url.path == "/health":
        return

    # Handle hostname changes
    if "canonical_url" in g.cfg["web"]:
        canonical_url = urllib.parse.urlparse(g.cfg["web"]["canonical_url"])
        redirect_url = redirect_url._replace(
            scheme=canonical_url.scheme,
            netloc=canonical_url.netloc,
        )

    # Handle path changes
    for prefix in g.cfg["web"].get("remove_path_prefixes", []):
        if redirect_url.path.startswith("/" + prefix + "/"):
            # drop "/<prefix>" while keeping the leading slash of the rest
            new_path = redirect_url.path[len(prefix)+1:]
            redirect_url = redirect_url._replace(path=new_path)
        elif redirect_url.path == "/" + prefix:
            redirect_url = redirect_url._replace(path="/")

    if request_url != redirect_url:
        redirect(urllib.parse.urlunparse(redirect_url), 301)
def start(cfg, states, queue_handler, repo_cfgs, repos, logger,
          buildbot_slots, my_username, db, repo_labels, mergeable_que, gh):
    """Configure and run the web frontend (blocking).

    Loads the Jinja2 templates, publishes all shared state on the module
    global `g`, installs the canonical-host redirect hook, optionally
    synchronizes all repositories on startup, and serves via waitress.
    Exits the whole process if the server cannot start (e.g. port in use).
    """
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(pkg_resources.resource_filename(__name__, 'html')), # noqa
        autoescape=True,
    )
    env.globals["announcement"] = cfg["web"].get("announcement")
    tpls = {}
    tpls['index'] = env.get_template('index.html')
    tpls['queue'] = env.get_template('queue.html')
    tpls['build_res'] = env.get_template('build_res.html')
    tpls['retry_log'] = env.get_template('retry_log.html')
    tpls['404'] = env.get_template('404.html')

    # share everything the request handlers need through the `g` namespace
    g.cfg = cfg
    g.states = states
    g.queue_handler = queue_handler
    g.repo_cfgs = repo_cfgs
    g.repos = repos
    g.logger = logger.getChild('server')
    g.buildbot_slots = buildbot_slots
    g.tpls = tpls
    g.my_username = my_username
    g.db = db
    g.repo_labels = repo_labels
    g.mergeable_que = mergeable_que
    g.gh = gh

    bottle.app().add_hook("before_request", redirect_to_canonical_host)

    # Synchronize all PR data on startup
    if cfg['web'].get('sync_on_start', False):
        Thread(target=synch_all).start()

    try:
        run(host=cfg['web'].get('host', '0.0.0.0'),
            port=cfg['web']['port'],
            server='waitress')
    except OSError as e:
        print(e, file=sys.stderr)
        # hard-exit: without the web server the whole daemon is useless
        os._exit(1)
|
esempio2.py | from threading import Thread, Lock
import time
import logging
from random import randrange
def thread_function(name):
    """Toy worker body: log start, idle for a random 0-4 s, log finish."""
    logging.info("Thread %s : starting", name)
    delay = randrange(5)
    time.sleep(delay)
    logging.info("Thread %s : finishing", name)
if __name__ == "__main__":
    # Renamed from `format`: the original shadowed the `format` builtin.
    log_format = "%(asctime)s: %(message)s"
    logging.basicConfig(format=log_format, level=logging.INFO, datefmt="%H:%M:%S")

    logging.info("Main : before creating threads")
    x = Thread(target=thread_function, args=(1,))
    y = Thread(target=thread_function, args=(2,))

    logging.info("Main : before running threads")
    x.start()
    y.start()

    logging.info("Main : wait for the threads to finish")
    x.join()
    y.join()
    logging.info("Main : all done")
|
mcom_rt.py | import os, copy, atexit, time, gzip, threading, zlib, asyncio
import numpy as np
from colorama import init
from multiprocessing import Process
from UTILS.colorful import *
from UTILS.network import get_host_ip, find_free_port
# Names of all drawing commands; class `mcom` below exec-generates one
# forwarding method per name (each simply calls self.other_cmd(name, ...)).
mcom_fn_list_define = [
    "v2dx", "flash", "plot", "figure", "hold", "box", "pause", "clf", "xlim", "ylim", "xlabel",
    "ylabel", "drawnow", "v2d", "v2d_init", "v3d_init", "v2L", "title", "plot3", "grid", "v3dx", "v2d_show",
    "v2d_pop", "v2d_line_object", "v2d_clear", "v2d_add_terrain", "set_style", "set_env", "use_geometry",
    "geometry_rotate_scale_translate", "test_function_terrain", 'line3d', 'advanced_geometry_rotate_scale_translate',
    "advanced_geometry_material", "skip"
]

# Alias table ("alias alignment"): Chinese-named aliases mapped to the
# canonical command names above; also exec-bound as class attributes.
别名对齐 = [
    ('初始化3D', 'v2d_init'),
    ('设置样式', 'set_style'),
    ('形状之旋转缩放和平移', 'geometry_rotate_scale_translate'),
    ('其他几何体之旋转缩放和平移', 'advanced_geometry_rotate_scale_translate'),
    ('其他几何体之材质', 'advanced_geometry_material'),
    ('发送几何体', 'v2dx'),
    ('结束关键帧', 'v2d_show'),
    ('发送线条', 'line3d'),
    ('发射光束', 'flash'),
    ('空指令', 'skip'),
]

# The Design Principle: Under No Circumstance should this program Interrupt the main program!
class mcom():
    """File-backed plotting/telemetry client.

    Every drawing command is serialized to a text line, appended to a
    numbered session buffer file under `path`, and (optionally) streamed
    over a local TCP queue to a separate drawing process (matplotlib- or
    threejs-based).  Designed never to interrupt the main program.
    """
    def __init__(self, path=None, digit=8, rapid_flush=True, draw_mode=False, tag='default', **kargs):
        # digit: float precision of logged values; default 8, 4/16 also allowed.
        #   Smaller means lighter load (all data is float, you do not need anything else).
        # rapid_flush: flush the file buffer promptly when the data flow is small
        #   (set 'False' if you'd like your SSD to survive longer).
        self.draw_mode = draw_mode
        self.path = path
        self.digit = digit
        self.tag = tag
        if kargs is None: kargs = {}
        if draw_mode in ['Web', 'Native', 'Img', 'Threejs']:
            # spawn a dedicated drawing process, fed through a local TCP queue
            self.draw_process = True
            port = find_free_port()
            print亮红('[mcom.py]: draw process active!')
            self.draw_tcp_port = ('localhost', port)
            kargs.update({
                'draw_mode': draw_mode,
                'draw_udp_port': self.draw_tcp_port,
                'port': self.draw_tcp_port,
                'backup_file': self.path + '/backup.dp.gz'
            })
            DP = DrawProcess if draw_mode != 'Threejs' else DrawProcessThreejs
            self.draw_proc = DP(**kargs)
            self.draw_proc.start()
            from UTILS.network import QueueOnTcpClient
            self.draw_tcp_client = QueueOnTcpClient('localhost:%d'%port)
        else:
            print亮红('[mcom.py]: Draw process off! No plot will be done')
            self.draw_process = False
        if not self.draw_mode=='Threejs':
            # Threejs mode keeps its own backup; otherwise open a fresh numbered
            # session buffer file in `path`.
            _, _, self.current_buffer_index = find_where_to_log(self.path)
            self.starting_file = self.path + '/mcom_buffer_%d____starting_session.txt' % (self.current_buffer_index)
            self.file_lines_cnt = 0
            self.file_max_lines = 5e8 # limit file lines to avoid a very large file
            self.rapid_flush = rapid_flush
            self.flow_cnt = 0
            print蓝('[mcom.py]: log file at:' + self.starting_file)
            self.current_file_handle = open(self.starting_file, 'w+', encoding = "utf-8")
        atexit.register(lambda: self.__del__())

    # on the end of the program
    def __del__(self):
        """Close the log file and tear down the draw process (idempotent)."""
        if hasattr(self,'_deleted_'): return # avoid exit twice
        else: self._deleted_ = True # avoid exit twice
        print红('[mcom.py]: mcom exiting! tag: %s'%self.tag)
        if hasattr(self, 'current_file_handle') and self.current_file_handle is not None:
            end_file_flag = ('><EndTaskFlag\n')
            self.current_file_handle.write(end_file_flag)
            self.current_file_handle.close()
        if hasattr(self, 'port') and self.port is not None:
            self.disconnect()
        if hasattr(self, 'draw_proc') and self.draw_proc is not None:
            try:
                self.draw_proc.terminate()
                self.draw_proc.join()
            except:
                pass
        print蓝('[mcom.py]: mcom exited! tag: %s'%self.tag)

    def disconnect(self):
        """Close the TCP link to the draw process."""
        # self.draw_udp_client.close()
        self.draw_tcp_client.close()

    def recall(self, starting_file):
        """Replay a previously recorded buffer file into the draw process.

        Any 'rec_show' line is postponed to the very end so the replayed
        session renders only once (the last occurrence wins).
        """
        with open(starting_file,'rb') as f:
            lines = f.readlines()
            r = None
            for l in lines:
                if 'rec_show' in str(l, encoding='utf8'):
                    r = copy.deepcopy(l)
                    continue
                self.draw_tcp_client.send_str(l)
            if r is not None:
                self.draw_tcp_client.send_str(r)
        return None

    '''
        mcom core function: send out/write str
    '''
    def send(self, data):
        """Core sink: forward `data` to the draw process and append to the log file."""
        # # step 1: send directive to draw process
        if self.draw_process:
            # self.draw_udp_client.sendto(data, self.draw_udp_port)
            self.draw_tcp_client.send_str(data)

        # ! vhmap has its own backup method
        if self.draw_mode=='Threejs': return

        # step 2: add to file
        self.file_lines_cnt += 1
        self.current_file_handle.write(data)
        if self.rapid_flush:
            self.current_file_handle.flush()
        elif self.flow_cnt>500:
            self.current_file_handle.flush()
            self.flow_cnt = 0

        # step 3: check whether the file is too large, if so, move on to next file.
        if self.file_lines_cnt > self.file_max_lines:
            end_file_flag = ('><EndFileFlag\n')
            self.current_file_handle.write(end_file_flag)
            self.current_file_handle.close()
            self.current_buffer_index += 1
            self.current_file_handle = open((self.path + '/mcom_buffer_%d.txt' % self.current_buffer_index), 'wb+')
            self.file_lines_cnt = 0
        return

    def rec_init(self, color='k'):
        """Initialize a recording plot with the given matplotlib color code."""
        str_tmp = '>>rec_init(\'%s\')\n' % color
        self.send(str_tmp)

    def rec_show(self):
        """Request the draw process to render the current recording."""
        self.send('>>rec_show\n')

    def rec_end(self):
        self.send('>>rec_end\n')

    def rec_save(self):
        self.send('>>rec_save\n')

    def rec_end_hold(self):
        self.send('>>rec_end_hold\n')

    def rec_clear(self, name):
        """Clear the recorded curve identified by `name`."""
        str_tmp = '>>rec_clear("%s")\n' % (name)
        self.send(str_tmp)

    def rec(self, value, name):
        """Record scalar `value` under curve `name` at the configured precision."""
        value = float(value)
        if self.digit == 16:
            str_tmp = '>>rec(%.16e,"%s")\n' % (value, name)
        elif self.digit == 8:
            str_tmp = '>>rec(%.8e,"%s")\n' % (value, name)
        elif self.digit == 4:
            str_tmp = '>>rec(%.4e,"%s")\n' % (value, name)
        self.send(str_tmp)

    # "send UE4 data stream": single-agent pose (position + Euler angles)
    def 发送虚幻4数据流(self, x, y, z, pitch, yaw, roll):
        x = float(x)
        y = float(y)
        z = float(z)
        pitch = float(pitch)
        yaw = float(yaw)
        roll = float(roll)
        str_tmp = 'UE4>>(\"agent#1\",%.6e,%.6e,%.6e,%.6e,%.6e,%.6e)\n' % (x, y, z, pitch, yaw, roll)
        self.send(str_tmp)

    # "send UE4 data stream, multi-agent": one pose tuple per agent,
    # ';'-separated in a single command line
    def 发送虚幻4数据流_多智能体(self, x_, y_, z_, pitch_, yaw_, roll_):
        str_list = ['UE4>>']
        for x, y, z, pitch, yaw, roll in zip(x_, y_, z_, pitch_, yaw_, roll_):
            x = float(x)
            y = float(y)
            z = float(z)
            pitch = float(pitch)
            yaw = float(yaw)
            roll = float(roll)
            str_tmp = '(\"agent#1\",%.5e,%.5e,%.5e,%.5e,%.5e,%.5e)' % (x, y, z, pitch, yaw, roll)
            str_list.append(str_tmp)
            str_list.append(';')
        str_list.append('\n')
        cmd = ''.join(str_list)
        self.send(cmd)

    def other_cmd(self, func_name, *args, **kargs):
        """Serialize an arbitrary command `func_name(*args, **kargs)` and send it."""
        # func_name = traceback.extract_stack()[-2][2]
        strlist = ['>>', func_name, '(']
        for _i_ in range(len(args)):
            if isinstance(args[_i_], np.ndarray):
                strlist = self._process_ndarray(args[_i_], strlist)
            else:
                strlist = self._process_scalar(args[_i_], strlist)
        if len(kargs)>0:
            for _key_ in kargs:
                if isinstance(kargs[_key_], np.ndarray):
                    strlist = self._process_ndarray(kargs[_key_], strlist, _key_)
                else:
                    strlist = self._process_scalar(kargs[_key_], strlist, _key_)
        if strlist[len(strlist) - 1] == "(": strlist.append(")\n")
        else: strlist[len(strlist) - 1] = ")\n" # replace the trailing comma with the closing parenthesis
        self.send(''.join(strlist))

    def _process_scalar(self, arg, strlist,key=None):
        """Append the textual form of one scalar argument (plus ',') to strlist."""
        if key is not None: strlist += '%s='%key
        if isinstance(arg, int):
            strlist.append("%d" % arg)
            strlist.append(",")
        elif isinstance(arg, float):
            if self.digit == 16: strlist.append("%.16e" % arg)
            elif self.digit == 8: strlist.append("%.8e" % arg)
            elif self.digit == 4: strlist.append("%.4e" % arg)
            strlist.append(",")
        elif isinstance(arg, str):
            # '$' is reserved as the in-line newline escape
            assert '$' not in arg
            strlist.extend(["\'", arg.replace('\n', '$'), "\'", ","])
        elif isinstance(arg, list):
            strlist.append(str(arg))
            strlist.append(",")
        elif hasattr(arg, 'dtype') and np.issubdtype(arg.dtype, np.integer):
            strlist.append("%d" % arg)
            strlist.append(",")
        elif hasattr(arg, 'dtype') and np.issubdtype(arg.dtype, np.floating):
            if self.digit == 16: strlist.append("%.16e" % arg)
            elif self.digit == 8: strlist.append("%.8e" % arg)
            elif self.digit == 4: strlist.append("%.4e" % arg)
            strlist.append(",")
        else:
            # "cannot handle this argument type"
            print('输入的参数类型不能处理',arg.__class__)
        return strlist

    def _process_ndarray(self, args, strlist, key=None):
        """Append a 1-D ndarray as '[a,b,...]'; higher dims are unsupported."""
        if args.ndim == 1:
            if key is not None: strlist += '%s='%key
            d = len(args)
            sub_list = ["["] + ["%.3e,"%t if (i+1)!=d else "%.3e"%t for i, t in enumerate(args)] + ["]"]
            strlist += sub_list
            strlist.append(",")
        elif args.ndim == 2:
            print红('[mcom]: 输入数组的维度大于1维, 目前处理不了。')
        else:
            print红('[mcom]: 输入数组的维度大于2维, 目前处理不了。')
        return strlist

    # class-level metaprogramming: generate one forwarding method per command
    # name, each delegating to other_cmd
    for fn_name in mcom_fn_list_define:
        build_exec_cmd = 'def %s(self,*args,**kargs):\n self.other_cmd("%s", *args,**kargs)\n'%(fn_name, fn_name)
        exec(build_exec_cmd)

    # bind the Chinese aliases to the generated methods
    for 别名, fn_name in 别名对齐:
        build_exec_cmd = '%s = %s\n'%(别名, fn_name)
        exec(build_exec_cmd)
def find_where_to_log(path):
    """Scan `path` for existing mcom buffer files.

    Returns a tuple (prev_start, prev_end, new): the highest buffer index
    that is a session-start file, the highest buffer index of any kind,
    and the first free buffer index.  Creates `path` if missing.
    """
    if not os.path.exists(path):
        os.makedirs(path)
    last_session_start = None
    last_buffer = None
    idx = 0
    while True:
        has_body = os.path.exists(path + '/mcom_buffer_%d.txt' % idx)
        has_head = os.path.exists(path + '/mcom_buffer_%d____starting_session.txt' % idx)
        if not (has_head or has_body):
            break
        if has_head:
            last_session_start = idx
        last_buffer = idx
        idx += 1
    return last_session_start, last_buffer, idx
class DrawProcessThreejs(Process):
    """Drawing process for the 'Threejs' mode.

    Receives command lines over a TCP queue, buffers them, streams them
    (zlib-compressed) to a browser over a websocket, serves the three.js
    frontend via Flask/waitress, and periodically gzips a backup of all
    received commands.
    """
    def __init__(self, draw_udp_port, draw_mode, **kargs):
        super(DrawProcessThreejs, self).__init__()
        from UTILS.network import QueueOnTcpServer
        self.draw_mode = draw_mode
        self.draw_udp_port = draw_udp_port
        self.tcp_connection = QueueOnTcpServer(self.draw_udp_port)
        self.buffer_list = []
        self.backup_file = kargs['backup_file']
        self.allow_backup = False if self.backup_file is None else True
        if self.allow_backup:
            if os.path.exists(self.backup_file):
                print亮红('[mcom.py]: warning, purge previous 3D visual data!')
                try: os.remove(self.backup_file)
                except: pass
        self.tflush_buffer = []
        self.client_tokens = {}

    def flush_backup(self):
        """Background loop: every 20 s append buffered commands to the gzip backup."""
        while True:
            time.sleep(20)
            if not os.path.exists(os.path.dirname(self.backup_file)):
                os.makedirs(os.path.dirname(self.backup_file))
            # print('Flush backup')
            with gzip.open(self.backup_file, 'at') as f:
                f.writelines(self.tflush_buffer)
                self.tflush_buffer = []
            # print('Flush backup done')

    def init_threejs(self):
        """Start the HTTP server, websocket server and backup-flush threads."""
        http_port = find_free_port()
        ws_port = 8765  # http_port+1
        t = threading.Thread(target=self.run_flask, args=(http_port,))
        t.daemon = True
        t.start()
        t2 = threading.Thread(target=self.run_ws, args=(ws_port,))
        t2.daemon = True
        t2.start()
        time.sleep(2)
        if self.allow_backup:
            self.tflush = threading.Thread(target=self.flush_backup)
            self.tflush.daemon = True
            self.tflush.start()

    def run_ws(self, port):
        """Websocket server loop: push buffered commands to the connected browser.

        Keeps at most one active client (`self.connected_ws`); on reconnect,
        replays the captured initialization commands so a fresh tab gets the
        full scene.
        """
        import asyncio
        import websockets
        self.connected_ws = None
        self.new_ws_connection_flag = False

        async def echo(websocket):
            # a new client replaces the previous one
            self.connected_ws = websocket
            self.new_ws_connection_flag = True
            while True:
                try:
                    # not supposed to receive anything, just to maintain connection
                    await self.connected_ws.recv()
                except websockets.ConnectionClosed:
                    print(f"Previous Websocket Terminated")
                    self.connected_ws = None
                    break

        async def run_ws():
            async with websockets.serve(echo, "localhost", port):
                await asyncio.Future()  # run forever

        self.init_cmd_captured = False
        init_cmd_list = []

        def init_cmd_capture_fn(tosend):
            # collect everything up to (and including) the first keyframe end
            for strx in tosend:
                if '>>v2d_show()\n'==strx:
                    self.init_cmd_captured = True
                init_cmd_list.append(strx)
                if self.init_cmd_captured:
                    break
            return

        async def run_ws_main():
            while True:
                await asyncio.sleep(0.01)
                if self.connected_ws is not None:
                    # data that needs to be sent this round (normal case);
                    # dont send too much in one POST, might overload the network traffic
                    if len(self.buffer_list)>35000:
                        tosend = self.buffer_list[:30000]
                        self.buffer_list = self.buffer_list[30000:]
                    else:
                        tosend = self.buffer_list
                        self.buffer_list = []
                    # handle reconnects: a new token appears when the client reconnects
                    if self.new_ws_connection_flag:
                        self.new_ws_connection_flag = False
                        if (not self.init_cmd_captured):
                            # init commands not captured yet, or this is the first client
                            buf = "".join(tosend)
                        else:
                            print('[mcom.py] If there are other tabs, please close them now.')
                            tosend = [""]
                            buf = "".join(init_cmd_list + tosend)
                    else:
                        # regular, already-connected client
                        buf = "".join(tosend)
                    # try to capture and keep the initialization command prefix
                    if not self.init_cmd_captured:
                        init_cmd_capture_fn(tosend)
                    # use zlib to compress output command, worked out like magic
                    buf = bytes(buf, encoding='utf8')
                    zlib_compress = zlib.compressobj()
                    buf = zlib_compress.compress(buf) + zlib_compress.flush(zlib.Z_FINISH)
                    print('await start')
                    if not self.connected_ws.open: continue
                    await self.connected_ws.send(buf)
                    print('await done')

        async def main():
            task1 = asyncio.create_task(run_ws())
            task2 = asyncio.create_task(run_ws_main())
            await task1
            await task2
        asyncio.run(main())

    def run_flask(self, port):
        """Serve the three.js frontend (static files + main page) via waitress."""
        import json
        from flask import Flask, request, send_from_directory
        from waitress import serve
        from mimetypes import add_type
        add_type('application/javascript', '.js')
        add_type('text/css', '.css')

        app = Flask(__name__)
        dirname = os.path.dirname(__file__) + '/threejsmod'
        import zlib
        self.init_cmd_captured = False
        init_cmd_list = []

        @app.route("/<path:path>")
        def static_dirx(path):
            if path=='favicon.ico':
                return send_from_directory("%s/"%dirname, 'files/HMP.ico')
            return send_from_directory("%s/"%dirname, path)

        @app.route("/")
        def main_app():
            with open('%s/examples/abc_rt.html'%dirname, 'r', encoding = "utf-8") as f:
                buf = f.read()
            return buf

        print('\n--------------------------------')
        print('JS visualizer online: http://%s:%d'%(get_host_ip(), port))
        print('JS visualizer online (localhost): http://localhost:%d'%(port))
        print('--------------------------------')
        # app.run(host='0.0.0.0', port=port)
        serve(app, threads=8, ipv4=True, ipv6=True, listen='*:%d'%port)

    def run(self):
        """Process entry point: drain the TCP queue and buffer incoming commands."""
        self.init_threejs()
        try:
            from queue import Empty
            queue = self.tcp_connection.get_queue()
            self.tcp_connection.wait_connection()  # after this, the queue begin to work
            while True:
                buff_list = []
                buff_list.extend(queue.get(timeout=600))
                for _ in range(queue.qsize()): buff_list.extend(queue.get(timeout=600))
                self.run_handler(buff_list)
        except KeyboardInterrupt:
            self.__del__()
        self.__del__()

    def __del__(self):
        return

    def run_handler(self, new_buff_list):
        """Queue new commands for the websocket sender and the backup flusher."""
        self.buffer_list.extend(new_buff_list)
        self.tflush_buffer.extend(new_buff_list)
        # too many, delete with fifo
        if len(self.buffer_list) > 1e9:
            # once over a billion pending commands, start dropping the oldest
            del self.buffer_list[:len(new_buff_list)]
class DrawProcess(Process):
    """Drawing process for the matplotlib modes ('Web', 'Native', 'Img').

    Receives command lines over a TCP queue and dispatches them to the
    appropriate plotting family (`rec` curves or `v2d` 2-D scenes),
    lazily initializing each family on first use.
    """
    def __init__(self, draw_udp_port, draw_mode, **kargs):
        from UTILS.network import QueueOnTcpServer
        super(DrawProcess, self).__init__()
        self.draw_mode = draw_mode
        self.draw_udp_port = draw_udp_port
        self.tcp_connection = QueueOnTcpServer(self.draw_udp_port)
        self.image_path = kargs['image_path'] if 'image_path' in kargs else None
        return

    def init_matplot_lib(self):
        """Select the matplotlib backend for the draw mode and set up dispatch tables."""
        if self.draw_mode in ['Web', 'Img']:
            import matplotlib
            matplotlib.use('Agg')  # set the backend before importing pyplot
            import matplotlib.pyplot as plt
            self.gui_reflesh = lambda: time.sleep(1)  # plt.pause(0.1)
        elif self.draw_mode == 'Native':
            import matplotlib
            # matplotlib.use('Agg') # set the backend before importing pyplot
            matplotlib.use('Qt5Agg')
            import matplotlib.pyplot as plt
            self.gui_reflesh = lambda: plt.pause(0.2)
        elif self.draw_mode == 'Threejs':
            # Threejs is handled by DrawProcessThreejs, never here
            assert False
        else:
            assert False

        from config import GlobalConfig
        logdir = GlobalConfig.logdir
        if not os.path.exists(logdir):
            os.makedirs(logdir)
        if self.draw_mode == 'Web':
            # serve the rendered html through a tiny HTTP process
            self.avail_port = find_free_port()
            my_http = MyHttp('%s/html.html'%logdir, self.avail_port)
            my_http.daemon = True
            my_http.start()

        # command name -> plotting family
        self.libs_family = {
            'rec_init': 'rec', 'rec': 'rec', 'rec_show': 'rec',
            'v2d_init': 'v2d', 'v2dx':'v2d', 'v2d_show': 'v2d', 'v2d_pop':'v2d',
            'v2d_line_object':'v2d', 'v2d_clear':'v2d', 'v2d_add_terrain': 'v2d',
        }
        # family -> one-shot lazy initializer (set to None after first use)
        self.libs_init_fns = {
            'rec': self.rec_init_fn,
            'v2d': self.v2d_init_fn,
        }

    def run(self):
        """Process entry point: drain the TCP queue, refresh the GUI when idle."""
        self.init_matplot_lib()
        try:
            # self.tcp_connection.set_handler(self.run_handler)
            from queue import Empty
            queue = self.tcp_connection.get_queue()
            # self.tcp_connection.set_handler(self.run_handler)
            self.tcp_connection.wait_connection()  # after this, the queue begin to work
            while True:
                try:
                    buff_list = []
                    buff_list.extend(queue.get(timeout=0.1))
                    for _ in range(queue.qsize()): buff_list.extend(queue.get(timeout=0.1))
                    self.run_handler(buff_list)
                except Empty: self.gui_reflesh()
        except KeyboardInterrupt:
            self.__del__()
        self.__del__()

    def run_handler(self, buff_list):
        """Execute buffered commands, collapsing redundant rec_show refreshes."""
        while True:
            if len(buff_list) == 0: break
            buff = buff_list.pop(0)
            # skip a rec_show if another one is still pending in the buffer
            if (buff=='>>rec_show\n') and ('>>rec_show\n' in buff_list): continue # skip
            self.process_cmd(buff)
            # # print('processed command:', buff)

    def __del__(self):
        self.tcp_connection.close()

    def process_cmd(self, cmd_str):
        """Parse one '>>name(args)' command line and eval it on its family object."""
        if '>>' in cmd_str:
            cmd_str_ = cmd_str[2:].strip('\n')
            if ')' not in cmd_str_:
                cmd_str_ = cmd_str_+'()'
            prefix = self.get_cmd_lib(cmd_str_)
            if prefix is not None:
                eval('%s.%s'%(prefix, cmd_str_))

    def get_cmd_lib(self, cmd):
        """Map a command to its family attribute name, lazily initializing the family."""
        cmd_key = None
        func_name = cmd.split('(')[0]
        if func_name not in self.libs_family:
            # "plotting function cannot handle:"
            print蓝('绘图函数不能处理:', cmd)
            return None
        family_name = self.libs_family[func_name]
        if self.libs_init_fns[family_name] is not None:
            self.libs_init_fns[family_name]()
            self.libs_init_fns[family_name] = None
        return 'self.%s'%family_name

    def rec_init_fn(self):
        """Lazy initializer for the 'rec' (curve recording) family."""
        from VISUALIZE.mcom_rec import rec_family
        self.rec = rec_family('r', self.draw_mode, self.image_path)

    def v2d_init_fn(self):
        """Lazy initializer for the 'v2d' (2-D scene) family."""
        from VISUALIZE.mcom_v2d import v2d_family
        self.v2d = v2d_family(self.draw_mode)
class MyHttp(Process):
    """Tiny HTTP process serving one pre-rendered HTML file at '/'."""

    def __init__(self, path_to_html, avail_port):
        super(MyHttp, self).__init__()
        self.path_to_html = path_to_html
        self.avail_port = avail_port

    def run(self):
        from flask import Flask
        app = Flask(__name__)

        @app.route("/")
        def hello():
            # Re-read the file on every request so the page always shows
            # the latest render; fall back to a placeholder until it exists.
            try:
                with open(self.path_to_html, 'r') as f:
                    page = f.read()
            except:
                page = "no plot yet please wait"
            return page

        app.run(port=self.avail_port)
|
relay_test.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import threading
import random
import click
from influxdb.client import InfluxDBClient
def generate_fake_data(points):  # type: (int) -> list(dict)
    """Build `points` fake cpu_load_short measurements.

    Each point carries the tag host=test, the current time in microseconds,
    and a random load value in [0.0, 1.0].
    """
    fake_points = []
    for _ in range(points):
        fake_points.append({
            "measurement": "cpu_load_short",
            "tags": {
                "host": "test",
            },
            "time": int(time.time() * 1000000),
            "fields": {
                "value": float(random.randint(0, 100)) / 100
            }
        })
    return fake_points
def do_influx_request(n_points, host, database):  # type: (int, str, str) -> float
    """Write `n_points` fake points to InfluxDB and return the write latency.

    Only the write_points call itself is timed; connection setup and data
    generation are excluded from the measurement.
    """
    conn = InfluxDBClient(host=host)
    # Fix: the original hard-coded switch_database('p2') (leftover debug
    # value), which disagreed with the `database` argument actually passed
    # to write_points below.  Select the requested database consistently.
    conn.switch_database(database)
    points = generate_fake_data(n_points)
    request_start_time = time.time()
    conn.write_points(points, time_precision='u', database=database)
    return time.time() - request_start_time
def thread_run_request(points, seconds, host, database):  # type: (int, int, str, str) -> None
    """Repeatedly write `points` fake points until `seconds` have elapsed,
    then print totals and the average request latency.

    NOTE(review): assumes at least one request completes within the window;
    otherwise the average computation divides by zero -- confirm acceptable
    for this load-test script.
    """
    end_time = time.time() + seconds
    total_request_count = 0  # type: int
    total_request_time = 0.0  # type: float
    while time.time() < end_time:
        # do request
        total_request_time += do_influx_request(points, host, database)
        total_request_count += 1
    print('Total points: %d\tTotal request: %d\tAvg request time: %f' %
          (total_request_count * points, total_request_count, total_request_time / total_request_count))
@click.command()
@click.option('--points', default=100, help='每个请求带多少个数据点')
@click.option('--seconds', default=10, help='持续运行多少时间')
@click.option('--threads', default=4, help='使用多少个线程')
@click.option('--host', default='172.20.1.30', help='InfluxDB-relay 的域名或者IP地址')
@click.option('--database', default='test', help='写入到哪个数据库')
def main(points, seconds, threads, host, database):  # type: (int, int, int, str, str) -> None
    """
    简单的向 InfluxDB-relay 写入数据的工具
    """
    # Pass the worker and its arguments directly instead of wrapping them in
    # a lambda -- the idiomatic Thread usage, with clearer tracebacks.
    running_threads = []  # type: list[threading.Thread]
    for _ in range(threads):
        t = threading.Thread(target=thread_run_request,
                             args=(points, seconds, host, database))
        t.start()
        running_threads.append(t)

    # Wait for every worker so the process exits only after the run completes.
    for t in running_threads:
        t.join()


if __name__ == '__main__':
    main()
|
Tensorboard_reader.py | #Denis Rothman
def launchTensorBoard():
    """Start TensorBoard serving the local `log/` directory.

    os.system blocks until TensorBoard exits, which is why this function
    is run on a background thread below.
    """
    import os
    # Single literal instead of the original constant concatenation
    # 'tensorboard --logdir=' + 'log/'; the resulting command is identical.
    os.system('tensorboard --logdir=log/')
import threading

# Run TensorBoard on a daemon-less background thread so this script keeps
# going while the server blocks.  `launchTensorBoard` takes no parameters,
# so no `args` are needed (the original passed `args=([])` -- just an empty
# list, i.e. a no-op).
t = threading.Thread(target=launchTensorBoard)
t.start()

#In your browser, enter http://localhost:6006 as the URL
#add #projector to the URL if necessary: http://localhost:6006/#projector
#Once loaded, click on "choose file" and open log/labels.tsv from your machine
|
remove_nans_dmstack.py | # Author : Bhishan Poudel
# Date : July 5, 2019
# Update : Oct 29, 2019
# Description:
#===============
# Remove nans from dmstack output csv files and do some filterings to give
# txt files.
#
# Input/Outputs:
#=================
# Reads all *.csv files and creates *.txt from them.
#
#
# Filtering:
#============
# 1. flag calib_psfCandidate==False
# 2. column deblend_nChild==0
# 3. ellipticity e = sqrt(e1^2 + e2^2) < 1.5
# 4. choose only few columns given below
# 5. remove nans from all these columns
# 6. change delimiter to tab.
#
#
# Usage:
#=======
# python remove_nans_dmstack.py
#
# Estimated time: 1m 2s
#
import pandas as pd
import numpy as np
import sys
import glob
import multiprocessing
from multiprocessing import Process
def remove_nans(ifile,file_number):
    """Filter one dmstack source-catalog CSV and write a cleaned TSV.

    Steps: drop PSF/star candidates (calib_psfCandidate != 0), drop blended
    parents (deblend_nChild != 0), compute ellipticity
    e = sqrt(e1^2 + e2^2) and keep e < 1.5, derive an ellipse radius from
    the HSM moments (radius^4 = xx*yy - xy^2), keep a fixed column subset,
    drop NaN rows, and save tab-separated under dmstack_txt/ with a
    commented header.

    Parameters
    ----------
    ifile : str
        Input CSV path of the form dmstack_csv/..._NNN.csv.
    file_number : int
        Integer tag written as the first output column.

    Notes
    -----
    `usecols` are positional indices into the dmstack CSV; they are assumed
    to line up with the named columns queried below (e.g. position 1 ->
    calib_psfCandidate, 94 -> deblend_nChild) -- verify against the catalog
    schema if the dmstack output format ever changes.
    """
    usecols = [1, 94, 90, 102, 103, 104, 105, 127, 128, 114, 133,134,135]
    df = pd.read_csv(ifile, sep=",",low_memory=False,usecols=usecols)

    # coerce everything to numeric; unparsable cells become NaN (dropped below)
    for c in df.columns:
        df[c] = pd.to_numeric(df[c],errors='coerce')

    # filter the flag calib_psfCandidate==False
    # not a star candidate
    df = df.query('calib_psfCandidate == 0.0')

    # filter the column deblend_nChild==0
    # no child source after deblending
    df = df.query('deblend_nChild == 0.0')

    # de-fragment after the queries before adding new columns
    df = df.copy()

    # clean out unphysical results
    # e1^2 + e2^2 < 1.5^2
    df['ellip'] = (df['ext_shapeHSM_HsmShapeRegauss_e1'] ** 2 + df['ext_shapeHSM_HsmShapeRegauss_e2'] ** 2)**0.5
    df = df.query('ellip < 1.5')

    # calculate radius of ellipse using HSM moments
    # radius**4 = xx*yy - xy**2
    df['radius'] = df.eval(""" ( (ext_shapeHSM_HsmSourceMoments_xx * ext_shapeHSM_HsmSourceMoments_yy) \
                   - (ext_shapeHSM_HsmSourceMoments_xy**2 ) )**0.25 """)

    # add a new column with file_number
    df['file_number'] = file_number

    # take only required columns
    cols_select = ['file_number', 'id',
                   'base_SdssCentroid_x', 'base_SdssCentroid_y',
                   'base_SdssCentroid_xSigma','base_SdssCentroid_ySigma',
                   'ext_shapeHSM_HsmShapeRegauss_e1','ext_shapeHSM_HsmShapeRegauss_e2',
                   'ellip', 'base_SdssShape_flux', 'radius'
                   ]
    df = df[cols_select]

    # drop all nans
    df = df.dropna()

    # write txt file with commented header
    prefix = ' '*2
    header_line = prefix.join(cols_select)

    # from: dmstack_csv/src_lsst_mono_z1.5_000.csv
    # to  : dmstack_txt/src_lsst_mono_z1.5_000.txt
    ofile = ifile.replace('dmstack_csv', 'dmstack_txt')
    ofile = ofile.replace('.csv', '.txt')
    np.savetxt(ofile,df.values,header=header_line,delimiter='\t')
def func1():
    """Convert src_lsst_z1.5_{000..099}.csv catalogs to txt via remove_nans."""
    infiles = ['dmstack_csv/src_lsst_z1.5_{:03d}.csv'.format(i) for i in range(100)]
    for ifile in infiles:
        # fix: the old ``ifile.rstrip('.csv')`` stripped any trailing run of
        # the characters '.', 'c', 's', 'v' (str.rstrip takes a character
        # set, not a suffix) and would corrupt names ending in those
        # letters.  Slice the 4-char extension off instead.
        file_number = int(ifile[:-len('.csv')].split('_')[-1])
        remove_nans(ifile, file_number)
def func2():
    """Convert src_lsst90_z1.5_{000..099}.csv catalogs to txt via remove_nans."""
    infiles = ['dmstack_csv/src_lsst90_z1.5_{:03d}.csv'.format(i) for i in range(100)]
    for ifile in infiles:
        # fix: str.rstrip('.csv') removes a trailing *character set*, not the
        # literal suffix; slice the extension off instead.
        file_number = int(ifile[:-len('.csv')].split('_')[-1])
        remove_nans(ifile, file_number)
def func3():
    """Convert src_lsst_mono_z1.5_{000..099}.csv catalogs to txt via remove_nans."""
    infiles = ['dmstack_csv/src_lsst_mono_z1.5_{:03d}.csv'.format(i) for i in range(100)]
    for ifile in infiles:
        # fix: str.rstrip('.csv') removes a trailing *character set*, not the
        # literal suffix; slice the extension off instead.
        file_number = int(ifile[:-len('.csv')].split('_')[-1])
        remove_nans(ifile, file_number)
def func4():
    """Convert src_lsst_mono90_z1.5_{000..099}.csv catalogs to txt via remove_nans."""
    infiles = ['dmstack_csv/src_lsst_mono90_z1.5_{:03d}.csv'.format(i) for i in range(100)]
    for ifile in infiles:
        # fix: str.rstrip('.csv') removes a trailing *character set*, not the
        # literal suffix; slice the extension off instead.
        file_number = int(ifile[:-len('.csv')].split('_')[-1])
        remove_nans(ifile, file_number)
if __name__ == '__main__':
    # Run the four conversion jobs in parallel worker processes,
    # then wait for all of them to finish.
    workers = [Process(target=job) for job in (func1, func2, func3, func4)]
    for worker in workers:
        worker.start()
    # join them all
    for worker in workers:
        worker.join()
|
trustedcoin.py | #!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import socket
import json
import base64
import time
import hashlib
from collections import defaultdict
from typing import Dict, Union, Sequence, List
from urllib.parse import urljoin
from urllib.parse import quote
from aiohttp import ClientResponse
from electrum import ecc, constants, keystore, version, bip32, bitcoin
from electrum.bip32 import BIP32Node, xpub_type
from electrum.crypto import sha256
from electrum.transaction import PartialTxOutput, PartialTxInput, PartialTransaction, Transaction
from electrum.mnemonic import Mnemonic, seed_type, is_any_2fa_seed_type
from electrum.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum.i18n import _
from electrum.plugin import BasePlugin, hook
from electrum.util import NotEnoughFunds, UserFacingException
from electrum.storage import StorageEncryptionVersion
from electrum.network import Network
from electrum.base_wizard import BaseWizard, WizardWalletPasswordSetting
from electrum.logging import Logger
def get_signing_xpub(xtype):
    """Return TrustedCoin's signing xpub for the active chain, re-encoded as *xtype*."""
    if xtype not in ('standard', 'p2wsh'):
        raise NotImplementedError('xtype: {}'.format(xtype))
    if constants.net.TESTNET:
        xpub = "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
    else:
        xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
    if xtype != 'standard':
        # re-encode the key with the requested script-type version bytes
        xpub = BIP32Node.from_xkey(xpub)._replace(xtype=xtype).to_xpub()
    return xpub
def get_billing_xpub():
    """Return TrustedCoin's billing xpub for the active chain (mainnet or testnet)."""
    if not constants.net.TESTNET:
        return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
    return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
KIVY_DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"To use it, you must have a separate device with Google Authenticator."),
_("This service uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. A small fee will be charged on each transaction that uses the "
"remote server."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
    """Error reported by the TrustedCoin API; carries the HTTP status code."""

    def __init__(self, message, status_code=0):
        super().__init__(message)
        # HTTP status returned by the server (0 when not applicable)
        self.status_code = status_code
class ErrorConnectingServer(Exception):
    """Raised when the TrustedCoin server cannot be reached.

    The underlying cause (an exception, a plain string, or None) is kept
    in ``reason`` and included in the user-facing message.
    """
    # fix: the parameter defaults to None, so the annotation must include
    # None (was Union[str, Exception] — invalid implicit Optional)
    def __init__(self, reason: Union[str, Exception, None] = None):
        self.reason = reason

    def __str__(self):
        header = _("Error connecting to {} server").format('TrustedCoin')
        reason = self.reason
        if isinstance(reason, BaseException):
            reason = repr(reason)
        return f"{header}:\n{reason}" if reason else header
class TrustedCoinCosignerClient(Logger):
    """Thin HTTP client for the TrustedCoin cosigner REST API.

    All requests go through :meth:`send_request`, which proxies the HTTP
    call through the Electrum Network object (so proxy settings apply).
    """

    def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
        self.base_url = base_url
        self.debug = False
        self.user_agent = user_agent
        Logger.__init__(self)

    async def handle_response(self, resp: ClientResponse):
        """Decode a server response; raise TrustedCoinException for non-200."""
        if resp.status != 200:
            try:
                r = await resp.json()
                message = r['message']
            except Exception:
                # fix: was a bare "except:" — don't swallow SystemExit/KeyboardInterrupt
                message = await resp.text()
            raise TrustedCoinException(message, resp.status)
        try:
            return await resp.json()
        except Exception:
            # fix: was a bare "except:"; fall back to plain text bodies
            return await resp.text()

    def send_request(self, method, relative_url, data=None, *, timeout=None, headers=None):
        """Send a GET/POST request to the API and return the decoded response.

        :param method: 'get' or 'post'
        :param relative_url: endpoint path, joined onto base_url
        :param data: query params (get) or JSON body (post)
        :param timeout: optional per-request timeout in seconds
        :param headers: optional extra HTTP headers (keyword-only).
            fix: new parameter — transfer_credit previously passed its
            headers as a 4th positional argument, which send_request did
            not accept, causing a TypeError at runtime.
        :raises ErrorConnectingServer: offline or transport failure
        :raises TrustedCoinException: server returned an error status
        """
        network = Network.get_instance()
        if not network:
            raise ErrorConnectingServer('You are offline.')
        url = urljoin(self.base_url, relative_url)
        if self.debug:
            self.logger.debug(f'<-- {method} {url} {data}')
        req_headers = dict(headers) if headers else {}
        if self.user_agent:
            req_headers['user-agent'] = self.user_agent
        try:
            if method == 'get':
                response = Network.send_http_on_proxy(method, url,
                                                      params=data,
                                                      headers=req_headers,
                                                      on_finish=self.handle_response,
                                                      timeout=timeout)
            elif method == 'post':
                response = Network.send_http_on_proxy(method, url,
                                                      json=data,
                                                      headers=req_headers,
                                                      on_finish=self.handle_response,
                                                      timeout=timeout)
            else:
                assert False
        except TrustedCoinException:
            raise
        except Exception as e:
            raise ErrorConnectingServer(e)
        else:
            if self.debug:
                self.logger.debug(f'--> {response}')
            return response

    def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
        """
        Returns the TOS for the given billing plan as a plain/text unicode string.
        :param billing_plan: the plan to return the terms for
        """
        payload = {'billing_plan': billing_plan}
        return self.send_request('get', 'tos', payload)

    def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
        """
        Creates a new cosigner resource.
        :param xpubkey1: a bip32 extended public key (customarily the hot key)
        :param xpubkey2: a bip32 extended public key (customarily the cold key)
        :param email: a contact email
        :param billing_plan: the billing plan for the cosigner
        """
        payload = {
            'email': email,
            'xpubkey1': xpubkey1,
            'xpubkey2': xpubkey2,
            'billing_plan': billing_plan,
        }
        return self.send_request('post', 'cosigner', payload)

    def auth(self, id, otp):
        """
        Attempt to authenticate for a particular cosigner.
        :param id: the id of the cosigner
        :param otp: the one time password
        """
        payload = {'otp': otp}
        return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)

    def get(self, id):
        """ Get billing info """
        return self.send_request('get', 'cosigner/%s' % quote(id))

    def get_challenge(self, id):
        """ Get challenge to reset Google Auth secret """
        return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))

    def reset_auth(self, id, challenge, signatures):
        """ Reset Google Auth secret """
        payload = {'challenge': challenge, 'signatures': signatures}
        return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)

    def sign(self, id, transaction, otp):
        """
        Attempt to authenticate for a particular cosigner.
        :param id: the id of the cosigner
        :param transaction: the hex encoded [partially signed] compact transaction to sign
        :param otp: the one time password
        """
        payload = {
            'otp': otp,
            'transaction': transaction
        }
        return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload,
                                 timeout=60)

    def transfer_credit(self, id, recipient, otp, signature_callback):
        """
        Transfer a cosigner's credits to another cosigner.
        :param id: the id of the sending cosigner
        :param recipient: the id of the recipient cosigner
        :param otp: the one time password (of the sender)
        :param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
        """
        payload = {
            'otp': otp,
            'recipient': recipient,
            'timestamp': int(time.time()),
        }
        relative_url = 'cosigner/%s/transfer' % quote(id)
        full_url = urljoin(self.base_url, relative_url)
        headers = {
            'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
        }
        # fix: headers was passed positionally, but send_request had no
        # positional slot for it (timeout is keyword-only) -> TypeError
        return self.send_request('post', relative_url, payload, headers=headers)
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
class Wallet_2fa(Multisig_Wallet):
    """2-of-3 multisig wallet whose third cosigner is the TrustedCoin server.

    Keystores: 'x1/' = user's hot key, 'x2/' = user's backup key,
    'x3/' = TrustedCoin's remote signing key.  The server charges a
    per-transaction fee, collected via extra "billing" outputs.
    """
    plugin: 'TrustedCoinPlugin'
    wallet_type = '2fa'
    def __init__(self, db, storage, *, config):
        self.m, self.n = 2, 3
        # NOTE(review): deliberately bypasses Multisig_Wallet.__init__,
        # presumably because m/n are fixed above — confirm
        Deterministic_Wallet.__init__(self, db, storage, config=config)
        self.is_billing = False
        self.billing_info = None
        self._load_billing_addresses()
    def _load_billing_addresses(self):
        # Load the persisted billing addresses (both address types) from the db.
        billing_addresses = {
            'legacy': self.db.get('trustedcoin_billing_addresses', {}),
            'segwit': self.db.get('trustedcoin_billing_addresses_segwit', {})
        }
        self._billing_addresses = {} # type: Dict[str, Dict[int, str]] # addr_type -> index -> addr
        self._billing_addresses_set = set() # set of addrs
        for addr_type, d in list(billing_addresses.items()):
            self._billing_addresses[addr_type] = {}
            # convert keys from str to int
            for index, addr in d.items():
                self._billing_addresses[addr_type][int(index)] = addr
                self._billing_addresses_set.add(addr)
    def can_sign_without_server(self):
        # True once we hold the private key for 'x2/' (wallet restored with
        # both seeds) — then no OTP / server round-trip is needed.
        return not self.keystores['x2/'].is_watching_only()
    def get_user_id(self):
        return get_user_id(self.db)
    def min_prepay(self):
        # smallest prepaid tx-bundle size offered by the server
        return min(self.price_per_tx.keys())
    def num_prepay(self):
        # user-configured bundle size, clamped to what the server offers
        default = self.min_prepay()
        n = self.config.get('trustedcoin_prepay', default)
        if n not in self.price_per_tx:
            n = default
        return n
    def extra_fee(self):
        # TrustedCoin fee (satoshis) to add to the next tx; 0 when not billed.
        if self.can_sign_without_server():
            return 0
        if self.billing_info is None:
            # billing info not fetched yet: fetch in background, skip fee now
            self.plugin.start_request_thread(self)
            return 0
        if self.billing_info.get('tx_remaining'):
            return 0
        if self.is_billing:
            return 0
        n = self.num_prepay()
        price = int(self.price_per_tx[n])
        # sanity cap: refuse absurd server-quoted fees
        if price > 100000 * n:
            raise Exception('too high trustedcoin fee ({} for {} txns)'.format(price, n))
        return price
    def make_unsigned_transaction(
            self, *,
            coins: Sequence[PartialTxInput],
            outputs: List[PartialTxOutput],
            fee=None,
            change_addr: str = None,
            is_sweep=False,
            rbf=False) -> PartialTransaction:
        """Build an unsigned tx, appending the TrustedCoin fee output when due."""
        mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
            self, coins=coins, outputs=o, fee=fee, change_addr=change_addr, rbf=rbf)
        extra_fee = self.extra_fee() if not is_sweep else 0
        if extra_fee:
            address = self.billing_info['billing_address_segwit']
            fee_output = PartialTxOutput.from_address_and_value(address, extra_fee)
            try:
                tx = mk_tx(outputs + [fee_output])
            except NotEnoughFunds:
                # TrustedCoin won't charge if the total inputs is
                # lower than their fee
                tx = mk_tx(outputs)
                if tx.input_value() >= extra_fee:
                    raise
                self.logger.info("not charging for this tx")
        else:
            tx = mk_tx(outputs)
        return tx
    def on_otp(self, tx: PartialTransaction, otp):
        """Send *tx* to the server with the user's OTP and merge its signatures."""
        if not otp:
            self.logger.info("sign_transaction: no auth code")
            return
        otp = int(otp)
        long_user_id, short_id = self.get_user_id()
        raw_tx = tx.serialize_as_bytes().hex()
        # hex-encoded PSBT magic bytes ("psbt\xff")
        assert raw_tx[:10] == "70736274ff", f"bad magic. {raw_tx[:10]}"
        try:
            r = server.sign(short_id, raw_tx, otp)
        except TrustedCoinException as e:
            if e.status_code == 400:  # invalid OTP
                raise UserFacingException(_('Invalid one-time password.')) from e
            else:
                raise
        if r:
            received_raw_tx = r.get('transaction')
            received_tx = Transaction(received_raw_tx)
            tx.combine_with_other_psbt(received_tx)
            self.logger.info(f"twofactor: is complete {tx.is_complete()}")
        # reset billing_info
        self.billing_info = None
        self.plugin.start_request_thread(self)
    def add_new_billing_address(self, billing_index: int, address: str, addr_type: str):
        """Record a server-provided billing address, backfilling any gap in indices."""
        billing_addresses_of_this_type = self._billing_addresses[addr_type]
        saved_addr = billing_addresses_of_this_type.get(billing_index)
        if saved_addr is not None:
            if saved_addr == address:
                return  # already saved this address
            else:
                raise Exception('trustedcoin billing address inconsistency.. '
                                'for index {}, already saved {}, now got {}'
                                .format(billing_index, saved_addr, address))
        # do we have all prior indices? (are we synced?)
        largest_index_we_have = max(billing_addresses_of_this_type) if billing_addresses_of_this_type else -1
        if largest_index_we_have + 1 < billing_index:  # need to sync
            for i in range(largest_index_we_have + 1, billing_index):
                addr = make_billing_address(self, i, addr_type=addr_type)
                billing_addresses_of_this_type[i] = addr
                self._billing_addresses_set.add(addr)
        # save this address; and persist to disk
        billing_addresses_of_this_type[billing_index] = address
        self._billing_addresses_set.add(address)
        self._billing_addresses[addr_type] = billing_addresses_of_this_type
        self.db.put('trustedcoin_billing_addresses', self._billing_addresses['legacy'])
        self.db.put('trustedcoin_billing_addresses_segwit', self._billing_addresses['segwit'])
        # FIXME this often runs in a daemon thread, where storage.write will fail
        self.db.write(self.storage)
    def is_billing_address(self, addr: str) -> bool:
        return addr in self._billing_addresses_set
# Utility functions
def get_user_id(db):
    """Derive the (long_id, short_id) TrustedCoin user id from the wallet's two xpubs."""
    xpub_hot = db.get('x1/')['xpub']
    xpub_cold = db.get('x2/')['xpub']
    # sort before hashing so the id does not depend on which key is "hot"
    long_id = sha256(''.join(sorted([xpub_hot, xpub_cold])))
    short_id = hashlib.sha256(long_id).hexdigest()
    return long_id, short_id
def make_xpub(xpub, s) -> str:
    """Derive a child xpub of *xpub* using *s* as the child index material (CKD_pub)."""
    root = BIP32Node.from_xkey(xpub)
    child_pub, child_cc = bip32._CKD_pub(
        parent_pubkey=root.eckey.get_public_key_bytes(compressed=True),
        parent_chaincode=root.chaincode,
        child_index=s)
    child = BIP32Node(xtype=root.xtype,
                      eckey=ecc.ECPubkey(child_pub),
                      chaincode=child_cc)
    return child.to_xpub()
def make_billing_address(wallet, num, addr_type):
    """Compute billing address *num* for *wallet* ('legacy' p2pkh or 'segwit' p2wpkh)."""
    long_id, short_id = wallet.get_user_id()
    # user-specific branch of the billing xpub, keyed by the long user id
    user_xpub = make_xpub(get_billing_xpub(), long_id)
    user_node = BIP32Node.from_xkey(user_xpub)
    child = user_node.subkey_at_public_derivation([num])
    pubkey = child.eckey.get_public_key_bytes(compressed=True)
    if addr_type == 'legacy':
        return bitcoin.public_key_to_p2pkh(pubkey)
    if addr_type == 'segwit':
        return bitcoin.public_key_to_p2wpkh(pubkey)
    raise ValueError(f'unexpected billing type: {addr_type}')
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
disclaimer_msg = DISCLAIMER
    def __init__(self, parent, config, name):
        """Register the plugin and attach it to the 2FA wallet class."""
        BasePlugin.__init__(self, parent, config, name)
        # stored on the *class*, so every Wallet_2fa instance sees this plugin
        self.wallet_class.plugin = self
        # guards against starting concurrent billing-info request threads
        self.requesting = False
@staticmethod
def is_valid_seed(seed):
t = seed_type(seed)
return is_any_2fa_seed_type(t)
    def is_available(self):
        """Plugin is always available (no hardware or platform requirement)."""
        return True
    def is_enabled(self):
        """Always enabled; a 2FA wallet cannot function without this plugin."""
        return True
    def can_user_disable(self):
        """The user may not disable the plugin (the wallet type depends on it)."""
        return False
    @hook
    def tc_sign_wrapper(self, wallet, tx, on_success, on_failure):
        """Hook: return a callback that asks the user for an OTP before co-signing.

        Returns None (no extra signing step) when the tx is already complete,
        the wallet can sign locally, or the server key is not needed for *tx*.
        """
        if not isinstance(wallet, self.wallet_class):
            return
        if tx.is_complete():
            return
        if wallet.can_sign_without_server():
            return
        if not wallet.keystores['x3/'].can_sign(tx, ignore_watching_only=True):
            self.logger.info("twofactor: xpub3 not needed")
            return
        def wrapper(tx):
            assert tx
            self.prompt_user_for_otp(wallet, tx, on_success, on_failure)
        return wrapper
    def prompt_user_for_otp(self, wallet, tx, on_success, on_failure) -> None:
        """GUI-specific OTP prompt; implemented by the qt/kivy subclasses."""
        raise NotImplementedError()
    @hook
    def get_tx_extra_fee(self, wallet, tx: Transaction):
        """Hook: return (address, value) of the TrustedCoin fee output in *tx*, if any."""
        # NOTE(review): exact-type check excludes subclasses — presumably
        # intentional; confirm before changing to isinstance()
        if type(wallet) != Wallet_2fa:
            return
        for o in tx.outputs():
            if wallet.is_billing_address(o.address):
                return o.address, o.value
    def finish_requesting(func):
        """Decorator (class-body scope): clear ``self.requesting`` when *func* exits,
        whether it returns normally or raises."""
        def f(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            finally:
                self.requesting = False
        return f
    @finish_requesting
    def request_billing_info(self, wallet: 'Wallet_2fa', *, suppress_connection_error=True):
        """Fetch billing state from the server, verify and store billing addresses.

        Usually runs on a background thread (see start_request_thread).
        Returns True on success; None when skipped or unreachable.
        """
        if wallet.can_sign_without_server():
            return
        self.logger.info("request billing info")
        try:
            billing_info = server.get(wallet.get_user_id()[1])
        except ErrorConnectingServer as e:
            if suppress_connection_error:
                self.logger.info(repr(e))
                return
            raise
        billing_index = billing_info['billing_index']
        # add segwit billing address; this will be used for actual billing
        billing_address = make_billing_address(wallet, billing_index, addr_type='segwit')
        if billing_address != billing_info['billing_address_segwit']:
            raise Exception(f'unexpected trustedcoin billing address: '
                            f'calculated {billing_address}, received {billing_info["billing_address_segwit"]}')
        wallet.add_new_billing_address(billing_index, billing_address, addr_type='segwit')
        # also add legacy billing address; only used for detecting past payments in GUI
        billing_address = make_billing_address(wallet, billing_index, addr_type='legacy')
        wallet.add_new_billing_address(billing_index, billing_address, addr_type='legacy')
        wallet.billing_info = billing_info
        wallet.price_per_tx = dict(billing_info['price_per_tx'])
        # drop single-tx pricing; prepay bundles start at the next size up
        wallet.price_per_tx.pop(1, None)
        return True
    def start_request_thread(self, wallet):
        """Fetch billing info on a background daemon thread (at most one at a time)."""
        from threading import Thread
        if self.requesting is False:
            # NOTE(review): check-then-set is not atomic; a benign race could
            # start two threads — confirm this is acceptable
            self.requesting = True
            t = Thread(target=self.request_billing_info, args=(wallet,))
            t.daemon = True
            t.start()
            return t
    def make_seed(self, seed_type):
        """Generate a fresh mnemonic of the given 2FA *seed_type* ('2fa' / '2fa_segwit')."""
        if not is_any_2fa_seed_type(seed_type):
            raise Exception(f'unexpected seed type: {seed_type}')
        return Mnemonic('english').make_seed(seed_type=seed_type)
    @hook
    def do_clear(self, window):
        """Hook: a new tx is being composed — leave billing mode."""
        window.wallet.is_billing = False
    def show_disclaimer(self, wizard: BaseWizard):
        """First wizard step: show the TrustedCoin disclaimer, then choose a seed."""
        wizard.set_icon('trustedcoin-wizard.png')
        wizard.reset_stack()
        wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(self.disclaimer_msg), run_next = lambda x: wizard.run('choose_seed'))
    def choose_seed(self, wizard):
        """Wizard step: create a new seed or restore from an existing one."""
        title = _('Create or restore')
        message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
        choices = [
            ('choose_seed_type', _('Create a new seed')),
            ('restore_wallet', _('I already have a seed')),
        ]
        wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
    def choose_seed_type(self, wizard):
        """Pick the seed type from config ('2fa' legacy when nosegwit is set)."""
        seed_type = '2fa' if self.config.get('nosegwit') else '2fa_segwit'
        self.create_seed(wizard, seed_type)
    def create_seed(self, wizard, seed_type):
        """Generate a seed, show it to the user, then ask for an optional passphrase."""
        seed = self.make_seed(seed_type)
        f = lambda x: wizard.request_passphrase(seed, x)
        wizard.opt_bip39 = False
        wizard.opt_ext = True
        wizard.show_seed_dialog(run_next=f, seed_text=seed)
@classmethod
def get_xkeys(self, seed, t, passphrase, derivation):
assert is_any_2fa_seed_type(t)
xtype = 'standard' if t == '2fa' else 'p2wsh'
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
rootnode = BIP32Node.from_rootseed(bip32_seed, xtype=xtype)
child_node = rootnode.subkey_at_private_derivation(derivation)
return child_node.to_xprv(), child_node.to_xpub()
@classmethod
def xkeys_from_seed(self, seed, passphrase):
t = seed_type(seed)
if not is_any_2fa_seed_type(t):
raise Exception(f'unexpected seed type: {t}')
words = seed.split()
n = len(words)
if t == '2fa':
if n >= 20: # old scheme
# note: pre-2.7 2fa seeds were typically 24-25 words, however they
# could probabilistically be arbitrarily shorter due to a bug. (see #3611)
# the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
if passphrase != '':
raise Exception('old 2fa seed cannot have passphrase')
xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), t, '', "m/")
xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), t, '', "m/")
elif n == 12: # new scheme
xprv1, xpub1 = self.get_xkeys(seed, t, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, t, passphrase, "m/1'/")
else:
raise Exception(f'unrecognized seed length for "2fa" seed: {n}')
elif t == '2fa_segwit':
xprv1, xpub1 = self.get_xkeys(seed, t, passphrase, "m/0'/")
xprv2, xpub2 = self.get_xkeys(seed, t, passphrase, "m/1'/")
else:
raise Exception(f'unexpected seed type: {t}')
return xprv1, xpub1, xprv2, xpub2
    def create_keystore(self, wizard, seed, passphrase):
        # this overloads the wizard's method
        # keystore 1 keeps the private key (hot); keystore 2 is watch-only
        # here — its xprv is only recovered during a full restore
        xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
        k1 = keystore.from_xprv(xprv1)
        k2 = keystore.from_xpub(xpub2)
        wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
    def on_password(self, wizard, password, encrypt_storage, k1, k2):
        """Encrypt the hot keystore, store both, then continue to the online step."""
        k1.update_password(None, password)
        wizard.data['x1/'] = k1.dump()
        wizard.data['x2/'] = k2.dump()
        wizard.pw_args = WizardWalletPasswordSetting(password=password,
                                                     encrypt_storage=encrypt_storage,
                                                     storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
                                                     encrypt_keystore=bool(password))
        self.go_online_dialog(wizard)
def restore_wallet(self, wizard):
wizard.opt_bip39 = False
wizard.opt_slip39 = False
wizard.opt_ext = True
title = _("Restore two-factor Wallet")
f = lambda seed, seed_type, is_ext: wizard.run('on_restore_seed', seed, is_ext)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
    def on_restore_seed(self, wizard, seed, is_ext):
        """Ask for the extension passphrase if requested, then continue."""
        f = lambda x: self.restore_choice(wizard, seed, x)
        wizard.passphrase_dialog(run_next=f) if is_ext else f('')
    def restore_choice(self, wizard: BaseWizard, seed, passphrase):
        """Ask whether to keep 2FA (watch-only x2) or disable it (both xprvs local)."""
        wizard.set_icon('trustedcoin-wizard.png')
        wizard.reset_stack()
        title = _('Restore 2FA wallet')
        msg = ' '.join([
            'You are going to restore a wallet protected with two-factor authentication.',
            'Do you want to keep using two-factor authentication with this wallet,',
            'or do you want to disable it, and have two master private keys in your wallet?'
        ])
        choices = [('keep', 'Keep'), ('disable', 'Disable')]
        f = lambda x: self.on_choice(wizard, seed, passphrase, x)
        wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
    def on_choice(self, wizard, seed, passphrase, x):
        """Branch on the keep/disable choice made in restore_choice."""
        if x == 'disable':
            f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
            wizard.request_password(run_next=f)
        else:
            self.create_keystore(wizard, seed, passphrase)
    def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
        """Restore with 2FA disabled: keep both xprvs locally, derive x3 as watch-only."""
        xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
        k1 = keystore.from_xprv(xprv1)
        k2 = keystore.from_xprv(xprv2)
        k1.add_seed(seed)
        k1.update_password(None, password)
        k2.update_password(None, password)
        wizard.data['x1/'] = k1.dump()
        wizard.data['x2/'] = k2.dump()
        long_user_id, short_id = get_user_id(wizard.data)
        xtype = xpub_type(xpub1)
        # the server key is re-derived deterministically, no network needed
        xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
        k3 = keystore.from_xpub(xpub3)
        wizard.data['x3/'] = k3.dump()
        wizard.pw_args = WizardWalletPasswordSetting(password=password,
                                                     encrypt_storage=encrypt_storage,
                                                     storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
                                                     encrypt_keystore=bool(password))
        wizard.terminate()
    def create_remote_key(self, email, wizard):
        """Register the cosigner with the server and verify its reply against the
        deterministically derived third key, then prompt for the first OTP."""
        xpub1 = wizard.data['x1/']['xpub']
        xpub2 = wizard.data['x2/']['xpub']
        # Generate third key deterministically.
        long_user_id, short_id = get_user_id(wizard.data)
        xtype = xpub_type(xpub1)
        xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
        # secret must be sent by the server
        try:
            r = server.create(xpub1, xpub2, email)
        except (socket.error, ErrorConnectingServer):
            wizard.show_message('Server not reachable, aborting')
            wizard.terminate(aborted=True)
            return
        except TrustedCoinException as e:
            # 409: cosigner already exists on the server; continue without a
            # fresh otp_secret
            if e.status_code == 409:
                r = None
            else:
                wizard.show_message(str(e))
                return
        if r is None:
            otp_secret = None
        else:
            otp_secret = r.get('otp_secret')
            if not otp_secret:
                wizard.show_message(_('Error'))
                return
            _xpub3 = r['xpubkey_cosigner']
            _id = r['id']
            # sanity-check the server reply against our local derivation
            if short_id != _id:
                wizard.show_message("unexpected trustedcoin short_id: expected {}, received {}"
                                    .format(short_id, _id))
                return
            if xpub3 != _xpub3:
                wizard.show_message("unexpected trustedcoin xpub3: expected {}, received {}"
                                    .format(xpub3, _xpub3))
                return
        self.request_otp_dialog(wizard, short_id, otp_secret, xpub3)
    def check_otp(self, wizard, short_id, otp_secret, xpub3, otp, reset):
        """Either authenticate with the entered *otp*, or start the secret-reset flow."""
        if otp:
            self.do_auth(wizard, short_id, otp, xpub3)
        elif reset:
            wizard.opt_bip39 = False
            wizard.opt_slip39 = False
            wizard.opt_ext = True
            f = lambda seed, seed_type, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
            wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
    def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
        """Collect the optional passphrase, then run the OTP-secret reset."""
        f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
        wizard.passphrase_dialog(run_next=f) if is_ext else f('')
    def do_auth(self, wizard, short_id, otp, xpub3):
        """Verify *otp* with the server; on success store x3 and finish the wizard."""
        try:
            server.auth(short_id, otp)
        except TrustedCoinException as e:
            if e.status_code == 400:  # invalid OTP
                wizard.show_message(_('Invalid one-time password.'))
                # ask again for otp
                self.request_otp_dialog(wizard, short_id, None, xpub3)
            else:
                wizard.show_message(str(e))
                wizard.terminate(aborted=True)
        except Exception as e:
            wizard.show_message(repr(e))
            wizard.terminate(aborted=True)
        else:
            k3 = keystore.from_xpub(xpub3)
            wizard.data['x3/'] = k3.dump()
            wizard.data['use_trustedcoin'] = True
            wizard.terminate()
    def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
        """Prove seed ownership by signing a server challenge with both xprvs,
        obtaining a fresh Google Authenticator secret."""
        xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
        # the entered seed must reproduce the wallet's stored xpubs
        if (wizard.data['x1/']['xpub'] != xpub1 or
                wizard.data['x2/']['xpub'] != xpub2):
            wizard.show_message(_('Incorrect seed'))
            return
        r = server.get_challenge(short_id)
        challenge = r.get('challenge')
        message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
        def f(xprv):
            # sign the challenge with the key at derivation 0/0 of *xprv*
            rootnode = BIP32Node.from_xkey(xprv)
            key = rootnode.subkey_at_private_derivation((0, 0)).eckey
            sig = key.sign_message(message, True)
            return base64.b64encode(sig).decode()
        signatures = [f(x) for x in [xprv1, xprv2]]
        r = server.reset_auth(short_id, challenge, signatures)
        new_secret = r.get('otp_secret')
        if not new_secret:
            wizard.show_message(_('Request rejected by server'))
            return
        self.request_otp_dialog(wizard, short_id, new_secret, xpub3)
    @hook
    def get_action(self, db):
        """Hook: next wizard action needed to finish setting up a 2fa wallet,
        or None when the wallet is not 2fa / already fully set up."""
        if db.get('wallet_type') != '2fa':
            return
        if not db.get('x1/'):
            return self, 'show_disclaimer'
        if not db.get('x2/'):
            return self, 'show_disclaimer'
        if not db.get('x3/'):
            return self, 'accept_terms_of_use'
|
stream_emu.py | #!/usr/bin/env python
"""
This is an AARTFAAC imaging pipeline simulator. It will spawn 6 server sockets
where multiple clients can connect to. once connected the server will
send out empty fits images, with increasing timestamps. Each port has a
different band.
"""
import struct
import time
import monotonic
from datetime import datetime
import socket
import StringIO
import logging
from Queue import Queue
from os import path
import atexit
from threading import Lock, Thread, active_count
from tkp.testutil.data import DATAPATH
from astropy.io import fits
from tkp.stream import CHECKSUM
# if true only start one thread on the first port, useful for debugging
DEBUGGING = False
DEFAULT_PORTS = range(6666, 6672)
DEFAULT_FREQS = range(int(6e6), int(9e6), int(5e5))
logger = logging.getLogger(__name__)
class Repeater(object):
    """
    repeats incoming queue messages to subscribed queues

    Note: ``lock`` and ``receivers`` are *class* attributes, so every
    Repeater instance shares one subscriber list.  This is relied upon by
    the client handler threads, which construct their own Repeater() and
    still receive the timestamps broadcast by the emulator's instance.
    """
    lock = Lock()
    receivers = []

    def __init__(self):
        pass

    def run(self, in_queue):
        """
        Monitor incoming queue for message, repeat them to all subscribers
        """
        while True:
            mesg = in_queue.get()
            self.put(mesg)

    def put(self, mesg):
        """
        broadcast message to all subscribers

        args:
            mesg (object):
        """
        with self.lock:
            logger.debug("relaying to {} subscribers".format(len(self.receivers)))
            # fix: was a list comprehension used purely for its side effect
            for receiver in self.receivers:
                receiver.put(mesg)

    def subscribe(self, queue):
        """
        Add a queue to the subscription list

        args:
            out_queue (Queue.Queue):
        """
        logger.debug("subscriber")
        with self.lock:
            self.receivers.append(queue)

    def unsubscribe(self, out_queue):
        """
        Remove a queue from the subscription list

        args:
            out_queue (Queue.Queue):
        """
        logger.debug("unsubscriber")
        with self.lock:
            try:
                self.receivers.remove(out_queue)
            except ValueError:
                logger.error("item not in queue")
def create_fits_hdu():
    """
    Create a fake AARTFAAC file used as a base for the emulated servers. Could
    be anything but for now we just take the fits file from the test data.

    returns:
        astropy.io.fits.HDUList: a fits object
    """
    hdulist = fits.open(path.join(DATAPATH, 'accessors/aartfaac.fits'))
    # only the primary HDU is used as the template image
    hdu = hdulist[0]
    return hdu
def serialize_hdu(hdu):
    """
    Serialize a fits object.

    args:
        hdu (astropy.io.fits.HDUList): a fits object
    returns:
        str: a serialized fits object.
    """
    # pack the pixels column-major ('F' = Fortran order) as native-endian floats
    pixels = hdu.data.flatten('F')
    fmt = '=%sf' % hdu.data.size
    serialized = struct.pack(fmt, *pixels)
    return serialized, hdu.header.tostring()
def create_header(fits_length, array_length):
    """
    make a AARTFAAC header. Header is padded with zeros up to 512 bytes.

    args:
        fits_lenght (int): how long will the fits header be
        array_length (int): How long will the data be
    returns:
        str: aartfaac header ready for transmission.
    """
    # layout: checksum (Q, 8 bytes) + two lengths (L, 4 bytes each) followed
    # by 496 zero bytes of padding, 512 bytes total
    header_format = '=QLL496x'
    return struct.pack(header_format, CHECKSUM, fits_length, array_length)
def make_window(hdu):
    """
    Construct a complete serialised image including aartfaac header

    args:
        hdu (astropy.io.fits.HDUList): the first header
    returns:
        str: serialised fits file
    """
    data, fits_header = serialize_hdu(hdu)
    aartfaac_header = create_header(len(fits_header), len(data))
    # aartfaac header, then the fits header, then the pixel data
    return aartfaac_header + fits_header + data
def client_handler(conn, addr, freq):
    """
    Handling a client connection. Will push a serialised fits image plus
    AARTFAAC header to the connected client, triggered by an external queue
    supplying timestamps.

    args:
        conn (socket.socket): The connection with the client
        addr (str): address of the client
        freq (int): the subband frequency of this connection
    """
    # Repeater instances share one subscriber list (class attribute), so
    # subscribing here receives the timestamps broadcast by emulator()
    repeater = Repeater()
    port = conn.getsockname()[1]
    logger.info('connection from {} on {}'.format(addr, port))
    hdu = create_fits_hdu()
    hdu.header['RESTFREQ'] = str(freq)
    queue = Queue()
    repeater.subscribe(queue)
    while True:
        # block until we get a timestamp
        timestamp = queue.get()
        logger.info("sending to {} on {} ts {}".format(addr, port, timestamp))
        hdu.header['date-obs'] = timestamp.isoformat()
        window = make_window(hdu)
        try:
            conn.send(window)
        except socket.error:
            # client went away; stop streaming and clean up below
            logger.info("client {} disconnected".format(addr))
            break
    conn.close()
    repeater.unsubscribe(queue)
def socket_listener(port, freq):
    """
    Will listen on a specific socket and fire off threads if a client connects.
    Will retry the bind every 5 seconds in case of bind failure.

    args:
        port (int): On what port to listen
        freq (int): The corresponding frequency that belongs to the port
    """
    # loop and sleep for 5 until we can bind
    while True:
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # don't wait for socket release
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(('', port))
        except socket.error as e:
            logger.error("can't bind to port {}: {}".format(port, str(e)))
            logger.error("retrying in 5 seconds...")
            time.sleep(5)
        else:
            break
    sock.listen(2)
    atexit.register(lambda s: s.close(), sock)  # close socket on exit
    logger.info("Server listening on port {}".format(port))
    while True:
        conn, addr_port = sock.accept()
        if DEBUGGING:
            # Handle inline so exceptions surface in this thread while debugging.
            client_handler(conn, addr_port[0], freq)
        else:
            t = Thread(target=client_handler, name='repeater_thread',
                       args=(conn, addr_port[0], freq))
            t.daemon = True  # don't block interpreter exit on live clients
            t.start()
def timer(queue):
    """
    Pushes a timestamp on a Queue exactly every second. Runs forever.

    args:
        queue (Queue.Queue): a queue
    """
    while True:
        # We use monotonic so the timing doesn't drift.
        duty_cycle = 1  # seconds
        # Sleep until the next whole-second boundary of the monotonic clock.
        time.sleep(duty_cycle - monotonic.monotonic() % duty_cycle)
        now = datetime.now()
        logger.debug("timer is pushing {}".format(now))
        queue.put(now)
def emulator(ports=DEFAULT_PORTS, freqs=DEFAULT_FREQS):
    """
    Run the aartfaac stream emulator. Will listen on all ports defined in ports
    and change the frequency in the fits headers according to the freqs list.
    Daemon function, does not return.

    args:
        ports (tuple): list of ints representing ports
        freqs (tuple): list of frequencies
    """
    heartbeat_queue = Queue()
    repeater = Repeater()
    # The timer thread produces one timestamp per second...
    timer_thread = Thread(target=timer, name='timer_thread',
                          args=(heartbeat_queue,))
    timer_thread.daemon = True
    timer_thread.start()
    # ...and the repeater fans it out to every subscribed client queue.
    repeater_thread = Thread(target=repeater.run, name='repeater_thread',
                             args=(heartbeat_queue,))
    repeater_thread.daemon = True
    repeater_thread.start()
    # start all listening threads, one per (port, frequency) pair
    for port, freq in zip(ports, freqs):
        name = 'socket_{}_thread'.format(port)
        args = port, freq
        t = Thread(target=socket_listener, name=name, args=args)
        t.daemon = True
        t.start()
    # Keep the main thread alive; all workers are daemons.
    while active_count():
        time.sleep(1)
if __name__ == '__main__':
    # Verbose logging when run as a script; importers configure logging themselves.
    logging.basicConfig(level=logging.DEBUG)
    emulator()
|
ELM327CANInterface.py | '''
Created on Oct 5, 2013
@author: gcardwel,doug@doug-brunner.com
@summary: Driver plugin for ELM327 based interfaces.
@note: The ELM327 uses an AT command set (as in Hayes modem) and as such is a half duplex interface to the CAN bus.
Sending a frame means aborting reception, waiting for a prompt, setting CAN ID, then entering the frame to send.
@note: ELM327s are also highly bandwidth limited; the commercially available interfaces use 115.2 kbps async serial to
reach the ELM327 and send CAN data as hexadecimal. This means an 8 byte frame with 29 bit ID takes 25 bytes to
send (including CR), so reading more than 460.8 frames/second must cause a buffer overrun. This is about 1/4 of
the available bandwidth on a 250kbit CAN bus so appropriate filter settings are essential.
@note: The driver uses a separate thread for reading the serial port or TCP socket, implemented by the function
ELM327IO. It reads input, discards zero bytes (which ELM docs say can sometimes be inserted in error),
packages completed lines that appear to be CAN frames and places them in the receive queue. Lines that are not
packets and not empty are placed in the cmdResp queue, and command prompts from the ELM327 (indicating
readiness to receive commands) set the promptReady Event (reception of other data clears this event).
The main program thread implements reception by reading the Receive queue. Transmission and filter/baud setup
are implemented by sending CR to abort receive, waiting for PromptReady, sending the requisite commands, waiting
for a response if one is expected, then sending ATMA to resume reception if needed.
@note: Interesting apparently undocumented feature of the ELM327: if an extra character is appended to a CAN frame to
be sent (eg 0011228, CAN frame is 001122 extra char is 8) it takes it as a number of frames to read from the
bus before stopping. If this is absent it will read from the bus indefinitely.
'''
import sys
if not 'plugins' in __name__:
if not '..' in sys.path:
sys.path.append('..')
from comm import CANInterface
else:
from ..comm import CANInterface
import serial
import socket
import binascii
import time
import threading
import queue
import collections
import urllib.parse
import sys
DEBUG=False
class Error(CANInterface.Error):
    # Base class for all ELM327-specific errors raised by this module.
    pass
class SerialError(Error):
    """Raised when serial/TCP communication with the ELM327 fails."""

    def __init__(self, value=None):
        # Optional detail describing what failed (string or bytes).
        self._value = value

    def __str__(self):
        # PEP 8: compare against None with 'is not', not '!='.
        if self._value is not None:
            return 'Serial communications error with ELM327: ' + repr(self._value)
        else:
            return 'Serial communications error with ELM327'
class UnexpectedResponse(Error):
    """Raised when the ELM327 answers a command with something unexpected."""

    def __init__(self, value=None):
        # Optional: the command that provoked the unexpected response.
        self._value = value

    def __str__(self):
        # PEP 8: compare against None with 'is not', not '!='.
        if self._value is not None:
            return 'Unexpected response to ' + repr(self._value) + ' from ELM327'
        else:
            return 'Unexpected response from ELM327'
class BadBitrate(Error):
    """Raised when the ELM327 clocking cannot produce the requested CAN bitrate."""

    def __init__(self, value=None):
        # Optional: the bitrate that could not be generated.
        self._value = value

    def __str__(self):
        # PEP 8: compare against None with 'is not', not '!='.
        if self._value is not None:
            return 'ELM327 cannot generate bitrate ' + repr(self._value)
        else:
            return 'ELM327 cannot generate bitrate'
class BadFilter(Error):
    """Raised when an (ident, mask) acceptance filter is invalid."""

    def __init__(self, value=None):
        # Optional: the offending (ident, mask) pair.
        self._value = value

    def __str__(self):
        # PEP 8: compare against None with 'is not', not '!='.
        if self._value is not None:
            return 'Attempt to set invalid filter ' + repr(self._value)
        else:
            return 'Attempt to set invalid filter'
class SocketAsPort(object):
    '''
    Adapter for sockets that makes them look like serial ports; implements just the methods (and 'timeout' member)
    that are used in this program.
    '''
    _s = None          # the wrapped socket
    _s_timeout = 0     # timeout currently applied to the socket
    timeout = 0        # timeout requested by the user (seconds)

    def __init__(self, socket):
        self._s = socket

    def _updateTimeout(self):
        # Lazily push the user-requested timeout onto the socket when it changed.
        if self._s_timeout != self.timeout:
            self._s.settimeout(self.timeout)
            self._s_timeout = self.timeout

    def read(self, bufsize):
        """Read up to bufsize bytes, honoring the current timeout."""
        self._updateTimeout()
        return self._s.recv(bufsize)

    def write(self, data):
        """Write all of data; returns the byte count like serial.Serial.write().

        Bug fix: socket.send() may perform a partial send and the old code
        ignored its return value; sendall() guarantees the whole buffer is
        transmitted, matching pyserial's write() semantics.
        """
        self._updateTimeout()
        self._s.sendall(data)
        return len(data)

    def close(self):
        self._s.close()

    def flushInput(self):
        """Discard any data already received and buffered on the socket."""
        # A timeout of 0 puts the socket in non-blocking mode, where recv()
        # raises BlockingIOError rather than socket.timeout when no data is
        # pending - the old loop only caught socket.timeout and crashed.
        self._s.settimeout(0)
        self._s_timeout = 0
        while 1:
            try:
                if not self._s.recv(4096):
                    # Peer closed the connection; nothing more to flush
                    # (recv() returning b'' forever would loop endlessly).
                    break
            except (socket.timeout, BlockingIOError):
                break
        self._updateTimeout()
class LoggingPort(serial.Serial):
    """serial.Serial subclass that logs every open/close/read/write for protocol debugging."""

    # Class-level lock shared by all instances so log lines from multiple
    # threads are written atomically and do not interleave.
    _logLock = threading.Lock()

    def __init__(self, debugLogfile, **kwargs):
        super().__init__(**kwargs)
        # File-like object receiving the log; may be sys.stdout.
        self._debugLogfile = debugLogfile

    def _prefix(self):
        # Timestamp plus thread id so concurrent activity can be untangled.
        # NOTE(review): threading.currentThread() is deprecated in favor of
        # threading.current_thread() - consider updating.
        return str(time.time()) + ' ' + str(threading.currentThread().ident)

    def open(self, *args, **kwargs):
        """Open the port, then log the event."""
        result = super().open(*args, **kwargs)
        prefix = self._prefix()
        with self._logLock:
            self._debugLogfile.write(prefix + ' OPEN' + '\n')
        return result

    def close(self, *args, **kwargs):
        """Close the port, then log the event."""
        result = super().close(*args, **kwargs)
        prefix = self._prefix()
        with self._logLock:
            self._debugLogfile.write(prefix + ' CLOSE' + '\n')
        return result

    def read(self, *args, **kwargs):
        """Read from the port; log only non-empty reads to keep the log small."""
        result = super().read(*args, **kwargs)
        prefix = self._prefix()
        with self._logLock:
            if len(result) > 0:
                self._debugLogfile.write(prefix + ' RD ' + repr(result) + '\n')
        return result

    def write(self, data):
        """Write to the port, logging the exact bytes sent."""
        result = super().write(data)
        prefix = self._prefix()
        with self._logLock:
            self._debugLogfile.write(prefix + ' WR ' + repr(data) + '\n')
        return result
def PutDiscard(queue, item):
    '''
    Put something on a queue and discard the oldest entry if it's full. Has a race (consumer might call get() just after this func
    finds the queue is full and we will needlessly discard an item) but this should only happen in the case of CPU starvation
    or data that's not relevant anyway.
    '''
    if queue.full():
        try:
            # Bug fix: use get_nowait() instead of get() so we can never block
            # forever if a consumer drained the queue between the full() check
            # and this call (the same race documented above).
            queue.get_nowait()
        except Exception:
            # queue.Empty - the 'queue' parameter shadows the queue module
            # here, so the specific exception class is not nameable.
            pass
    queue.put(item)
class ELM327IO(object):
    '''
    Reads input, discards zero bytes (which ELM docs say can sometimes be inserted in error),
    packages completed lines that appear to be CAN frames and places them in the receive queue.
    Lines that are not packets and not empty are placed in the cmdResp queue, and command prompts from the ELM327
    (indicating readiness to receive commands) set the promptReady Event.
    '''

    # Capacity of the receive/transmit/cmdResp queues; PutDiscard() drops the
    # oldest entry rather than blocking when one fills up.
    _QUEUE_MAX_SIZE = 16384
    # Port read timeout, and therefore the tick period of the I/O thread loop.
    _TICK_TIME = 0.001
    # Minimum spacing between consecutive writes, giving the ELM327 time to recover.
    _ELM_RECOVERY_TIME = 0.002
    # How long sync() waits for the I/O thread to drain outstanding work.
    _CYCLE_TIMEOUT = 1.0
    _EOL = b'\r'
    _PROMPT = b'>'

    def __init__(self, port):
        """Take ownership of `port` and start the background I/O thread."""
        self._port = None
        self._receive = queue.Queue(self._QUEUE_MAX_SIZE)    # parsed CAN frames
        self._transmit = queue.Queue(self._QUEUE_MAX_SIZE)   # raw bytes to write
        self._cmdResp = queue.Queue(self._QUEUE_MAX_SIZE)    # non-frame response lines
        self._promptReady = threading.Event()
        self._pipelineClear = threading.Event()
        self._terminate = threading.Event()
        self._thread = None
        # Buffer of complete/partial lines read but not yet parsed.
        self._lines = collections.deque()
        self._timeToSend = 0
        self._port = port
        self._port.timeout = self._TICK_TIME
        self._thread = threading.Thread(target=self.threadFunc)
        self._thread.daemon = True  # In case main thread crashes...
        self._thread.start()

    def threadFunc(self):
        """I/O thread main loop: read and parse input, service the transmit queue."""
        while 1:
            if self._terminate.is_set():
                if DEBUG:
                    print(str(time.time()) + ' ELM327IO(): terminate set')
                return
            # Tentatively plan to declare the pipeline clear if it isn't
            # already; any transmit activity this cycle cancels that.
            setPipelineClear = (not self._pipelineClear.is_set())
            setPromptReady = False
            # Read whatever is available; the ELM327 can insert stray NUL
            # bytes, so strip them before splitting into lines.
            linesRead = self._port.read(4096).translate(None, b'\0').splitlines(keepends=True)
            if len(linesRead) > 0:
                if len(self._lines) > 0:
                    # First chunk continues the unfinished line left over
                    # from the previous read.
                    self._lines[-1] += linesRead[0]
                    del linesRead[0]
                for lineRead in linesRead:
                    self._lines.append(lineRead)
                if self._lines[-1] == self._PROMPT:
                    if DEBUG:
                        print(str(time.time()) + ' promptReady.set()')
                    # Manage the promptReady Event; it's set if and only if there is an incomplete line consisting of a prompt.
                    setPromptReady = True
                    del self._lines[-1]
            while self._lines:
                rawLine = self._lines.popleft()
                if self._EOL in rawLine:
                    line = rawLine.strip(self._EOL)
                    # Is it empty?
                    if len(line) == 0:
                        pass
                    # Does it contain non-hex characters?
                    elif any(x for x in line if x not in b'0123456789ABCDEF'):
                        # Command response or a packet received with errors
                        PutDiscard(self._cmdResp, line)
                    else:
                        # Must be a valid received frame.
                        # Odd digit count -> 3-digit (11-bit) ID; even count ->
                        # 8-digit (29-bit) ID, flagged with the extended bit.
                        if len(line) % 2 == 1:
                            idLen = 3
                            mask = 0
                        else:
                            idLen = 8
                            mask = 0x80000000
                        if len(line) >= idLen:
                            ident = int(line[0:idLen], 16)
                            data = binascii.unhexlify(line[idLen:])
                            packet = CANInterface.Packet(ident | mask, data)
                            PutDiscard(self._receive, packet)
                        else:
                            # Too short to be valid
                            PutDiscard(self._cmdResp, line)
                else:
                    if rawLine == self._PROMPT:
                        if DEBUG:
                            print(str(time.time()) + ' promptReady.set()')
                        # Manage the promptReady Event; it's set if and only if there is an incomplete line consisting of a prompt.
                        setPromptReady = True
                    else:
                        # Incomplete line: push it back and wait for more input.
                        self._lines.appendleft(rawLine)
                    break
            # Service the transmit queue, rate-limited by _ELM_RECOVERY_TIME;
            # sending consumes the prompt and keeps the pipeline busy.
            if self._timeToSend < time.time() and not self._transmit.empty():
                self._port.write(self._transmit.get_nowait())
                self._promptReady.clear()
                self._timeToSend = time.time() + self._ELM_RECOVERY_TIME
                setPromptReady = False
                setPipelineClear = False
            if setPromptReady:
                self._promptReady.set()
            if setPipelineClear and self._transmit.empty():
                self._pipelineClear.set()

    def sync(self):
        """Block until the I/O thread completes a cycle with nothing left to send."""
        self._pipelineClear.clear()
        if not self._pipelineClear.wait(self._CYCLE_TIMEOUT):
            raise Error #FIXME

    def syncAndGetPrompt(self, intfcTimeout, retries=5):
        '''
        Synchronize the I/O thread and obtain a prompt. If one is available return immediately; otherwise get one
        by sending CR and waiting for a prompt to come back.
        Loop is believed to be the best way of dealing with the race condition: ELM327 is receiving CAN data,
        so isPromptReady() returns False, but just before we send the CR, ELM327 runs into buffer full, stops receive,
        and returns to the prompt. ELM327 then gets the CR and resumes receiving, so the CR was actually
        counterproductive. Buffer full conditions should not be allowed to happen (through appropriate filter settings)
        but we should deal with them somewhat robustly if e.g. there is unexpected traffic on the CAN.
        '''
        self.sync()
        if self.isPromptReady():
            return
        for _ in range(retries):
            self.write(b'\r')
            if self.waitPromptReady(intfcTimeout):
                return
        raise UnexpectedResponse(b'\r')

    def getReceived(self, timeout=0):
        """Pop one received CAN packet; raises queue.Empty on timeout."""
        return self._receive.get(timeout=timeout)

    def getCmdResp(self, timeout=0):
        """Pop one command-response line; raises queue.Empty on timeout."""
        return self._cmdResp.get(timeout=timeout)

    def flushCmdResp(self):
        """Discard all pending command-response lines."""
        while 1:
            try:
                self._cmdResp.get_nowait()
            except queue.Empty:
                break

    def write(self, data):
        """Queue raw bytes for transmission and wait until the I/O thread sends them."""
        self._transmit.put(data)
        self.sync()

    def terminate(self):
        """Ask the I/O thread to exit and wait for it to finish."""
        self._terminate.set()
        self._thread.join()

    def isPromptReady(self):
        # True when the ELM327 most recently emitted a bare '>' prompt.
        return self._promptReady.is_set()

    def waitPromptReady(self, timeout):
        # Wait up to `timeout` seconds for a prompt; True if one arrived.
        return self._promptReady.wait(timeout)
class ELM327CANInterface(CANInterface.Interface):
    '''
    Interface using ELM327 via serial. Spawns a thread that manages communications with the device; two queues allow it to communicate with the main thread.
    "Write" queue can request transmission of a frame, set baud rate, set filters, or request that monitoring stop.
    "Read" queue contains messages received from the bus.
    '''

    # Async baudrates to probe, most likely candidates first.
    _POSSIBLE_BAUDRATES = [500000, 115200, 38400, 9600, 230400, 460800, 57600, 28800, 14400, 4800, 2400, 1200]

    def __init__(self, parsedURL):
        """
        Open the interface described by parsedURL (a urllib.parse result).
        If netloc is present, connect over TCP; otherwise parsedURL.path names
        a serial device whose baudrate is probed automatically.
        Raises SerialError / UnexpectedResponse / CANInterface.Error on failure.
        """
        self._slaveAddr = None
        self._port = None
        self._bitrate = None
        self._txAddrStd = False
        self._cfgdBitrate = None
        self._cfgdTxAddrStd = False
        self._baudDivisor = None
        self._baud87Mult = None
        self._hasSetCSM0 = False
        self._tcpTimeout = 1.0
        self._serialTimeout = 0.2
        self._dumpTraffic = False
        self._cfgdHeaderIdent = None
        self._filter = None
        self._cfgdFilter = None
        self._noCsmQuirk = False
        self._isSt = False
        self._io = None
        urlQueryDict = urllib.parse.parse_qs(parsedURL.query)
        debugLogfile = None
        if 'debuglog' in urlQueryDict:
            if urlQueryDict['debuglog'][0] == 'stdout':
                debugLogfile = sys.stdout
            else:
                debugLogfile = open(urlQueryDict['debuglog'][0], 'w')
        if len(parsedURL.netloc):
            # If netloc is present, we're using a TCP connection
            addr = parsedURL.netloc.split(':')
            if len(addr) == 1:
                addr.append(35000)  # use default port
            elif len(addr) != 2:
                raise CANInterface.Error('Interface address invalid')
            # Bug fix: socket.create_connection() is a module-level function,
            # not a method of socket objects; the old s.create_connection()
            # raised AttributeError. Also coerce the port to int for the
            # address tuple and use the TCP timeout (a timeout of 0 would make
            # the connect non-blocking and fail immediately).
            s = socket.create_connection((addr[0], int(addr[1])), self._tcpTimeout)
            self._port = SocketAsPort(s)
            self._intfcTimeout = self._tcpTimeout
        else:
            if debugLogfile is not None:
                port = LoggingPort(debugLogfile)
            else:
                port = serial.Serial()
            port.port = parsedURL.path
            port.open()
            foundBaud = False
            for baudRate in self._POSSIBLE_BAUDRATES:
                if DEBUG:
                    print('Trying ' + str(baudRate))
                port.baudrate = baudRate
                port.interCharTimeout = min(10 / baudRate, 0.0001)
                port.timeout = 0.05
                # Try sending a CR, if we get a prompt then it's probably the right baudrate
                port.flushInput()
                port.write(b'\r')
                response = port.read(16384)
                if len(response) == 0 or response[-1:] != b'>':
                    # One retry; the first CR may only have aborted monitoring.
                    port.write(b'\r')
                    response = port.read(16384)
                    if len(response) == 0 or response[-1:] != b'>':
                        continue
                # Turn off echo, this also serves as a test to make sure we didn't randomly get a > at the end of some gibberish
                port.flushInput()
                port.write(b'ATE0\r')
                response = port.read(16)
                if b'OK' not in response:
                    continue
                # If we made contact at baudrate 500k, we're done
                if baudRate == 500000:
                    foundBaud = True
                    break
                # Not at 500k, try to change ELM to that
                port.timeout = 1.28  # Maximum possible timeout for ELM327
                port.flushInput()
                port.write(b'ATBRD08\r')
                response = port.read(2)
                if response == b'?\r':
                    # Device does not allow baudrate change, but we found its operating baudrate
                    foundBaud = True
                    break
                elif response == b'OK':
                    # Device allows baudrate change, try to switch to 500k
                    port.baudrate = 500000
                    port.flushInput()
                    # Bug fix: keep the identification string in its own
                    # variable; the old code overwrote `response` below and
                    # then compared the 2-byte b'OK' read against
                    # b'ELM327 v1.5', which could never match.
                    idResponse = port.read(11)
                    if idResponse[0:6] != b'ELM327':
                        # Baudrate switch unsuccessful, try to recover
                        port.baudrate = baudRate
                        port.write(b'\r')
                        port.flushInput()
                        response = port.read(1024)
                        if len(response) == 0 or response[-1:] != b'>':
                            raise UnexpectedResponse('switching baudrate')
                    # Baudrate switched, send a CR to confirm we're on board
                    port.flushInput()
                    port.write(b'\r')
                    response = port.read(2)
                    if response != b'OK':
                        raise UnexpectedResponse('switching baudrate')
                    foundBaud = True
                    # "ELM327 v1.5" identifies common clones that reject ATCSM.
                    if idResponse == b'ELM327 v1.5':
                        self._noCsmQuirk = True
                    break
            if not foundBaud:
                raise SerialError('could not find baudrate')
            port.timeout = 0
            self._port = port
            self._intfcTimeout = self._serialTimeout
        self._port.flushInput()
        # Start the I/O thread, which takes over control of the port.
        # Bug fix: use self._port; the local name 'port' is only bound on the
        # serial path, so ELM327IO(port) raised NameError for TCP connections.
        self._io = ELM327IO(self._port)
        self._runCmdWithCheck(b'ATWS', checkOK=False, closeOnFail=True)  # Software reset
        self._runCmdWithCheck(b'ATE0', closeOnFail=True)  # Turn off echo
        self._runCmdWithCheck(b'ATL0', closeOnFail=True)  # Turn off newlines
        self._runCmdWithCheck(b'ATS0', closeOnFail=True)  # Turn off spaces
        self._runCmdWithCheck(b'ATH1', closeOnFail=True)  # Turn on headers
        self._runCmdWithCheck(b'ATAL', closeOnFail=True)  # Allow full length messages
        try:
            self._runCmdWithCheck(b'STFCP', closeOnFail=False)  # See if this is an ST device
            self._isSt = True
        except UnexpectedResponse:
            pass
        self.setBitrate(CANInterface.URLBitrate(parsedURL))
        self._slaveAddr = CANInterface.XCPSlaveCANAddr(0xFFFFFFFF, 0xFFFFFFFF)

    def __enter__(self):
        return self

    def __exit__(self, exitType, value, traceback):
        self.close()

    def close(self):
        """Stop the I/O thread (which owns the port), or close the bare port if the thread never started."""
        if self._io is not None:
            self._io.terminate()
        else:
            if self._port is not None:
                self._port.close()

    def setBitrate(self, bitrate):
        """Set the CAN bitrate and reprogram the device if it changed."""
        self._bitrate = bitrate
        self._updateBitrateTXType()

    def setFilter(self, filt):
        """Set the (ident, mask) acceptance filter and resume monitoring."""
        self._filter = filt
        self._doSetFilter(filt)

    def _doSetFilter(self, filt):
        """Program hardware filters for `filt` = (ident, mask) and start ATMA monitoring."""
        # User probably doesn't mean to set a filter that accepts both standard and extended IDs
        if not filt[1] & 0x80000000:
            raise BadFilter((hex(filt[0]), hex(filt[1])))
        if filt[0] & 0x80000000:
            stdFilter = '7ff'  # Set a standard ID filter that matches nothing
            stdMask = '000'
            extFilter = '{:08x}'.format(filt[0] & 0x1FFFFFFF)
            extMask = '{:08x}'.format(filt[1] & 0x1FFFFFFF)
        else:
            stdFilter = '{:03x}'.format(filt[0] & 0x7FF)
            stdMask = '{:03x}'.format(filt[1] & 0x7FF)
            # Extended filter that matches nothing.
            extFilter = '1fffffff'
            extMask = '00000000'
        if self._isSt:
            # ST devices get an additional pass filter via their own command set.
            self._runCmdWithCheck(b'STFCP')
            if filt[0] & 0x80000000:
                self._runCmdWithCheck(b'STFAP' + bytes(extFilter, 'utf-8') + b',' + bytes(extMask, 'utf-8'))
            else:
                self._runCmdWithCheck(b'STFAP' + bytes(stdFilter, 'utf-8') + b',' + bytes(stdMask, 'utf-8'))
        self._runCmdWithCheck(b'ATCF' + bytes(stdFilter, 'utf-8'))
        self._runCmdWithCheck(b'ATCM' + bytes(stdMask, 'utf-8'))
        self._runCmdWithCheck(b'ATCF' + bytes(extFilter, 'utf-8'))
        self._runCmdWithCheck(b'ATCM' + bytes(extMask, 'utf-8'))
        # Resume monitor-all so received frames flow into the I/O thread.
        self._io.write(b'ATMA\r')
        self._cfgdFilter = filt

    def _setTXTypeByIdent(self, ident):
        # The extended-ID flag (bit 31) selects 29-bit transmission headers.
        if ident & 0x80000000:
            self._txAddrStd = False
        else:
            self._txAddrStd = True

    def _updateBitrateTXType(self):
        """Reprogram bitrate/header type only when they differ from the configured state."""
        if self._bitrate != self._cfgdBitrate or self._txAddrStd != self._cfgdTxAddrStd:
            if self._bitrate != self._cfgdBitrate:
                self._calcBaudDivisor()
            self._setBitrateTXType()
            self._cfgdBitrate = self._bitrate
            self._cfgdTxAddrStd = self._txAddrStd

    def _calcBaudDivisor(self):
        """Compute the ATPB divisor for self._bitrate: try the 500k/n series, then the 500k*8/7 multiplier series."""
        baudTol = 0.001
        divisor = int(round(500000 / self._bitrate))
        if abs(500000 / divisor / self._bitrate - 1) > baudTol:
            divisor = int(round((500000 * 8 / 7) / self._bitrate))
            if abs((500000 * 8 / 7) / divisor / self._bitrate - 1) > baudTol:
                # NOTE(review): BadBitrate(self._bitrate) looks like the
                # intended exception here; ValueError is kept so existing
                # callers that catch it don't break.
                raise ValueError('')
            self._baudDivisor = divisor
            self._baud87Mult = True
        else:
            self._baudDivisor = divisor
            self._baud87Mult = False

    def _setBitrateTXType(self):
        """Program protocol B (user-defined CAN) with the computed divisor and header type."""
        if self._txAddrStd:
            canOptions = 0xE0
        else:
            canOptions = 0x60
        if self._baud87Mult:
            canOptions |= 0x10
        if self._baudDivisor is None:
            raise Error  # FIXME
        # STN1110 requires that protocol not be set to B when altering B settings
        self._runCmdWithCheck(b'ATSP1', closeOnFail=True)
        self._runCmdWithCheck(b'ATPB' + binascii.hexlify(bytearray([canOptions, self._baudDivisor])), closeOnFail=True)
        self._runCmdWithCheck(b'ATSPB', closeOnFail=True)
        self._runCmdWithCheck(b'ATAT0', closeOnFail=True)  # Disable adaptive timing
        self._runCmdWithCheck(b'ATSTff', closeOnFail=True)  # Set maximum timeout = 1.02 s
        if not self._hasSetCSM0:
            try:
                self._runCmdWithCheck(b'ATCSM0', closeOnFail=False)
            except UnexpectedResponse:
                # Some clones (notably "v1.5") don't implement ATCSM at all.
                if not self._noCsmQuirk:
                    print('Warning: Failed to set CAN silent monitoring to off')
            self._hasSetCSM0 = True

    def _runCmdWithCheck(self, cmd, checkOK=True, closeOnFail=False):
        """Send one AT/ST command, wait for the prompt, and optionally require an 'OK' response."""
        def failAction():
            # Local failure policy: optionally tear down, always raise.
            if closeOnFail:
                self.close()
            raise UnexpectedResponse(cmd)

        try:
            self._io.syncAndGetPrompt(self._intfcTimeout)
        except UnexpectedResponse:
            if closeOnFail:
                self.close()
            raise
        if DEBUG:
            print(str(time.time()) + ' _runCmdWithCheck(): Prompt ready')
        self._io.flushCmdResp()
        self._io.write(cmd + b'\r')
        if DEBUG:
            print(str(time.time()) + ' _runCmdWithCheck(): put ' + cmd.decode('utf-8'))
        if not self._io.waitPromptReady(self._intfcTimeout):
            failAction()
        if checkOK:
            gotOK = False
            while 1:
                try:
                    response = self._io.getCmdResp(timeout=0)
                    if b'OK' in response:
                        gotOK = True
                except queue.Empty:
                    break
            if not gotOK:
                failAction()

    def exactMask(self, ident):
        """Return the mask that matches exactly `ident`, including its standard/extended type."""
        if ident & 0x80000000:
            return 0x9FFFFFFF
        else:
            return 0x800007FF

    def connect(self, address, dumpTraffic):
        """Attach to one XCP slave; filter so only its response ID is received."""
        self._slaveAddr = address
        self._dumpTraffic = dumpTraffic
        self._doSetFilter((address.resId.raw, self.exactMask(address.resId.raw)))

    def disconnect(self):
        """Detach from the slave and restore the user-set filter."""
        self._slaveAddr = CANInterface.XCPSlaveCANAddr(0xFFFFFFFF, 0xFFFFFFFF)
        self._doSetFilter(self._filter)
        self._dumpTraffic = False

    def _doTransmit(self, data, ident):
        """Transmit one frame, reprogramming the TX header only when `ident` changed."""
        if self._dumpTraffic:
            print('TX ' + CANInterface.ID(ident).getString() + ' ' + CANInterface.getDataHexString(data))
        self._setTXTypeByIdent(ident)
        self._updateBitrateTXType()
        if self._cfgdHeaderIdent != ident:
            if ident & 0x80000000:
                # 29-bit ID: priority byte via ATCP, low 24 bits via ATSH.
                self._runCmdWithCheck(b'ATCP' + bytes('{:02x}'.format((ident >> 24) & 0x1F), 'utf-8'))
                self._runCmdWithCheck(b'ATSH' + bytes('{:06x}'.format(ident & 0xFFFFFF), 'utf-8'))
            else:
                self._runCmdWithCheck(b'ATSH' + bytes('{:03x}'.format(ident & 0x7FF), 'utf-8'))
            self._cfgdHeaderIdent = ident
        else:
            self._io.syncAndGetPrompt(self._intfcTimeout)  # Not synchronized by calling _runCmdWithCheck(), so do it here
        self._io.write(binascii.hexlify(data) + b'\r')

    def transmit(self, data):
        """Transmit `data` to the connected slave's command ID."""
        assert self._cfgdBitrate is not None and self._cfgdFilter is not None
        self._doTransmit(data, self._slaveAddr.cmdId.raw)

    def transmitTo(self, data, ident):
        """Transmit `data` to an arbitrary CAN ID."""
        assert self._cfgdBitrate is not None and self._cfgdFilter is not None
        self._doTransmit(data, ident)

    def receive(self, timeout):
        """
        Receive XCP response payloads (first byte 0xFF or 0xFE) from the
        connected slave. Waits up to `timeout` seconds for the first message,
        then drains whatever else is already queued without blocking.
        """
        assert self._cfgdBitrate is not None and self._cfgdFilter is not None
        if self._slaveAddr.resId.raw == 0xFFFFFFFF:
            return []

        msgs = []
        endTime = time.time() + timeout
        while 1:
            try:
                if len(msgs):
                    packet = self._io.getReceived(timeout=0)
                else:
                    newTimeout = endTime - time.time()
                    if newTimeout < 0:
                        packet = self._io.getReceived(timeout=0)
                    else:
                        packet = self._io.getReceived(timeout=newTimeout)
            except queue.Empty:
                break

            # Bug fix: guard against zero-length frames before indexing
            # packet.data[0], consistent with receivePackets() below.
            if len(packet.data) > 0 and packet.ident == self._slaveAddr.resId.raw and (packet.data[0] == 0xFF or packet.data[0] == 0xFE):
                msgs.append(packet.data)
                if self._dumpTraffic:
                    print('RX ' + self._slaveAddr.resId.getString() + ' ' + CANInterface.getDataHexString(packet.data))
        return msgs

    def receivePackets(self, timeout):
        """
        Receive whole packets (any ID) whose payload starts with 0xFF or 0xFE.
        Same timeout semantics as receive().
        """
        assert self._cfgdBitrate is not None and self._cfgdFilter is not None
        packets = []
        endTime = time.time() + timeout
        while 1:
            try:
                if len(packets):
                    packet = self._io.getReceived(timeout=0)
                else:
                    newTimeout = endTime - time.time()
                    if newTimeout < 0:
                        packet = self._io.getReceived(timeout=0)
                    else:
                        packet = self._io.getReceived(timeout=newTimeout)
            except queue.Empty:
                break

            if len(packet.data) > 0 and (packet.data[0] == 0xFF or packet.data[0] == 0xFE):
                packets.append(packet)
                if self._dumpTraffic:
                    print('RX ' + CANInterface.ID(packet.ident).getString() + ' ' + CANInterface.getDataHexString(packet.data))
        return packets
# Register this driver with the framework under the 'elm327' URL scheme.
CANInterface.addInterface("elm327", ELM327CANInterface)

if __name__ == "__main__":
    # Manual smoke test: open a Bluetooth-serial ELM327 with debug logging,
    # set a match-nothing standard filter, transmit on an extended and a
    # standard ID, then dump whatever was received for one second.
    parsedurl = urllib.parse.urlparse('elm327:/dev/rfcomm0?debuglog=elm327.log')
    elm327 = ELM327CANInterface(parsedurl)
    elm327.setFilter((0x000, 0x80000000))
    elm327.transmitTo(b'1234', 0x9FFFFFFF)
    elm327.transmitTo(b'1234', 0x7FF)
    time.sleep(1)
    packets = elm327.receivePackets(1)
    for packet in packets:
        print(repr(packet))
    elm327.close()
|
run_qa.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet)."""
import argparse
import glob
import logging
import os
import random
import timeit
import time
import sys
import threading
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
WEIGHTS_NAME,
AdamW,
AutoConfig,
AutoModelForQuestionAnswering,
AutoTokenizer,
get_linear_schedule_with_warmup,
squad_convert_examples_to_features,
)
from transformers.data.metrics.squad_metrics import (
compute_predictions_log_probs,
compute_predictions_logits,
squad_evaluate,
)
from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor
import intel_extension_for_pytorch as ipex
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)

# All config classes that support question answering, and the corresponding
# model-type strings accepted on the command line.
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def trace_handler(prof):
    # Callback for torch.profiler at the end of each profiling cycle:
    # print an aggregate op table and export a Chrome trace per step.
    print(prof.key_averages().table(
        sort_by="self_cpu_time_total", row_limit=-1))
    # NOTE(review): assumes ./log/ already exists - confirm it is created elsewhere.
    prof.export_chrome_trace("./log/test_trace_" + str(prof.step_num) + ".json")
def set_seed(args):
    """Seed every RNG in play (python, numpy, torch, and CUDA when GPUs are used) for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def to_list(tensor):
    """Detach a tensor from the autograd graph, move it to CPU, and return it as a (nested) python list."""
    detached = tensor.detach()
    return detached.cpu().tolist()
def train(args, train_dataset, model, tokenizer):
    """Train the model.

    Runs the standard SQuAD fine-tuning loop: builds the dataloader,
    optimizer and LR schedule, optionally resumes from a checkpoint embedded
    in args.model_name_or_path, and periodically logs/evaluates/saves.
    Returns (global_step, average training loss).
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)

    # max_steps, when set, wins over num_train_epochs.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

    # Prepare optimizer and schedule (linear warmup and decay);
    # biases and LayerNorm weights are excluded from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )

    # Check if saved optimizer or scheduler states exist
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )

    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        "  Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    global_step = 1
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if os.path.exists(args.model_name_or_path):
        try:
            # set global_step to gobal_step of last saved checkpoint from model path
            # (directory names look like ".../checkpoint-<global_step>/")
            checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0]
            global_step = int(checkpoint_suffix)
            epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
            steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)

            logger.info("  Continuing training from checkpoint, will skip to saved global_step")
            logger.info("  Continuing training from epoch %d", epochs_trained)
            logger.info("  Continuing training from global step %d", global_step)
            logger.info("  Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
        except ValueError:
            # Path exists but has no trailing step number: fresh fine-tuning.
            logger.info("  Starting fine-tuning.")

    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
    )
    # Added here for reproductibility
    set_seed(args)

    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):

            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue

            model.train()
            batch = tuple(t.to(args.device) for t in batch)

            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "token_type_ids": batch[2],
                "start_positions": batch[3],
                "end_positions": batch[4],
            }

            # These architectures don't use token_type_ids.
            if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
                del inputs["token_type_ids"]

            if args.model_type in ["xlnet", "xlm"]:
                inputs.update({"cls_index": batch[5], "p_mask": batch[6]})
                if args.version_2_with_negative:
                    inputs.update({"is_impossible": batch[7]})
            if hasattr(model, "config") and hasattr(model.config, "lang2id"):
                inputs.update(
                    {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)}
                )

            outputs = model(**inputs)
            # model outputs are always tuple in transformers (see doc)
            loss = outputs[0]

            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel (not distributed) training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            tr_loss += loss.item()
            # Only step the optimizer once every gradient_accumulation_steps batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1

                # Log metrics
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Only evaluate when single GPU otherwise metrics may not average well
                    if args.local_rank == -1 and args.evaluate_during_training:
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss

                # Save model checkpoint
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    # Take care of distributed/parallel training
                    model_to_save = model.module if hasattr(model, "module") else model
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)

                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)

                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)

            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break

    if args.local_rank in [-1, 0]:
        tb_writer.close()

    return global_step, tr_loss / global_step
def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False):
    """Load SQuAD features/dataset from the on-disk cache, or build and cache them.

    Args:
        args: parsed CLI namespace (uses data_dir, model_name_or_path,
            max_seq_length, overwrite_cache, local_rank, ...).
        tokenizer: tokenizer used to convert examples into features.
        evaluate: True -> load the dev set, False -> the train set.
        output_examples: if True, also return the raw examples and features.

    Returns:
        dataset, or (dataset, examples, features) when output_examples is True.
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        # Make sure only the first process in distributed training processes the
        # dataset; the other ranks block here and then read the cache.
        torch.distributed.barrier()

    # Load data features from cache or dataset file
    input_dir = args.data_dir if args.data_dir else "."
    cached_features_file = os.path.join(
        input_dir,
        "cached_{}_{}_{}".format(
            "dev" if evaluate else "train",
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
        ),
    )

    # Init features and dataset from cache if it exists
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features_and_dataset = torch.load(cached_features_file)
        features, dataset, examples = (
            features_and_dataset["features"],
            features_and_dataset["dataset"],
            features_and_dataset["examples"],
        )
    else:
        logger.info("Creating features from dataset file at %s", input_dir)
        if not args.data_dir and ((evaluate and not args.predict_file) or (not evaluate and not args.train_file)):
            try:
                import tensorflow_datasets as tfds
            except ImportError:
                # Fixed garbled wording of the original message.
                raise ImportError("If no data_dir is specified, tensorflow_datasets needs to be installed.")
            if args.version_2_with_negative:
                # logger.warn() is deprecated in favor of logger.warning().
                logger.warning("tensorflow_datasets does not handle version 2 of SQuAD.")
            tfds_examples = tfds.load("squad")
            examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)
        else:
            processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
            if evaluate:
                examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file)
            else:
                examples = processor.get_train_examples(args.data_dir, filename=args.train_file)
        features, dataset = squad_convert_examples_to_features(
            examples=examples,
            tokenizer=tokenizer,
            max_seq_length=args.max_seq_length,
            doc_stride=args.doc_stride,
            max_query_length=args.max_query_length,
            is_training=not evaluate,
            return_dataset="pt",
            threads=args.threads,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save({"features": features, "dataset": dataset, "examples": examples}, cached_features_file)

    if args.local_rank == 0 and not evaluate:
        # Release the other ranks now that the cache exists.
        torch.distributed.barrier()

    if output_examples:
        return dataset, examples, features
    return dataset
def benchmark_evaluate(args, model, eval_dataloader):
    """Run a timed inference loop over *eval_dataloader* and print throughput.

    The first `args.perf_begin_iter` iterations are treated as warm-up and
    excluded from timing; the next `args.perf_run_iters` iterations are timed.

    Fixes vs. the original:
      * latency was printed as `throughput / 10**6`, which is not a time at
        all; it is now the measured time per timed batch in milliseconds;
      * "Batch size" printed the literal 1 instead of the actual batch size;
      * the summary `break` only left the inner loop, so the stats were
        re-printed once per remaining epoch — we now `return`;
      * removed the unused `i = 0;` and the dead commented-out profiler block.
    """
    steps_per_epoch = len(eval_dataloader)
    total_steps = (args.perf_run_iters + args.perf_begin_iter)
    test_epoches = int(total_steps / steps_per_epoch)
    print('Evaluating BERT: Steps per Epoch {} total Steps {}'.format(steps_per_epoch, total_steps))
    total_time = 0
    with tqdm(total=total_steps, desc="Evaluating") as pbar:
        for epoch in range(test_epoches + 1):
            for it, batch in enumerate(eval_dataloader):
                if epoch * steps_per_epoch + it >= total_steps:
                    throughput = args.eval_batch_size * args.perf_run_iters / total_time
                    # Average wall time per timed batch, in milliseconds.
                    latency_ms = total_time / args.perf_run_iters * 1000
                    print('Batch size = %d' % args.eval_batch_size)
                    print('Latency: %.3f ms' % latency_ms)
                    print("Throughput: {:.3f} sentence/s".format(throughput))
                    return
                with torch.no_grad():
                    inputs = {
                        "input_ids": batch[0],
                        "attention_mask": batch[1],
                        "token_type_ids": batch[2],
                    }
                    time_start = time.time()
                    outputs = model(**inputs)
                    time_end = time.time()
                # Only count iterations past the warm-up window.
                if epoch * steps_per_epoch + it > args.perf_begin_iter:
                    total_time += (time_end - time_start)
                pbar.update(1)
def main():
    """CLI entry point.

    Parses arguments, sets up device/logging/seed, loads the model and
    tokenizer, optionally trains, then — depending on flags — evaluates,
    tunes to int8 with Neural Compressor, or benchmarks inference.

    Fix vs. the original: `calib_dataloader` was built with `eval_sampler`
    (a sampler over `dataset`) while the freshly created `calib_sampler`
    (over `dataset_to_dict`) was never used; the calibration dataloader now
    uses its own sampler.
    """
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_TYPES),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model checkpoints and predictions will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        help="The input data dir. Should contain the .json files for the task."
        + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
    )
    parser.add_argument(
        "--train_file",
        default=None,
        type=str,
        help="The input training file. If a data dir is specified, will look for the file there"
        + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
    )
    parser.add_argument(
        "--predict_file",
        default=None,
        type=str,
        help="The input evaluation file. If a data dir is specified, will look for the file there"
        + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
    )
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--version_2_with_negative",
        action="store_true",
        help="If true, the SQuAD examples contain some that do not have an answer.",
    )
    parser.add_argument(
        "--null_score_diff_threshold",
        type=float,
        default=0.0,
        help="If null_score - best_non_null is greater than the threshold predict null.",
    )
    parser.add_argument(
        "--max_seq_length",
        default=384,
        type=int,
        help="The maximum total input sequence length after WordPiece tokenization. Sequences "
        "longer than this will be truncated, and sequences shorter than this will be padded.",
    )
    parser.add_argument(
        "--doc_stride",
        default=128,
        type=int,
        help="When splitting up a long document into chunks, how much stride to take between chunks.",
    )
    parser.add_argument(
        "--max_query_length",
        default=64,
        type=int,
        help="The maximum number of tokens for the question. Questions longer than this will "
        "be truncated to this length.",
    )
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step."
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
    )
    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument(
        "--n_best_size",
        default=20,
        type=int,
        help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
    )
    parser.add_argument(
        "--max_answer_length",
        default=30,
        type=int,
        help="The maximum length of an answer that can be generated. This is needed because the start "
        "and end predictions are not conditioned on one another.",
    )
    parser.add_argument(
        "--verbose_logging",
        action="store_true",
        help="If true, all of the warnings related to data processing will be printed. "
        "A number of warnings are expected for a normal SQuAD evaluation.",
    )
    parser.add_argument(
        "--lang_id",
        default=0,
        type=int,
        help="language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)",
    )
    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--threads", type=int, default=1, help="multiple threads for converting example to features")
    parser.add_argument(
        "--bf16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision instead of 32-bit")
    parser.add_argument("--perf_begin_iter", type=int, default=15,
                        help="Number iterations to warm up")
    parser.add_argument("--perf_run_iters", type=int, default=100,
                        help="Number iterations to collection performance data begin from perf_begin_iter")
    parser.add_argument("--iter_num", type=int, default=40,
                        help="Number iterations to collect time")
    parser.add_argument("--benchmark", action='store_true',
                        help="Bench the model speed")
    parser.add_argument("--accuracy_only", action='store_true',
                        help="Bench the model accuracy")
    parser.add_argument("--use_jit", action='store_true', help="For jit trace")
    parser.add_argument('--int8', dest='int8', action='store_true',
                        help='use llga int8 in pytorch jit model')
    parser.add_argument('--int8_fp32', dest='int8_fp32', action='store_true',
                        help='use int8 fp32 mix precision')
    parser.add_argument("--int8_config", type=str, default="config.json",
                        help="quantization config file for int8 mode")
    parser.add_argument("--do_calibration", action='store_true',
                        help="Enable calibration process")
    parser.add_argument("--calibration_iters", type=int, default=100,
                        help="Number iterations to do calibration")
    parser.add_argument("--use_share_weight", action='store_true',
                        help="Enable share weight mode")
    parser.add_argument("--cores_per_instance", type=int, default=4,
                        help="Number iterations to collect time")
    parser.add_argument("--total_cores", type=int, default=28,
                        help="Total cores used for this process, used for share_weight mode")
    parser.add_argument("--tune", action='store_true',
                        help="use INC to quantize model to int8")
    args = parser.parse_args()

    if args.doc_stride >= args.max_seq_length - args.max_query_length:
        logger.warning(
            "WARNING - You've set a doc stride which may be superior to the document length in some "
            "examples. This could result in errors when building features from the examples. Please reduce the doc "
            "stride or increase the maximum length to ensure the features are correctly built."
        )
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir
            )
        )

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )

    # Set seed
    set_seed(args)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()

    args.model_type = args.model_type.lower()
    config = AutoConfig.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = AutoModelForQuestionAnswering.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )

    if args.local_rank == 0:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()

    model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)

    # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.
    # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
    # remove the need for this code, but it is still valid.
    if args.fp16:
        try:
            import apex
            apex.amp.register_half_function(torch, "einsum")
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")

    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False)
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)

    # Save the trained model and the tokenizer
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        # Take care of distributed/parallel training
        model_to_save = model.module if hasattr(model, "module") else model
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))

        # Load a trained model and vocabulary that you have fine-tuned
        model = AutoModelForQuestionAnswering.from_pretrained(args.output_dir)  # , force_download=True)
        tokenizer = AutoTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        model.to(args.device)

    # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        if args.do_train:
            logger.info("Loading checkpoints saved during training for evaluation")
            checkpoints = [args.output_dir]
            if args.eval_all_checkpoints:
                checkpoints = list(
                    os.path.dirname(c)
                    for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
                )
                logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce model loading logs
        else:
            logger.info("Loading checkpoint %s for evaluation", args.model_name_or_path)
            checkpoints = [args.model_name_or_path]

        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)
        for checkpoint in checkpoints:
            # Reload the model
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            model = AutoModelForQuestionAnswering.from_pretrained(checkpoint)  # , force_download=True)
            model.to(args.device)
            model.eval()
            ipex.nn.utils._model_convert.replace_dropout_with_identity(model)

        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)

        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)

        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(dataset)
        eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)

        # Calibration data is fed as dicts (input_ids / attention_mask / token_type_ids).
        dataset_to_dict = [
            {
                "input_ids": data[0],
                "attention_mask": data[1],
                "token_type_ids": data[2]
            } for data in dataset
        ]
        calib_sampler = SequentialSampler(dataset_to_dict)
        # BUGFIX: the original passed `sampler=eval_sampler` here and left
        # `calib_sampler` unused; the calibration loader must sample its own dataset.
        calib_dataloader = DataLoader(dataset_to_dict, sampler=calib_sampler, batch_size=args.eval_batch_size)

        # multi-gpu evaluate
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)

        def eval_func(model):
            # Evaluate `model` on the SQuAD dev set and return the F1 score.
            # If an INC-produced ipex config exists under nc_workspace, run int8.
            use_int8 = False
            dumpy_tensor = torch.ones((args.eval_batch_size, 384), dtype=torch.long)
            jit_inputs = (dumpy_tensor, dumpy_tensor, dumpy_tensor)
            for path, dirs, files in os.walk('nc_workspace'):
                if 'ipex_config_tmp.json' in files:
                    fullpath = os.path.join(path, 'ipex_config_tmp.json')
                    use_int8 = True
                    break
            if use_int8:
                conf = ipex.quantization.QuantConf(configure_file=fullpath)
                # convert model to trace model.
                # NOTE(review): conversion only happens when --int8_fp32 is set;
                # confirm the pure-int8 path is intentional here.
                if args.int8_fp32:
                    model = ipex.quantization.convert(model, conf, jit_inputs)
                with torch.no_grad():
                    # Two warm-up passes before timing.
                    y = model(dumpy_tensor, dumpy_tensor, dumpy_tensor)
                    y = model(dumpy_tensor, dumpy_tensor, dumpy_tensor)
            elif args.use_jit:
                with torch.no_grad():
                    model = torch.jit.trace(model, jit_inputs, strict=False)
                    #model = torch.jit._recursive.wrap_cpp_module(torch._C._freeze_module(model._c, preserveParameters=True))
                    model = torch.jit.freeze(model)

            all_results = []
            start_time = timeit.default_timer()
            for batch in tqdm(eval_dataloader, desc="Evaluating"):
                model.eval()
                batch = tuple(t.to(args.device) for t in batch)
                with torch.no_grad():
                    inputs = {
                        "input_ids": batch[0],
                        "attention_mask": batch[1],
                        "token_type_ids": batch[2],
                    }
                    if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
                        del inputs["token_type_ids"]
                    feature_indices = batch[3]
                    # XLNet and XLM use more arguments for their predictions
                    if args.model_type in ["xlnet", "xlm"]:
                        inputs.update({"cls_index": batch[4], "p_mask": batch[5]})
                        # for lang_id-sensitive xlm models
                        if hasattr(model, "config") and hasattr(model.config, "lang2id"):
                            inputs.update(
                                {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)}
                            )
                    outputs = model(**inputs)
                for i, feature_index in enumerate(feature_indices):
                    eval_feature = features[feature_index.item()]
                    unique_id = int(eval_feature.unique_id)
                    output = [to_list(output[i]) for output in outputs]
                    # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler"
                    # models only use two.
                    if len(output) >= 5:
                        start_logits = output[0]
                        start_top_index = output[1]
                        end_logits = output[2]
                        end_top_index = output[3]
                        cls_logits = output[4]
                        result = SquadResult(
                            unique_id,
                            start_logits,
                            end_logits,
                            start_top_index=start_top_index,
                            end_top_index=end_top_index,
                            cls_logits=cls_logits,
                        )
                    else:
                        start_logits, end_logits = output
                        result = SquadResult(unique_id, start_logits, end_logits)
                    all_results.append(result)
            evalTime = timeit.default_timer() - start_time
            logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset))

            # Compute predictions
            output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(global_step))
            output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(global_step))
            if args.version_2_with_negative:
                output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(global_step))
            else:
                output_null_log_odds_file = None

            # XLNet and XLM use a more complex post-processing procedure
            if args.model_type in ["xlnet", "xlm"]:
                start_n_top = model.config.start_n_top if hasattr(model, "config") else model.module.config.start_n_top
                end_n_top = model.config.end_n_top if hasattr(model, "config") else model.module.config.end_n_top
                predictions = compute_predictions_log_probs(
                    examples,
                    features,
                    all_results,
                    args.n_best_size,
                    args.max_answer_length,
                    output_prediction_file,
                    output_nbest_file,
                    output_null_log_odds_file,
                    start_n_top,
                    end_n_top,
                    args.version_2_with_negative,
                    tokenizer,
                    args.verbose_logging,
                )
            else:
                predictions = compute_predictions_logits(
                    examples,
                    features,
                    all_results,
                    args.n_best_size,
                    args.max_answer_length,
                    args.do_lower_case,
                    output_prediction_file,
                    output_nbest_file,
                    output_null_log_odds_file,
                    args.verbose_logging,
                    args.version_2_with_negative,
                    args.null_score_diff_threshold,
                    tokenizer,
                )

            # Compute the F1 and exact scores.
            results = squad_evaluate(examples, predictions)
            accuracy = results["f1"]
            print('Accuracy: %.3f ' % (accuracy))
            return accuracy

        if args.tune:
            from neural_compressor.experimental import Quantization, common
            import shutil
            shutil.rmtree('nc_workspace', ignore_errors=True)
            quantizer = Quantization('conf.yaml')
            quantizer.eval_func = eval_func
            quantizer.calib_dataloader = calib_dataloader
            quantizer.model = common.Model(model)
            model = quantizer.fit()
            model.save(args.output_dir)
            return

        if args.benchmark or args.accuracy_only:
            use_int8 = False
            dumpy_tensor = torch.ones((args.eval_batch_size, 384), dtype=torch.long)
            jit_inputs = (dumpy_tensor, dumpy_tensor, dumpy_tensor)
            if args.int8:
                config_file = os.path.join(args.output_dir, "best_configure.json")
                assert os.path.exists(config_file), "there is no ipex config file, Please tune with Neural Compressor first!"
                conf = ipex.quantization.QuantConf(configure_file=config_file)
                model = ipex.quantization.convert(model, conf, jit_inputs)
                with torch.no_grad():
                    y = model(dumpy_tensor, dumpy_tensor, dumpy_tensor)
                    y = model(dumpy_tensor, dumpy_tensor, dumpy_tensor)
            if args.use_jit:
                with torch.no_grad():
                    model = torch.jit.trace(model, jit_inputs, strict=False)
                    #model = torch.jit._recursive.wrap_cpp_module(torch._C._freeze_module(model._c, preserveParameters=True))
                    model = torch.jit.freeze(model)
            if args.benchmark:
                if args.use_share_weight:
                    # Run one benchmark thread per instance, sharing the model weights.
                    threads = []
                    num_instances = args.total_cores // args.cores_per_instance
                    for i in range(0, num_instances):
                        t = threading.Thread(target=benchmark_evaluate, args=(args, model, eval_dataloader))
                        threads.append(t)
                        t.start()
                    for t in threads:
                        t.join()
                else:
                    benchmark_evaluate(args, model, eval_dataloader)
                exit()
            if args.accuracy_only:
                eval_func(model)
                exit()
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
|
app.py | import os
import sys
# <<<<<<< Updated upstream
# two file is created by developers
# from main import grading
# from helperFunction import readAndSaveAnswerFile
from sample.web.helperFunction import saveImage, writeAnswer
import sample.database.database as database
import flask
from flask import Flask, render_template, request,flash
from flask import url_for, redirect
from flask_dropzone import Dropzone
import threading
import time
from multiprocessing import Process, Pool
# for user login
from flask_wtf import FlaskForm
from wtforms import StringField, BooleanField, PasswordField,validators, SubmitField
from wtforms.validators import DataRequired
import mysql.connector
from flask_bootstrap import Bootstrap
# ==================================================
# test for connect pythonanywhere
import mysql.connector
import sshtunnel
from mysql.connector.cursor import MySQLCursor
from flask_mail import Mail, Message
# =======
# from flask_wtf import FlaskForm
# from flask_mail import Mail
# import _mysql_connector
# import mysql.connector
# from mysql.connector.cursor import MySQLCursor
try:
# two file is created by developers
# from main import grading
# from helperFunction import readAndSaveAnswerFile
from sample.web.helperFunction import saveImage, writeAnswer
import sample.database.database as database
import flask
from flask import Flask, render_template, request,flash
from flask import url_for, redirect
from flask_dropzone import Dropzone
import threading
import time
from multiprocessing import Process, Pool
# for user login
from flask_wtf import FlaskForm
from wtforms import StringField, BooleanField, PasswordField
from wtforms.validators import DataRequired
import mysql.connector
# ==================================================
# test for connect pythonanywhere
import mysql.connector
import sshtunnel
from mysql.connector.cursor import MySQLCursor
from flask_mail import Mail
from flask import Message
except:
pass
# >>>>>>> Stashed changes
def connectDatabase(username, password):
    """Authenticate *username*/*password* against the remote MySQL database.

    Opens an SSH tunnel to PythonAnywhere, fetches (password, level, score)
    for the user and compares the stored password.

    Returns:
        (True, rows) on success, where rows is cursor.fetchall();
        (False, 1, 1) when the user is unknown or the password does not match
        (shape kept for backward compatibility with existing callers).
    """
    sshtunnel.SSH_TIMEOUT = 5.0
    sshtunnel.TUNNEL_TIMEOUT = 5.0
    # NOTE(review): SSH and DB credentials are hardcoded and committed;
    # move them to configuration/environment and rotate the exposed secrets.
    with sshtunnel.SSHTunnelForwarder(
        ('ssh.pythonanywhere.com'),
        ssh_username='Gengruijie', ssh_password='Grj12345',
        remote_bind_address=('Gengruijie.mysql.pythonanywhere-services.com', 3306)
    ) as tunnel:
        connection = mysql.connector.connect(
            user='Gengruijie', password='GRJ12345',
            host='127.0.0.1', port=tunnel.local_bind_port,
            database='Gengruijie$AutoGrading',
        )
        # Parameterized query: the original interpolated `username` directly
        # into the SQL string, which is an injection vector.
        query = "SELECT password, level, score from main where name = %s"
        cursor = MySQLCursor(connection)
        cursor.execute(query, (username,))
        data = cursor.fetchall()
        cursor.close()
        # Unknown user used to raise IndexError on data[0]; treat it as a
        # normal authentication failure instead.
        if not data or data[0][0] != password:
            return (False, 1, 1)
        return (True, data)
# end test.
# ==================================================
# Global, module-level login state: list of currently logged-in users.
# NOTE(review): shared mutable global state is not safe across concurrent
# requests/workers; consider Flask sessions instead.
loginUsers = []
# All form classes must inherit from FlaskForm.
class LoginForm(FlaskForm):
    """Login form: username, password and a remember-me checkbox."""
    # The first positional argument of a field sets its label text.
    username = StringField('User Name', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('remember me', default=False)
class RegistrationForm(FlaskForm):
    """Sign-up form: username, email, double-entered password, professor flag."""
    username = StringField('Username:\n', [validators.Length(min=4, max=25)])
    email = StringField('Email Address:\t', [validators.Length(min=6, max=35)])
    password = PasswordField('New Password:\t', [
        validators.DataRequired(),
        validators.EqualTo('confirm', message='Passwords must match')
    ])
    # Must equal `password` (enforced by the EqualTo validator above).
    confirm = PasswordField('Repeat Password:\t')
    is_professor = BooleanField('Are you a professor')
    submit = SubmitField('Sign up')
class MyThread(threading.Thread):
    """Toy worker thread that prints start/finish messages around a 1s sleep.

    Bug fix: `threading.Thread.__init__` must be called before any Thread API
    is used; the original skipped it, so `self.getName()` inside run() failed.
    The constructor still invokes run() synchronously, as the original did
    (call .start() instead if a real background thread is wanted).
    """

    def __init__(self):
        super().__init__()  # initialize Thread state (name, flags) first
        self.run()

    def run(self) -> None:
        print("{} started!".format(self.getName()))  # "Thread-x started!"
        time.sleep(1)  # Pretend to work for a second
        print("{} finished!".format(self.getName()))  # "Thread-x finished!"
# Flask application and extension setup.
app = Flask(__name__)
bootstrap = Bootstrap(app)
mail = Mail(app)
# Flask-Mail SMTP configuration.
# NOTE(review): real SMTP credentials are hardcoded and committed here —
# move them to environment variables / config and rotate the password.
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = 'haotian666666@gmail.com'
app.config['MAIL_PASSWORD'] = 'Uwha090909'
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
# NOTE(review): Mail(app) is constructed a second time here, shadowing the
# instance created above — one of the two is redundant.
mail = Mail(app)
# <<<<<<< Updated upstream
# =======
#
# >>>>>>> Stashed changes
# conn = mysql.connector.connect(
#     user="root",
#     password="gengruijie",
#     host="127.0.0.1",
#     database="Lemma"
# )
# to config upload file
# NOTE(review): self-assignment — this line has no effect.
app.wsgi_app = app.wsgi_app
app.config['SECRET_KEY'] = "Hubert"
# Destination directory for files received via /auto_upload.
UPLOAD_FOLDER = "static/upload/unclassify"
dropzone = Dropzone(app)
app.config.update(
    UPLOADED_PATH=os.getcwd() + '/static/upload',
    DROPZONE_ALLOWED_FILE_TYPE='default',
    DROPZONE_MAX_FILE_SIZE=3,
    DROPZONE_INPUT_NAME='photo',
    DROPZONE_MAX_FILES=30
)
@app.route('/demo', methods=['POST', 'GET'])
def demo():
    """Serve the demo page."""
    page = render_template("demo.html")
    return page
# sys.setdefaultencoding('Cp1252')
@app.route('/', methods=['POST', 'GET'])
def index():
    """Serve the landing page."""
    page = render_template("index.html")
    return page
# <<<<<<< Updated upstream
# @app.route("register", method=['POST','GET'])
# def register():
# msg = Message('Hello', sender = 'haotian666666@gmail.com', recipients = ['651938023@qq.com'])
# msg.body = "Hello Flask message sent from Flask-Mail"
# mail.send(msg)
# return "Sent"
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Handle user registration.

    On a valid POST, checks that neither the username nor the email is
    already taken and, if free, inserts the new account. Re-renders the
    form with `valid=False` when the name/email is in use.
    """
    username = None
    valid = True
    form = RegistrationForm()
    if request.method == 'POST' and form.validate():
        username = form.username.data
        email = form.email.data
        db = database.Database("Ruijie", "hhh", "142.93.59.116", "Users")
        cursor = db.get_cursor()
        # Parameterized query: the original interpolated user input with
        # str.format(), which is an SQL-injection vector.
        cursor.execute("SELECT * FROM User_info where username=%s or email=%s", (username, email))
        existing = cursor.fetchall()
        if existing:
            # Username or email already registered.
            valid = False
        else:
            password = form.password.data
            is_professor = form.is_professor.data
            db.insert_data("User_info", [("username", username), ("password", password), ("email", email), ("is_professor", is_professor)])
            # flash('Thanks for registering')
            # return redirect(url_for('login'))
    return render_template('register.html', form=form, username=username, valid=valid)
@app.route('/login', methods=['POST', 'GET'])
def login():
    """Handle user login.

    Fix: when username or password was missing, the original flashed a
    message but still fell through and called db.user_exist(None, ...);
    the credential check now only runs when both fields are present.
    """
    form = LoginForm()
    db = database.Database("Ruijie", "hhh", "142.93.59.116", "Users")
    if request.method == "POST":
        user = request.form.get("username")
        password = request.form.get("password")
        if not user or not password:
            flash('please enter the username and password')
        elif db.user_exist(user, password):
            # Credentials match a stored account.
            return "welcome! You are logged in"
        else:
            flash('user name or password is incorrect')
    return render_template('login.html', title="Sign In", form=form)
@app.route('/logout', methods=['POST', 'GET'])
def logout():
    """Log everyone out by resetting the module-level login list."""
    global loginUsers
    loginUsers = []
    return render_template('index.html')
'''
@app.route('/', methods=['POST', 'GET'])
def upload_answer():
if request.method == "POST":
f = request.files.get('photo')
data = readAndSaveAnswerFile(f)
return render_template("index.html")
'''
@app.route('/upload_sheet', methods=['POST', 'GET'])
def upload_sheet():
    """Accept an answer-sheet photo from the dropzone widget and store it."""
    if request.method == 'POST':
        uploaded = request.files.get('photo')
        saveImage(uploaded)
        # answer = grading(uploaded.filename, "answer.txt")
        # writeAnswer(answer)
    return render_template('index.html')
@app.route('/grading', methods=['POST', 'GET'])
def grading():
    """Serve the grading page."""
    template_name = 'grading.html'
    return render_template(template_name)
# NOTE(review): dead code — this function has no @app.route decorator (the
# routed version above is commented out), so Flask never invokes it.
# Consider deleting it or restoring its route.
def upload_answer():
    # Presumably meant to receive an answer-key upload; the processing call
    # is commented out, so it would only re-render the index page.
    if request.method == "POST":
        f = request.files.get('photo')
        # data = readAndSaveAnswerFile(f)
    return render_template("index.html")
# NOTE(review): this REBINDS the module name `upload_sheet`; the Flask route
# registered earlier still points at the first definition, so this copy is
# effectively dead code. Consider deleting or renaming it.
def upload_sheet():
    if request.method == 'POST':
        f = request.files.get('photo')
        saveImage(f)
        # answer = grading(f.filename,"answer.txt")
        # writeAnswer(answer)
    return render_template('index.html')
@app.route('/grade', methods=['POST', 'GET'])
def grade():
    """Read the grading results file and render it as a table.

    Each line of static/result/result.txt is a tab-separated record;
    the split fields become one row passed to the template.
    """
    # Bug fix: the original rebound `f` and never closed the file handle;
    # use a context manager to close it deterministically.
    with open("static/result/result.txt") as fh:
        text = fh.read().strip()
    rows = [line.split('\t') for line in text.split('\n')]
    return render_template('show_result.html', items=rows)
@app.route('/auto_upload', methods=['post'])
def myupload():
    """Save an auto-uploaded file into UPLOAD_FOLDER."""
    myFile = request.files['file']
    # Security fix: strip directory components from the client-supplied
    # filename so a crafted name like "../../etc/passwd" cannot escape
    # UPLOAD_FOLDER (path traversal).
    safe_name = os.path.basename(myFile.filename)
    myFile.save(os.path.join(UPLOAD_FOLDER, safe_name))
    return "ok"
# <<<<<<< Updated upstream
# @app.route('/register', methods=['GET'])
# def register():
# return render_template('register.html')
# =======
@app.route('/register1', methods=['GET'])
def register1():
    """Serve the registration page (GET only)."""
    return render_template('register.html')
@app.route('/Scores', methods=['GET'])
def Scores():
    """Show scores: a single score for a student, the full table for a professor."""
    global loginUsers
    username = loginUsers[0][0]
    # Security fix: the original interpolated `username` directly into the
    # SQL string (injection risk); use a parameterized query instead.
    data1 = _query_scores("SELECT level, score from main where name = %s",
                          (username,))
    if data1[0][0] == "student":
        return render_template('Scores.html', name=username, score=data1[0][1])
    # Professor view: list every named score except the excluded account.
    data = _query_scores("SELECT name, score from main")
    result = [(name, score) for name, score in data
              if score != "" and name != "Turner"]
    return render_template('ScoresProfessor.html', name=username, items=result)


def _query_scores(query, params=None):
    """Open an SSH tunnel to the MySQL host, run one query, return all rows.

    Extracted from Scores(), which previously duplicated this whole
    tunnel/connect/execute sequence twice.
    NOTE(review): credentials are hard-coded; move them to configuration.
    """
    sshtunnel.SSH_TIMEOUT = 5.0
    sshtunnel.TUNNEL_TIMEOUT = 5.0
    with sshtunnel.SSHTunnelForwarder(
            ('ssh.pythonanywhere.com'),
            ssh_username='Gengruijie', ssh_password='Grj12345',
            remote_bind_address=('Gengruijie.mysql.pythonanywhere-services.com', 3306)
    ) as tunnel:
        connection = mysql.connector.connect(
            user='Gengruijie', password='GRJ12345',
            host='127.0.0.1', port=tunnel.local_bind_port,
            database='Gengruijie$AutoGrading',
        )
        cursor = MySQLCursor(connection)
        cursor.execute(query, params or ())
        return cursor.fetchall()
def flaskRun():
    """Start the Flask development server, listening on all interfaces."""
    app.run(host='0.0.0.0', debug=True)
if __name__ == '__main__':
    p = Pool(2)
    # Bug fix: the original wrote apply_async(flaskRun()), which CALLED
    # flaskRun in the main process (blocking forever) and handed its return
    # value to the pool. Pass the callable itself instead.
    p.apply_async(flaskRun)
    print("Waiting for all subprocess done...")
    # Keep the pool workers (and the server) alive.
    p.close()
    p.join()
|
queue.py | # -*- encoding=utf-8 -*-
import threading
import time
class ThreadSafeQueueException(Exception):
    """Returned by ThreadSafeQueue.put when the queue is full."""
    pass
# Thread-safe queue (pop order is LIFO, matching list.pop()).
class ThreadSafeQueue(object):
    def __init__(self, max_size=0):
        """max_size=0 means unbounded."""
        self.queue = []
        self.max_size = max_size
        self.lock = threading.Lock()
        # Build the condition on the same lock so the size check, the
        # append, and waits are atomic. The original used two independent
        # primitives, which allowed lost wakeups and racy size checks.
        self.condition = threading.Condition(self.lock)

    def size(self):
        """Return the current number of queued items."""
        with self.lock:
            return len(self.queue)

    def put(self, item):
        """Append item; return a ThreadSafeQueueException instance if full.

        (Returning rather than raising preserves the original contract.)
        """
        with self.condition:
            # Bug fix: the original checked the size OUTSIDE the lock and
            # used ">", letting the queue grow to max_size + 1.
            if self.max_size != 0 and len(self.queue) >= self.max_size:
                return ThreadSafeQueueException()
            self.queue.append(item)
            self.condition.notify()

    def batch_put(self, item_list):
        """Put every element of item_list (coerced to a list) one by one."""
        if not isinstance(item_list, list):
            item_list = list(item_list)
        for item in item_list:
            self.put(item)

    def pop(self, block=True, timeout=None):
        """Pop the most recently added item (LIFO).

        If empty: wait up to `timeout` when block=True, otherwise (or if
        still empty after the wait) return None.
        """
        with self.condition:
            if not self.queue and block:
                self.condition.wait(timeout=timeout)
            if self.queue:
                return self.queue.pop()
            return None

    def get(self, index):
        """Return (without removing) the item at `index`."""
        with self.lock:
            return self.queue[index]
if __name__ == '__main__':
    # Demo: a producer enqueues one item per second while a consumer pops
    # and prints it; both loop forever (stop with Ctrl+C).
    queue = ThreadSafeQueue(max_size=100)

    def producer():
        # Push one item per second.
        while True:
            queue.put(1)
            time.sleep(1)

    def consumer():
        # NOTE(review): timeout=-1 is passed straight to Condition.wait —
        # confirm a negative timeout behaves as "don't wait" as intended.
        while True:
            item = queue.pop(block=True, timeout=-1)
            print('get item from queue: %d' % item)
            time.sleep(1)

    thread1 = threading.Thread(target=producer)
    thread2 = threading.Thread(target=consumer)
    thread1.start()
    thread2.start()
    thread1.join()
    thread2.join()
|
threaded_rec.py | '''
Implement threaded requests
'''
import threading as th
import queue as Q
import requests
from pbutils.request.utils import all_contexts, populate_profile, create_request_params
class ThreadedReq:
    '''
    Threaded requests: fan profile-derived HTTP requests out over a pool
    of worker threads and collect (profile, response) pairs on an output
    queue.
    '''
    def __init__(self, n_threads, config, environ, error_handler=None):
        ''' constructor '''
        self.n_threads = n_threads
        self.config = config
        self.environ = environ
        self.inq = Q.Queue()     # (profile, request-params) work items
        self.outq = Q.Queue()    # (profile, response) results
        self.done = th.Event()   # set when every worker has finished
        self.error_handler = error_handler

    def run(self, profiles):
        '''
        Expand every profile into request params, queue them, then run
        n_threads workers to completion; use do_responses for results.
        '''
        for profile in profiles:
            for context in all_contexts(profile, self.environ):
                pprofile = populate_profile(profile, context)
                req_params = create_request_params(pprofile)
                self.inq.put((profile, req_params))
        threads = []
        for _ in range(self.n_threads):
            thread = th.Thread(target=self.do_requests)
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        self.done.set()

    def do_requests(self):
        '''
        Worker loop: read from inq, make the request, and put
        (profile, response) onto outq.
        '''
        while True:
            try:
                profile, req_params = self.inq.get(block=False)
            except Q.Empty:
                break
            # Bug fix: resp must be initialized before the try block —
            # if requests.request() itself raised, resp was unbound and the
            # error_handler call below died with NameError instead.
            resp = None
            try:
                resp = requests.request(**req_params)
                resp.raise_for_status()
            except Exception as e:
                if self.error_handler:
                    self.error_handler(profile, resp, e)
                else:
                    raise
            else:
                self.outq.put((profile, resp))

    def do_responses(self):
        '''
        Yield (profile, response) pairs as they arrive, until the output
        queue drains and all workers are done.
        '''
        while True:
            try:
                profile, resp = self.outq.get(timeout=3)  # to=3 a hack?
                yield profile, resp
                if self.outq.empty() and self.done.is_set():
                    break
            except Q.Empty:
                return
|
basic-counting.py | # coding: utf-8
import gimpbbio.gpio as gpio
import threading, time, datetime, signal
class GeigerCounterDataCollector:
    """Counts Geiger-tube pulses on a GPIO pin and reports CPS/CPM stats."""

    def __init__(self):
        self.input_pin = None
        self.count_accumulator = 0    # pulses seen since the last 1 s tick
        self.counts = [0] * 60        # per-second counts, rolling one minute
        self.count_index = 0
        self.counts_per_second = 0
        self.counts_per_minute = 0
        self.total_counts = 0
        self.elapsed_seconds = 0
        self.highest_cpm = 0
        self.next_collection_call = time.time()
        # Well-known conversion factor from the tube manufacturer
        self.conversion_factor = 0.0057
        self.quit_event = threading.Event()
        # Fix for the race flagged in the original: receive_data() runs on
        # the GPIO callback thread while collect_data() runs on the sampler
        # thread; both touch count_accumulator, so guard it with a lock.
        self.accumulator_lock = threading.Lock()
        self.data_file = open("/var/tmp/geiger_counter_data.txt", "w+")

    def start(self, input_pin):
        """Begin counting pulses on input_pin and sampling once per second."""
        self.input_pin = input_pin
        self.input_pin.open_for_input()
        self.input_pin.watch(gpio.RISING, self.receive_data)
        thread = threading.Thread(target=self.once_per_second)
        thread.daemon = True
        thread.start()

    def stop(self):
        """Stop watching the pin, end the sampler thread, close the log."""
        self.input_pin.close()
        self.quit_event.set()
        time.sleep(0.1)
        self.data_file.close()

    def receive_data(self, pin):
        """GPIO rising-edge callback: count one pulse and log its time."""
        with self.accumulator_lock:
            self.count_accumulator += 1
        self.data_file.write(str(datetime.datetime.now()) + '\n')

    def once_per_second(self):
        """Sampler loop; uses isSet's modern spelling is_set()."""
        while not self.quit_event.is_set():
            self.collect_data()
            self.print_statistics()
            # Schedule the next call for 1 second from the last one, prevents timer drift
            self.next_collection_call += 1
            self.quit_event.wait(self.next_collection_call - time.time())

    def collect_data(self):
        """Roll the last second's pulse count into the one-minute window."""
        self.elapsed_seconds += 1
        # Decrement CPM with expired data from a minute ago
        self.counts_per_minute -= self.counts[self.count_index]
        # Atomically snapshot-and-reset the accumulator (was racy before:
        # pulses arriving between the read and the reset were lost).
        with self.accumulator_lock:
            self.counts[self.count_index] = self.count_accumulator
            self.count_accumulator = 0
        # Increment CPM with new data
        self.counts_per_second = self.counts[self.count_index]
        self.counts_per_minute += self.counts[self.count_index]
        self.total_counts += self.counts[self.count_index]
        if self.counts_per_minute > self.highest_cpm:
            self.highest_cpm = self.counts_per_minute
        self.count_index += 1
        if self.count_index > 59:
            self.count_index = 0

    def print_statistics(self):
        """Print CPS, rolling/average/max CPM and the derived dose rate."""
        micro_sieverts_per_hour = self.counts_per_minute * self.conversion_factor
        average_cpm = self.total_counts * 1.0 / self.elapsed_seconds * 60
        print("CPS: {0}, rolling CPM: {1}, avg CPM: {2:.1f}, max CPM: {3}, μSv/hr: {4:.2f}".format(self.counts_per_second, self.counts_per_minute, average_cpm, self.highest_cpm, micro_sieverts_per_hour))
def signal_handler(signal, frame):
    # SIGINT handler: shut the collector down cleanly on Ctrl+C.
    # NOTE(review): the `signal` parameter shadows the `signal` module.
    print('Exiting.')
    collector.stop()
# Wire up Ctrl+C handling, start counting on header pin P8_8, then block
# the main thread until the quit event fires (set by collector.stop()).
signal.signal(signal.SIGINT, signal_handler)
collector = GeigerCounterDataCollector()
collector.start(gpio.pins.p8_8)
collector.quit_event.wait()
|
backend_info.py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=protected-access
"""Interactive backend widget."""
import threading
from typing import Union
import ipyvuetify as vue
from IPython.display import display # pylint: disable=import-error
from qiskit.test.mock.fake_backend import FakeBackend
from qiskit_ibm_provider.ibm_backend import IBMBackend
from .config_widget import config_tab
from .gates_widget import gates_tab
from .jobs_widget import jobs_tab
from .qubits_widget import qubits_tab
from ..visualization.interactive import iplot_error_map
def _async_job_loader(
    tab: vue.TabItem, backend: Union[IBMBackend, FakeBackend]
) -> None:
    """Populate *tab* with the backend's job summary (runs on a worker thread).

    Args:
        tab: Tab item to fill in.
        backend: Backend whose jobs are displayed.
    """
    jobs = jobs_tab(backend)
    tab.children = [jobs]
def backend_widget(backend: Union[IBMBackend, FakeBackend]) -> None:
    """Display backend information as a widget.

    Args:
        backend: Display information about this backend.
    """
    cred = backend._credentials
    last_tab = vue.TabItem(children=[])

    title_text = "{} @ ({}/{}/{})".format(
        backend.name(), cred.hub, cred.group, cred.project
    )
    toolbar = vue.Toolbar(
        flat=True,
        color="#002d9c",
        children=[
            vue.ToolbarTitle(children=[title_text], style_="color:white")
        ],
    )
    # Tab headers followed by their bodies, in matching order; the job
    # summary body (last_tab) starts empty and is filled asynchronously.
    tabs = vue.Tabs(
        vertical=True,
        children=[
            vue.Tab(children=["Configuration"]),
            vue.Tab(children=["Qubits"]),
            vue.Tab(children=["Non-local Gates"]),
            vue.Tab(children=["Error map"]),
            vue.Tab(children=["Job Summary"]),
            vue.TabItem(children=[config_tab(backend)]),
            vue.TabItem(children=[qubits_tab(backend)]),
            vue.TabItem(children=[gates_tab(backend)]),
            vue.TabItem(
                children=[
                    iplot_error_map(
                        backend, figsize=(None, None), as_widget=True
                    )
                ]
            ),
            last_tab,
        ],
    )
    card = vue.Card(height=600, outlined=True, children=[toolbar, tabs])
    # Load job data async for a bit better performance
    thread = threading.Thread(target=_async_job_loader, args=(last_tab, backend))
    thread.start()
    display(card)
|
camera_setting.py | import logging
import threading
import subprocess
import numpy as np
import cv2
USB_GSTREAMER = True
def add_camera_args(parser):
    """Add parser augument for camera options."""
    # (flag, kwargs) table; one add_argument call per entry, identical in
    # behavior to the original inline sequence.
    specs = [
        ('--rtsp', dict(dest='use_rtsp',
                        help='use IP CAM (remember to also set --uri)',
                        action='store_true')),
        ('--uri', dict(dest='rtsp_uri',
                       help='RTSP URI, e.g. rtsp://192.168.1.64:554',
                       default=None, type=str)),
        ('--latency', dict(dest='rtsp_latency',
                           help='latency in ms for RTSP [200]',
                           default=200, type=int)),
        ('--file', dict(dest='use_file',
                        help='use a video file as input (remember to '
                             'also set --filename)',
                        action='store_true')),
        ('--image', dict(dest='use_image',
                         help='use an image file as input (remember to '
                              'also set --filename)',
                         action='store_true')),
        ('--filename', dict(dest='filename',
                            help='video file name, e.g. test.mp4',
                            default=None, type=str)),
        ('--usb', dict(dest='use_usb',
                       help='use USB webcam (remember to also set --vid)',
                       action='store_true')),
        ('--vid', dict(dest='video_dev',
                       help='device # of USB webcam (/dev/video?) [0]',
                       default=0, type=int)),
        ('--width', dict(dest='image_width',
                         help='image width [640]',
                         default=640, type=int)),
        ('--height', dict(dest='image_height',
                          help='image height [480]',
                          default=480, type=int)),
    ]
    for flag, kwargs in specs:
        parser.add_argument(flag, **kwargs)
    return parser
def open_cam_rtsp(uri, width, height, latency):
    """Open an RTSP stream through a GStreamer H.264 decode pipeline."""
    pipeline = (
        "rtspsrc location={} latency={} ! rtph264depay ! h264parse ! omxh264dec ! "
        "nvvidconv ! video/x-raw, width=(int){}, height=(int){}, format=(string)BGRx ! "
        "videoconvert ! appsink"
    ).format(uri, latency, width, height)
    return cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
def open_cam_usb(dev, width, height):
    """Open a USB webcam."""
    # Plain V4L2 capture when GStreamer is disabled.
    if not USB_GSTREAMER:
        return cv2.VideoCapture(dev)
    pipeline = ('v4l2src device=/dev/video{} ! '
                'video/x-raw, width=(int){}, height=(int){} ! '
                'videoconvert ! appsink').format(dev, width, height)
    return cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
def open_cam_onboard():
    """Open the Jetson onboard camera."""
    # Pipeline stages joined with GStreamer's " ! " separator; the final
    # stage already contains the appsink link.
    stages = [
        "nvarguscamerasrc",
        "video/x-raw(memory:NVMM), "
        "width=(int)1280, height=(int)720, "
        "format=(string)NV12, framerate=(fraction)60/1",
        "nvvidconv flip-method=0",
        "video/x-raw, width=(int)1280, height=(int)720, format=(string)BGRx",
        "videoconvert",
        "video/x-raw, format=(string)BGR ! appsink",
    ]
    return cv2.VideoCapture(" ! ".join(stages), cv2.CAP_GSTREAMER)
def grab_img(cam):
    """This 'grab_img' function is designed to be run in the sub-thread.
    Once started, this thread continues to grab a new image and put it
    into the global 'img_handle', until 'thread_running' is set to False.
    """
    while cam.thread_running:
        _, frame = cam.cap.read()
        cam.img_handle = frame
        if frame is None:
            logging.warning('grab_img(): cap.read() returns None...')
            break
    cam.thread_running = False
class VideoWriter:
    """Thin wrapper around cv2.VideoWriter that emits an .mp4 file."""

    def __init__(self, width, height, args, fps=24):
        # type: (str, int, int, int) -> None
        assert args.output_file.endswith('.mp4'), 'please specify the (.mp4) at the end '
        self.args = args
        codec = cv2.VideoWriter_fourcc(*'mp4v')
        self.__writer = cv2.VideoWriter(args.output_file, codec, fps, (width, height))

    def write(self, frame):
        """Append one frame; frames must be 8-bit (np.uint8)."""
        if frame.dtype != np.uint8:
            raise ValueError('frame.dtype should be np.uint8')
        self.__writer.write(frame)

    def release(self):
        """Finalize and close the underlying writer."""
        self.__writer.release()
class Camera():
    """Camera class which supports reading images from theses video sources:
    1. Video file
    2. USB webcam
    3. Jetson onboard camera
    """

    def __init__(self, args):
        # Parsed command-line namespace (see add_camera_args).
        self.args = args
        self.is_opened = False
        # True when frames are grabbed on a background thread (USB/RTSP/onboard).
        self.use_thread = False
        self.thread_running = False
        # Most recent grabbed frame; written by grab_img() when threaded.
        self.img_handle = None
        self.img_width = 0
        self.img_height = 0
        self.cap = None
        self.thread = None
        #-----#
        # Optional mp4 writer; only created for the video-file source.
        self.vwriter = None

    def open(self):
        """Open camera based on command line arguments."""
        assert self.cap is None, 'Camera is already opened!'
        args = self.args
        if args.use_file: #video
            self.cap = cv2.VideoCapture(args.filename)
            self.cap.set(cv2.CAP_PROP_FPS,5)
            # ignore image width/height settings here
            #TODO may occur errors since different opencv versions use different attribute names
            width, height = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)), \
                int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            self.vwriter = VideoWriter(width=width, height=height, args=args, fps=24)
            # File reads happen synchronously in read(); no grab thread.
            self.use_thread = False
        # elif args.use_image:
        #     self.cap = 'OK'
        #     self.img_handle = cv2.imread(args.filename)
        #     # ignore image width/height settings here
        #     if self.img_handle is not None:
        #         self.is_opened = True
        #         self.img_height, self.img_width, _ = self.img_handle.shape
        #     self.use_thread = False
        elif args.use_usb:
            self.cap = open_cam_usb(
                args.video_dev,
                args.image_width,
                args.image_height
            )
            self.use_thread = True
        elif args.use_rtsp:
            self.cap = open_cam_rtsp(args.rtsp_uri,
                                     args.image_width,
                                     args.image_height,
                                     args.rtsp_latency)
            self.use_thread = True
        else: # by default, use the jetson onboard camera
            self.cap = open_cam_onboard()
            print('using onboard cam now !')
            self.use_thread = True
        # 'OK' is the sentinel used by the (commented-out) still-image path;
        # every live source goes through a real VideoCapture here.
        if self.cap != 'OK':
            if self.cap.isOpened():
                # Try to grab the 1st image and determine width and height
                _, img = self.cap.read()
                if img is not None:
                    self.img_height, self.img_width, _ = img.shape
                    self.is_opened = True

    #-------thread-----------------#
    def start(self):
        # Launch the background grab thread for threaded sources.
        assert not self.thread_running
        if self.use_thread:
            self.thread_running = True
            self.thread = threading.Thread(target=grab_img, args=(self,))
            self.thread.start()

    def stop(self):
        # Signal the grab loop to exit, then wait for it.
        self.thread_running = False
        if self.use_thread:
            self.thread.join()

    def read(self):
        # File source reads synchronously; threaded sources return the
        # latest frame captured by grab_img().
        if self.args.use_file:
            _, img = self.cap.read()
            # if img is None:
            #     #logging.warning('grab_img(): cap.read() returns None...')
            #     # looping around
            #     self.cap.release()
            #     self.cap = cv2.VideoCapture(self.args.filename)
            #     _, img = self.cap.read()
            return img
        elif self.args.use_image:
            return np.copy(self.img_handle)
        else:
            return self.img_handle

    def write(self, frame):
        # Forward to the mp4 writer when one exists (file source only).
        if self.vwriter:
            self.vwriter.write(frame)

    def release(self):
        # if self.cap != 'OK':
        self.cap.release()
        if self.vwriter is not None:
            self.vwriter.release()
|
client.py | import sys, socket
import threading
import traceback
import json
import datetime
class SocketError(OSError): # Socket Error, based on OSError
def __init__(self, *args):
super().__init__(f"Client cannot connect. Recheck adress and ensure what the server is online.")
class SendError(SocketError): # Send Error, based on SendError
def __init__(self, *args):
super().__init__(f"Client cannot send message. Recheck adress and ensure what the server is online and you are connected.")
class V2Message:  # API V2 Message class
    """A chat message received via protocol version 2 (JSON payloads)."""

    def __init__(self, bot, message):
        self.contents = message["msg"]           # body from the JSON packet
        self.author = message["user"]            # sender name from the JSON
        self.time = datetime.datetime.now()      # local receive timestamp
        self.bot = bot                           # owning client, used by reply()
        self.me = bot.username == self.author    # True if this is our own message

    def reply(self, text: str):
        """Send a quoted reply to this message through the owning bot."""
        quoted = f"""To {self.author} message ({self.time}):
> {self.contents}
{text}"""
        self.bot.send(quoted)

    def __str__(self):
        # Serialize back to the wire format.
        return json.dumps({"user": self.author, "msg": self.contents}, ensure_ascii=False)

    def __bytes__(self):
        return str(self).encode()
class V1Message:
    """A plain-text chat message received via protocol version 1."""

    def __init__(self, bot, message):
        self.contents = message                  # raw message text
        self.time = datetime.datetime.now()      # local receive timestamp
        self.bot = bot                           # owning client, used by reply()
        # Bug fix: the original read `self.username`, which does not exist
        # on this class (AttributeError on every construction); the
        # nickname lives on the bot.
        self.me = message.startswith(f"<{bot.username}> ")

    def reply(self, text):
        """Send a quoted reply to this message through the owning bot."""
        quoted = f"""To message ({self.time}):
> {self.contents}
{text}"""
        self.bot.send(quoted)

    def __str__(self):  # Return contents as string
        return self.contents

    def __bytes__(self):
        return self.contents.encode()  # Encode contents
class RequestV2Message:
    """An outgoing (not yet sent) API-v2 message."""

    def __init__(self, message):
        self.contents = message["msg"]   # body of the packet to send
        self.author = message["user"]    # always our own bot's name

    def __str__(self):
        payload = {"user": self.author, "msg": self.contents}
        return json.dumps(payload, ensure_ascii=False)

    def __bytes__(self):
        return str(self).encode()
class RequestV1Message:
    """An outgoing (not yet sent) API-v1 plain-text message."""

    def __init__(self, message):
        self.contents = message   # only the raw text exists in v1

    def __str__(self):
        return self.contents

    def __bytes__(self):
        return self.contents.encode()
class AnonClient:
    """Minimal chat client for the anon-chat socket protocol (API v1/v2)."""

    _VERSION = "0.0.3"  # The version of client

    def __init__(self, ip, port, name):
        """ip/port of the server and the bot's nickname."""
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.ip = ip
        self.port = port
        self.version = 2                # API 2 by default
        self.username = name
        self.v1_client = "V1-Package"   # author label for raw v1 packets seen via v2

    def connect(self):
        """Connect, start the receive thread, and block until interrupted."""
        try:
            self.socket.connect((self.ip, self.port))
        except Exception:
            raise SocketError()
        self.request = threading.Thread(target=self.message_request, args=(), daemon=True)
        self.request.start()
        try:
            self.on_connect()
        except Exception:
            raise RuntimeError("Unknown error in on_connect execution.")
        try:
            # Keep the main thread alive; the daemon receive thread dies with
            # it. Ctrl+C (or any exception) falls through to close().
            while True:
                pass
        except BaseException:
            pass
        self.close()

    def close(self):
        """Run the disconnect hook and close the socket."""
        try:
            self.on_disconnect()
        except Exception:
            raise RuntimeError("Unknown error in on_disconnect execution.")
        self.socket.close()

    def v1_send(self, text: str, on_send):
        """Send `text` in API-v1 framing: '<name> ' prefix, raw bytes."""
        try:
            on_send(RequestV1Message(text))
        except Exception:
            raise RuntimeError("Unknown error in on_send execution.")
        text = f"<{self.username}> " + text
        self.socket.send(text.encode())

    def v2_send(self, text: str, on_send):
        """Send `text` in API-v2 framing: UTF-8 JSON {user, msg}."""
        message = {"user": self.username, "msg": text}
        try:
            on_send(RequestV2Message(message))
        except Exception:
            raise RuntimeError("Unknown error in on_send execution.")
        # ensure_ascii=False: the wire format is UTF-8, not ASCII escapes.
        self.socket.send(json.dumps(message, ensure_ascii=False).encode())

    def send(self, text):
        """Send `text` using whichever protocol version is selected."""
        try:
            if self.version == 1:
                self.v1_send(text, self.on_send)
            if self.version == 2:
                self.v2_send(text, self.on_send)
        except Exception:
            raise SendError()

    def v1_request(self, on_message):
        """Receive loop for API-v1 (plain text) messages."""
        while True:
            if self.socket.fileno() == -1:
                break  # socket closed
            try:
                message = self.socket.recv(2048)
            except Exception:
                break
            if not message:
                break  # empty recv: peer closed the connection
            try:
                message = message.decode()
            except Exception:
                message = "Message was recieved, but the contents cannot be decoded :("
            try:
                on_message(V1Message(self, message))
            except Exception:
                raise RuntimeError("Unknown error in on_message execution.")

    def v2_request(self, on_message):
        """Receive loop for API-v2 (JSON) messages."""
        while True:
            if self.socket.fileno() == -1:
                break  # socket closed
            try:
                message = self.socket.recv(2048)
            except Exception:
                # Bug fix: the original `pass`ed here, leaving `message`
                # unbound on the first failure and spinning forever.
                break
            if not message:
                # Bug fix: empty recv means the peer closed the socket; the
                # original `pass`ed and busy-looped on a dead connection.
                break
            try:
                message = message.decode()
                try:
                    message = json.loads(message.strip())
                except Exception:
                    # Not JSON — treat it as a raw API-v1 packet.
                    # Bug fix: the original wrote `self.self.v1_client`
                    # (AttributeError), so every v1 packet fell through to
                    # the "cannot be decoded" branch below.
                    message = {"user": self.v1_client, "msg": message}
            except Exception:
                message = {"user": "[CLIENT]", "msg": "Message was recieved, but the contents cannot be decoded :("}
            try:
                on_message(V2Message(self, message))
            except Exception:
                raise RuntimeError("Unknown error in on_message execution.")

    def message_request(self):
        """Receive-thread entry point: dispatch to the versioned loop."""
        while True:
            try:
                if self.version == 1:
                    self.v1_request(self.on_message)
                elif self.version == 2:
                    self.v2_request(self.on_message)
            except Exception:
                raise SocketError()

    # Placeholders, replaced when the user registers handlers below.
    def on_message(self, *args, **kwargs):
        pass

    def on_send(self, *args, **kwargs):
        pass

    def on_connect(self, *args, **kwargs):
        pass

    def on_disconnect(self, *args, **kwargs):
        pass

    # Decorators: register user callbacks in place of the placeholders.
    def event_message(self, func):
        self.on_message = func
        return func

    def event_send(self, func):
        self.on_send = func
        return func

    def event_connect(self, func):
        self.on_connect = func
        return func

    def event_disconnect(self, func):
        self.on_disconnect = func
        return func
|
test_uuid.py | # -*- coding: utf-8 -*-
from threading import Thread
from Queue import Queue
import time
import unittest
import uuid
import libuuid
def test_property():
    # Nose-style generator test: every UUID property exposed by libuuid
    # must match the stdlib uuid.UUID built from the same bytes.
    _PROPERTIES = [
        'bytes', 'bytes_le', 'clock_seq', 'clock_seq_hi_variant',
        'clock_seq_low', 'fields', 'hex', 'node', 'time', 'time_hi_version',
        'time_low', 'time_mid', 'urn', 'variant', 'version']

    def _check_property(func_name, prop):
        u = getattr(libuuid, func_name)()
        c = uuid.UUID(bytes=u.bytes)
        assert getattr(u, prop) == getattr(c, prop)

    for prop in _PROPERTIES:
        yield _check_property, 'uuid1', prop
        yield _check_property, 'uuid4', prop
def test_method():
    # Same cross-check as test_property, but for the getter methods and
    # dunder conversions (hash/int/repr/str).
    _METHODS = [
        '__hash__', '__int__', '__repr__', '__str__', 'get_bytes',
        'get_bytes_le', 'get_clock_seq', 'get_clock_seq_hi_variant',
        'get_clock_seq_low', 'get_fields', 'get_hex', 'get_node', 'get_time',
        'get_time_hi_version', 'get_time_low', 'get_time_mid', 'get_urn',
        'get_variant', 'get_version']

    def _check_method(func_name, method):
        u = getattr(libuuid, func_name)()
        c = uuid.UUID(bytes=u.bytes)
        assert getattr(u, method)() == getattr(c, method)()

    for method in _METHODS:
        yield _check_method, 'uuid1', method
        yield _check_method, 'uuid4', method
def test_constants():
    # libuuid must re-export the RFC 4122 namespace/variant constants with
    # the same values as the stdlib uuid module.
    _CONSTANTS = ['NAMESPACE_DNS', 'NAMESPACE_OID', 'NAMESPACE_URL',
                  'NAMESPACE_X500', 'RESERVED_FUTURE', 'RESERVED_MICROSOFT',
                  'RESERVED_NCS', 'RFC_4122']

    def _check_constant(const):
        assert getattr(libuuid, const) == getattr(uuid, const)

    for constant in _CONSTANTS:
        yield _check_constant, constant
class TestUUID(unittest.TestCase):
    """Cross-checks libuuid's UUIDs against the stdlib uuid module.

    NOTE(review): this suite is Python 2 only (Queue module, xrange, the
    0x...L long literal, assertEquals).
    """

    def test_uuid1(self):
        # Bytes must round-trip through uuid.UUID unchanged.
        u = libuuid.uuid1()
        u2 = uuid.UUID(bytes=u.bytes)
        self.assertEqual(u.bytes, u2.bytes)

    def test_uuid4(self):
        u = libuuid.uuid4()
        u2 = uuid.UUID(bytes=u.bytes)
        self.assertEqual(u.bytes, u2.bytes)

    def test_is_UUID_instance(self):
        # libuuid values must subclass the stdlib type.
        u = libuuid.uuid4()
        self.assert_(isinstance(u, uuid.UUID))

    def test_uuid4_args_unsupported(self):
        # libuuid.uuid1 rejects the stdlib's node/clock_seq arguments.
        self.assertRaises(NotImplementedError, lambda: libuuid.uuid1(42))
        self.assertRaises(NotImplementedError, lambda: libuuid.uuid1(42, 42))
        self.assertRaises(NotImplementedError, lambda: libuuid.uuid1(node=42))
        self.assertRaises(NotImplementedError, lambda: libuuid.uuid1(clock_seq=42))
        self.assertRaises(NotImplementedError, lambda: libuuid.uuid1(node=42, clock_seq=42))

    def test_uuid1_bytes(self):
        # Raw-bytes fast path must still yield a valid version-1 UUID.
        b = libuuid.uuid1_bytes()
        self.assertEquals(type(b), str)
        self.assertEquals(uuid.UUID(bytes=b).version, 1)

    def test_uuid4_bytes(self):
        b = libuuid.uuid4_bytes()
        self.assertEquals(type(b), str)
        self.assertEquals(uuid.UUID(bytes=b).version, 4)

    def test_basic_sanity_uuid4(self):
        # 10k random UUIDs should contain no duplicates.
        buf = set()
        for _ in xrange(10000):
            u = libuuid.uuid4_bytes()
            self.assert_(u not in buf)
            buf.add(u)

    def test_basic_sanity_uuid1(self):
        # Time-based UUIDs: unique, monotonically timestamped, and close
        # to the current wall clock.
        buf = set()
        clocks = []
        for _ in xrange(1000):
            u = libuuid.uuid1()
            clocks.append(u.time)
            self.assert_(u.bytes not in buf)
            buf.add(u.bytes)
        self.assertEquals(clocks, sorted(clocks), "Timestamps increment")
        t = (time.time() * 1e7) + 0x01b21dd213814000L  # RFC 4122 timestamp
        diff = abs(t - clocks[-1])
        self.assert_(diff < 10000, "Timestamp reasonable")

    def test_multiple_threads(self):
        # Concurrent generation from 50 threads must stay duplicate-free.
        q = Queue()

        def _runsome():
            for _ in xrange(200):
                q.put(libuuid.uuid4().hex)
                q.put(libuuid.uuid1().hex)

        threads = [Thread(target=_runsome) for _ in xrange(50)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        result = list(q.queue)
        self.assertEquals(len(result), len(set(result)))
|
simple_gripper_mimic_pybullet.py | import threading
import time
import airobot.utils.common as arutil
from airobot.ee_tool.simple_gripper_pybullet import SimpleGripperPybullet
from airobot.utils.arm_util import wait_to_reach_jnt_goal
class SimpleGripperMimicPybullet(SimpleGripperPybullet):
    """
    A base class for gripper with mimic joints in pybullet.

    Args:
        cfgs (YACS CfgNode): configurations for the gripper.
        pb_client (BulletClient): pybullet client.

    Attributes:
        cfgs (YACS CfgNode): configurations for the gripper.
        gripper_close_angle (float): position value corresponding to the
            fully closed position of the gripper.
        gripper_open_angle (float): position value corresponding to the
            fully open position of the gripper.
        jnt_names (list): names of the gripper joints.
        gripper_jnt_ids (list): pybullet joint ids of the gripper joints.
        robot_id (int): robot id in Pybullet.
        jnt_to_id (dict): mapping from the joint name to joint id.
    """

    def __init__(self, cfgs, pb_client):
        super(SimpleGripperMimicPybullet, self).__init__(cfgs=cfgs, pb_client=pb_client)
        # Per-joint multipliers: joint i tracks joint 0 * MIMIC_COEFF[i].
        self._gripper_mimic_coeff = self.cfgs.EETOOL.MIMIC_COEFF
        # Guard so the mimic thread is only ever started once (see below).
        self._mthread_started = False

    def feed_robot_info(self, robot_id, jnt_to_id):
        """
        Setup the gripper, pass the robot info from the arm to the gripper.

        Args:
            robot_id (int): robot id in Pybullet.
            jnt_to_id (dict): mapping from the joint name to joint id.
        """
        super().feed_robot_info(robot_id, jnt_to_id)
        # if the gripper has been activated once,
        # the following code is used to prevent starting
        # a new thread after the arm reset if a thread has been started
        if not self._mthread_started:
            self._mthread_started = True
            # gripper thread: daemon so it dies with the interpreter.
            self._th_gripper = threading.Thread(target=self._th_mimic_gripper)
            self._th_gripper.daemon = True
            self._th_gripper.start()
        else:
            return

    def set_jpos(self, pos, wait=True, ignore_physics=False):
        """
        Set the gripper position.

        Args:
            pos (float): joint position.
            wait (bool): wait until the joint position is set
                to the target position.

        Returns:
            bool: A boolean variable representing if the action is
            successful at the moment when the function exits.
        """
        joint_name = self.jnt_names[0]
        # Clamp the command into the physical joint range.
        tgt_pos = arutil.clamp(
            pos,
            min(self.gripper_open_angle, self.gripper_close_angle),
            max(self.gripper_open_angle, self.gripper_close_angle))
        jnt_id = self.jnt_to_id[joint_name]
        if ignore_physics:
            # Teleport: zero the velocities, then hard-reset every mimic
            # joint to its target (no motor control, no dynamics).
            self._zero_vel_mode()
            mic_pos = self._mimic_gripper(pos)
            self._hard_reset(mic_pos)
            success = True
        else:
            self._pb.setJointMotorControl2(self.robot_id,
                                           jnt_id,
                                           self._pb.POSITION_CONTROL,
                                           targetPosition=tgt_pos,
                                           force=self._max_torque)
            if not self._pb.in_realtime_mode():
                # Stepped simulation: the mimic thread is idle, so drive
                # the follower joints explicitly here.
                self._set_rest_joints(tgt_pos)
            # NOTE(review): in stepped (non-realtime) mode this returns
            # False even though the command was issued — confirm intended.
            success = False
            if self._pb.in_realtime_mode() and wait:
                success = wait_to_reach_jnt_goal(
                    tgt_pos,
                    get_func=self.get_jpos,
                    joint_name=joint_name,
                    get_func_derv=self.get_jvel,
                    timeout=self.cfgs.ARM.TIMEOUT_LIMIT,
                    max_error=self.cfgs.ARM.MAX_JOINT_ERROR
                )
        return success

    def get_jpos(self):
        """
        Return the joint position(s) of the gripper.

        Returns:
            float: joint position.
        """
        if not self._is_activated:
            raise RuntimeError('Call activate function first!')
        jnt_id = self.jnt_to_id[self.jnt_names[0]]
        pos = self._pb.getJointState(self.robot_id, jnt_id)[0]
        return pos

    def get_jvel(self):
        """
        Return the joint velocity of the gripper.

        Returns:
            float: joint velocity.
        """
        if not self._is_activated:
            raise RuntimeError('Call activate function first!')
        jnt_id = self.jnt_to_id[self.jnt_names[0]]
        vel = self._pb.getJointState(self.robot_id, jnt_id)[1]
        return vel

    def _mimic_gripper(self, joint_val):
        """
        Given the value for the first joint,
        mimic the joint values for the rest joints.
        """
        jnt_vals = [joint_val]
        for i in range(1, len(self.jnt_names)):
            jnt_vals.append(joint_val * self._gripper_mimic_coeff[i])
        return jnt_vals

    def _th_mimic_gripper(self):
        """
        Make all the other joints of the gripper
        follow the motion of the first joint of the gripper.
        """
        # Runs forever on the daemon thread; only acts in realtime mode
        # (stepped mode drives the followers from set_jpos instead).
        while True:
            if self._is_activated and self._pb.in_realtime_mode():
                self._set_rest_joints()
            time.sleep(0.005)

    def _set_rest_joints(self, gripper_pos=None):
        # Drive every follower joint toward mimic_coeff * leader position.
        # When gripper_pos is None the current leader position is used.
        max_torq = self._max_torque
        max_torques = [max_torq] * (len(self.jnt_names) - 1)
        if gripper_pos is None:
            gripper_pos = self.get_jpos()
        gripper_poss = self._mimic_gripper(gripper_pos)[1:]
        gripper_vels = [0.0] * len(max_torques)
        self._pb.setJointMotorControlArray(self.robot_id,
                                           self.gripper_jnt_ids[1:],
                                           self._pb.POSITION_CONTROL,
                                           targetPositions=gripper_poss,
                                           targetVelocities=gripper_vels,
                                           forces=max_torques)
|
extension.py | from system.platform import get_platform
from tools.OJ.CP.setup import CpSetup
import os
import subprocess
import json
from termcolor import cprint
import time
import threading
import socket
from settings.compiler import competitive_companion_port, parse_problem_with_template
from settings.compiler import editor
# Parallel lists of (directory, filename) for files created while parsing,
# used to open each file in a CLI editor after a parse batch finishes.
# NOTE(review): nothing in this file appends to them -- presumably populated
# by CpSetup.template; verify.
editor_file_path = []
editor_file_name = []
class CpExt:
    """Competitive-programming problem parser.

    Receives problem payloads either from the Competitive Companion browser
    extension (via a local TCP socket) or from ``oj-api`` (via subprocess),
    then creates a folder per problem containing an ``.info`` metadata file,
    sample testcases, and optionally a solution template.
    """

    # Endpoint the Competitive Companion extension POSTs parsed problems to.
    HOST = '127.0.0.1'
    PORT = competitive_companion_port
    # Class-level shared state: URLs already handled this session, and a flag
    # that time_out() flips to stop the listen loop.
    PARSED_URL = []
    NOT_FINISHED = True

    @staticmethod
    def template(file_path, file_name='sol.cpp', open_editor=False):
        """Drop the configured solution template into *file_path* (best effort)."""
        try:
            obj_template = CpSetup()
            obj_template.template(file_path, file_name, parsingMode=True, open_editor=open_editor)
            return
        except:  # template creation is optional; any failure is ignored
            return

    @staticmethod
    def rectify(s):
        """Strip any junk before the first '{' so *s* starts at the JSON object."""
        try:
            i = s.find('{')
            s = s[i:]
            return s
        except:
            return ''

    def create(self, problem, cnt=0, link=False):
        """Create folder, metadata and testcases for one problem JSON payload.

        Args:
            problem: raw JSON string (possibly with leading junk; see rectify()).
            cnt: 0 for the first problem of a batch; triggers contest-folder
                setup and template/editor opening.
            link: True when the payload came from ``oj-api`` (problem data is
                nested under the 'result' key).

        Side effects: creates directories, writes ``.info`` and testcase
        files, and changes the process working directory.
        """
        try:
            problem = self.rectify(problem)
            dic = json.loads(problem)
            # Skip URLs already parsed in this session.
            # NOTE(review): for link payloads 'url' is read before unwrapping
            # 'result' -- confirm the oj-api envelope exposes a top-level url.
            if dic['url'] in self.PARSED_URL:
                return
            self.PARSED_URL.append(dic['url'])
            if link:
                # oj-api wraps the problem payload in a 'result' envelope.
                dic = dic['result']
            problem_name = dic['name']
            try:
                contest_name = dic['group']
            except:  # payload may carry no contest/group info
                contest_name = 'NULL'
            url = dic['url']
            problem_time_limit = 'NULL'
            problem_memory_limit = 'NULL'
            try:
                problem_time_limit = str(dic['timeLimit']) + ' ms'
                problem_memory_limit = str(dic['memoryLimit']) + ' MB'
            except Exception as e:
                cprint(e, 'red')
                pass
            base = os.getcwd()
            base_name = os.path.basename(base)  # (unused)
            contest_path = os.path.join(base, contest_name)
            if contest_name != 'NULL':
                contest_path = self.get_contest_path(base, contest_name)
            if contest_name != 'NULL':
                try:
                    if cnt == 0:
                        # First problem of the batch: create the contest
                        # folder and write its .info metadata once.
                        if not os.path.isdir(contest_name):
                            os.mkdir(contest_name)
                            cprint(f" Folder {contest_name} is created.", 'blue')
                        info = '{"contest_name" : "$CONTEST" , "url" : "$URL"}'
                        info = info.replace('$CONTEST', contest_name)
                        info = info.replace('$URL', url)
                        with open(os.path.join(contest_path, '.info'), 'w') as f:
                            f.write(info)
                        cprint(f" All the problems will be parsed into '{contest_name}' folder.\n", 'magenta')
                except Exception as e:
                    print(e)
                os.chdir(contest_path)
            if not os.path.isdir(problem_name):
                os.mkdir(problem_name)
            # Per-problem .info metadata via placeholder substitution.
            info = '{"name" : "$NAME" , "url" : "$URL","timeLimit" : "$timeLimit" , "memoryLimit":"$memoryLimit"}'
            info = info.replace('$NAME', problem_name)
            info = info.replace('$URL', url)
            info = info.replace('$memoryLimit', problem_memory_limit)
            info = info.replace('$timeLimit', problem_time_limit)
            path = os.path.join(os.getcwd(), problem_name, "")
            with open(path + '.info', 'w') as f:
                f.write(info)
            if parse_problem_with_template:
                open_editor = False
                if cnt == 0:
                    open_editor = True
                self.template(path, open_editor=open_editor)
            testcases = dic['tests']
            no = 1
            if not os.path.isdir(path + "testcases"):
                os.mkdir(path + "testcases")
            path = os.path.join(path, 'testcases')
            # Write each sample as a Sample-NN.in / Sample-NN.out pair.
            for case in testcases:
                file_name_in = 'Sample-' + str(no).zfill(2) + '.in'
                file_name_out = 'Sample-' + str(no).zfill(2) + '.out'
                no += 1
                with open(os.path.join(path, file_name_in), 'w') as fin:
                    fin.write(case['input'])
                with open(os.path.join(path, file_name_out), 'w') as f_out:
                    f_out.write(case['output'])
            cprint(f' {problem_name} fetched successfully.', 'green')
            os.chdir(contest_path)
        except Exception as e:
            cprint(e, 'red')

    def time_out(self, target_time):
        """Sleep *target_time* seconds, then flip NOT_FINISHED to end listen()."""
        time.sleep(target_time)
        self.NOT_FINISHED = False

    def listen(self):
        """Accept one problem payload from the Competitive Companion extension.

        Binds a local TCP socket, waits for the browser extension to POST a
        problem, hands the JSON off to create() on a worker thread, then
        optionally opens the configured editor on the parsed files.
        """
        cprint(' ' * 17 + '...Parsing Problem...' + ' ' * 17, 'blue')
        print()
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind((self.HOST, self.PORT))
            cprint(" Listening (Click competitive companion extension)....", 'yellow')
            print()
            # First accept() waits a long time; subsequent accepts only 2 s.
            timeout = 1000
            cnt = 0
            ok = True
            while ok and self.NOT_FINISHED:
                try:
                    s.listen()
                    s.settimeout(timeout)
                    timeout = 2
                    conn, addr = s.accept()
                    with conn:
                        problem_json = ''
                        continue_loop = True
                        while continue_loop and self.NOT_FINISHED:
                            data = conn.recv(1024)
                            result = (data.decode('utf-8'))
                            if not data:
                                # Peer closed: payload complete (if any).
                                if problem_json == '':
                                    break
                                t = threading.Thread(target=self.create, args=(problem_json, cnt))
                                t.start()
                                cnt += 1
                                continue_loop = False
                                ok = False
                            else:
                                problem_json += result
                                pass
                except Exception as e:
                    # print(e)
                    ok = False
        print()
        # NOTE(review): 't' is unbound when no payload ever arrived (pure
        # timeout path) -- this join would raise NameError; confirm intended.
        t.join()
        cprint(f' # Total {cnt} problems is fetched.', 'blue')
        if cnt > 0 and editor != '$NONE':
            cli_editors = ['nvim', 'vim', 'nano']
            if editor not in cli_editors:
                # GUI editor: open the whole folder.
                os.system(editor + ' .')
            base = os.getcwd()
            for file_path, file_name in zip(editor_file_path, editor_file_name):
                os.chdir(file_path)
                os.system(editor + ' ' + file_name)
            os.chdir(base)

    def link(self):
        """Parse a single problem from a user-supplied URL via ``oj-api``."""
        t = None
        cprint(' ' * 17 + '...Parsing Problem...' + ' ' * 17, 'blue')
        print()
        cprint(" Enter the link of the problem : ", 'cyan', end='')
        url = input()
        print()
        cnt = 0
        ok = True
        while ok:
            try:
                cmd = 'oj-api get-problem --compatibility ' + url
                cmd = list(cmd.split())
                problem_json = subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE,
                                              stderr=subprocess.PIPE)
                t = threading.Thread(target=self.create, args=(problem_json.stdout, cnt, True))
                t.start()
                ok = False
                cnt += 1
            except:
                ok = False
        print()
        # NOTE(review): if the try block failed before starting the thread,
        # t is still None and this raises AttributeError; confirm intended.
        t.join()
        print()
        cprint(f' # Total {cnt} problems is fetched.', 'blue')

    def id(self):
        """Parse selected problems of a Codeforces contest by contest/problem ids."""
        t = None
        cprint(' ' * 17 + '...Parsing Problem...' + ' ' * 17, 'blue')
        print()
        cprint(" Enter the codeforces contest id : ", 'cyan', end='')
        contest_id = input()
        cprint(" Enter the codeforces problems id : ", 'cyan', end='')
        problems = input()
        problems = problems.split(sep=' ')
        url = 'https://codeforces.com/contest/$CONTEST_ID/problem/$PROBLEM_ID'
        url = url.replace('$CONTEST_ID', contest_id)
        rem = url
        print()
        cnt = 0
        for prob in problems:
            try:
                url = rem.replace('$PROBLEM_ID', prob)
                cmd = 'oj-api get-problem --compatibility ' + url
                cmd = list(cmd.split())
                problem_json = subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE,
                                              stderr=subprocess.PIPE)
                t = threading.Thread(target=self.create, args=(problem_json.stdout, cnt, True))
                t.start()
                cnt += 1
            except:
                cprint(" Invalid id : " + prob, 'red')
        print()
        # NOTE(review): only the most recently started thread is joined.
        t.join()
        print()
        cprint(f' # Total {cnt} problems is fetched.', 'blue')

    def parse_contest(self, url=''):
        """Fetch a whole contest via ``oj-api`` and parse each of its problems."""
        try:
            cprint(' ' * 17 + '...Parsing Contest...' + ' ' * 17, 'blue')
            if url == '':
                cprint('Enter the url : ', 'cyan', end='')
                url = input()
            cprint('-' * 55, 'magenta')
            # os.system(cmd)
            t = time.time()  # (overwritten below; the timestamp is never used)
            cmd = 'oj-api get-contest ' + url
            cmd = list(cmd.split())
            cp = subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            contest = json.loads(cp.stdout)
            result = "\tFetched Contest info..."
            if contest['status'] == 'ok':
                cprint(result, 'green')
            else:
                cprint("Sorry contest can't be fetched. Sorry sir. :( ", 'red')
                return
            problems = contest['result']['problems']
            cnt = 0
            for prob in problems:
                try:
                    url = prob['url']
                    cmd = 'oj-api get-problem --compatibility ' + url
                    cmd = list(cmd.split())
                    problem_json = subprocess.run(cmd, universal_newlines=True, stdout=subprocess.PIPE,
                                                  stderr=subprocess.PIPE)
                    t = threading.Thread(target=self.create, args=(problem_json.stdout, cnt, True))
                    t.start()
                    cnt += 1
                except:
                    cprint(" Invalid id : " + prob, 'red')
            print()
            # NOTE(review): only the most recently started thread is joined.
            t.join()
            print()
            cprint(f' # Total {cnt} problems is fetched.', 'blue')
        except Exception as e:
            cprint(e, 'red')

    @staticmethod
    def get_contest_path(base, contest_name):
        """Return the path of *contest_name* along the ancestry of *base*.

        Walks the components of *base* until one equals *contest_name*, then
        joins the accumulated prefix with the contest name.
        """
        if get_platform() == 'Windows':
            sep = '\\'
        else:
            sep = '/'
        cnt = len(base.split(sep=sep))
        if cnt <= 2:
            return base
        base = base.rsplit(sep=sep, maxsplit=cnt - 2)
        # NOTE(review): the string 'None' is always truthy, so the
        # `if not contest_path` branch below never runs; the code only works
        # because os.path.join('None', <absolute first component>) discards
        # the 'None' prefix. Likely intended `contest_path = None` -- verify.
        contest_path = 'None'
        for b in base:
            if b == contest_name:
                break
            if not contest_path:
                contest_path = b
            else:
                contest_path = os.path.join(contest_path, b)
        contest_path = os.path.join(contest_path, contest_name)
        return contest_path
|
jarvis.py | import os
import re
import sys
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime, timedelta
from email import message_from_bytes
from email.header import decode_header, make_header
from imaplib import IMAP4_SSL
from json import dumps as json_dumps
from json import load as json_load
from json import loads as json_loads
from math import ceil, floor, log, pow
from multiprocessing.context import TimeoutError as ThreadTimeoutError
from multiprocessing.pool import ThreadPool
from pathlib import Path
from platform import platform, system
from random import choice, choices, randrange
from shutil import disk_usage
from socket import AF_INET, SOCK_DGRAM, gethostname, socket
from ssl import create_default_context
from string import ascii_letters, digits
from subprocess import PIPE, Popen, call, check_output, getoutput
from threading import Thread
from time import perf_counter, sleep, time
from traceback import format_exc
from typing import Tuple, Union
from unicodedata import normalize
from urllib.request import urlopen
from webbrowser import open as web_open
from aeosa.aem.aemsend import EventError
from appscript import app as apple_script
from appscript.reference import CommandError
from certifi import where
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
from dotenv import load_dotenv, set_key, unset_key
from geopy.distance import geodesic
from geopy.exc import GeocoderUnavailable, GeopyError
from geopy.geocoders import Nominatim, options
from gmailconnector.send_email import SendEmail
from gmailconnector.send_sms import Messenger
from googlehomepush import GoogleHome
from googlehomepush.http_server import serve_file
from holidays import CountryHoliday
from inflect import engine
from joke.jokes import chucknorris, geek, icanhazdad, icndb
from newsapi import NewsApiClient, newsapi_exception
from playsound import playsound
from psutil import Process, boot_time, cpu_count, virtual_memory
from pychromecast.error import ChromecastConnectionError
from PyDictionary import PyDictionary
from pyicloud import PyiCloudService
from pyicloud.exceptions import (PyiCloudAPIResponseException,
PyiCloudFailedLoginException)
from pyicloud.services.findmyiphone import AppleDevice
from pyrh import Robinhood
from pyttsx3 import init
from pytz import timezone
from randfacts import getFact
from requests import exceptions as requests_exceptions
from requests import get
from requests.auth import HTTPBasicAuth
from search_engine_parser.core.engines.google import Search as GoogleSearch
from search_engine_parser.core.exceptions import NoResultsOrTrafficError
from speech_recognition import (Microphone, Recognizer, RequestError,
UnknownValueError, WaitTimeoutError)
from speedtest import ConfigRetrievalError, Speedtest
from timezonefinder import TimezoneFinder
from wakeonlan import send_magic_packet as wake
from wikipedia import exceptions as wiki_exceptions
from wikipedia import summary
from wolframalpha import Client as Think
from wordninja import split as splitter
from yaml import FullLoader
from yaml import dump as yaml_dump
from yaml import load as yaml_load
from helper_functions.alarm import Alarm
from helper_functions.conversation import Conversation
from helper_functions.database import Database, file_name
from helper_functions.facial_recognition import Face
from helper_functions.ip_scanner import LocalIPScan
from helper_functions.keywords import Keywords
from helper_functions.lights import MagicHomeApi
from helper_functions.logger import logger
from helper_functions.preset_values import preset_values
from helper_functions.reminder import Reminder
from helper_functions.robinhood import RobinhoodGatherer
from helper_functions.temperature import Temperature
from helper_functions.tv_controls import TV
def listener(phrase_limit: int, timeout: int = None, sound: bool = True) -> str:
    """Activate the microphone listener and transcribe what was heard.

    Args:
        phrase_limit: Time in seconds the listener actively listens to a sound.
        timeout: Time in seconds for the overall listener to be active.
        sound: Whether to play the start/end indicator sounds. Defaults to True.

    Returns:
        str:
            - On success, the statement recognized from the microphone.
            - On failure, the literal string ``SR_ERROR``.
    """
    try:
        sys.stdout.write("\rListener activated..")
        if sound:
            playsound('indicators/start.mp3')
        if timeout and phrase_limit:
            heard = recognizer.listen(source, phrase_time_limit=phrase_limit, timeout=timeout)
        else:
            heard = recognizer.listen(source, phrase_time_limit=phrase_limit)
        sys.stdout.write("\r")
        if sound:
            playsound('indicators/end.mp3')
        return_val = recognizer.recognize_google(heard)
        sys.stdout.write(f'\r{return_val}')
    except (UnknownValueError, RequestError, WaitTimeoutError):
        return_val = 'SR_ERROR'
    return return_val
def split(key: str) -> bool:
    """Split the statement at 'and'/'also' and run each part through conditions().

    Args:
        key: Takes the voice recognized statement as argument.

    Returns:
        bool:
            Return value from ``conditions()`` (True only for the sleep command,
            which breaks the while loop in renew()).
    """
    lowered = key.lower()
    if ' and ' in key and not any(word in lowered for word in keywords.avoid()):
        fragments = key.split(' and ')
    elif ' also ' in key and not any(word in lowered for word in keywords.avoid()):
        fragments = key.split(' also ')
    else:
        fragments = [key]
    exit_check = False
    for fragment in fragments:
        exit_check = conditions(fragment.strip())
    return exit_check
def part_of_day() -> str:
    """Classify the current wall-clock time into a part of the day.

    Returns:
        str:
            'Morning', 'Afternoon', 'Evening' or 'Night' based on time of day.
    """
    meridian = datetime.now().strftime("%p")
    hour_12 = int(datetime.now().strftime("%I"))
    if meridian == 'AM' and hour_12 in range(4, 12):
        return 'Morning'
    if meridian == 'PM' and (hour_12 == 12 or hour_12 in range(1, 4)):
        return 'Afternoon'
    if meridian == 'PM' and hour_12 in range(4, 8):
        return 'Evening'
    return 'Night'
def initialize() -> None:
    """Awaken from sleep mode and start listening.

    ``greet_check`` ensures the time-of-day greeting is spoken only on the
    very first activation; later wake-ups get a short prompt instead.
    """
    if not greet_check.get('status'):
        speaker.say(f'Good {part_of_day()}.')
        greet_check['status'] = True
    else:
        speaker.say("What can I do for you?")
    renew()
def renew() -> None:
    """Listen repeatedly and forward each phrase to ``conditions()``.

    Notes:
        - Runs at most 12 listen cycles (roughly a minute).
        - split(converted) doubles as the loop condition: it returns True
          only when ``conditions()`` signals sleep, which ends the loop.
    """
    speaker.runAndWait()
    filler = ['buddy', 'jarvis', 'hey', 'hello', 'sr_error']
    for attempt in range(12):
        try:
            if attempt == 0:
                converted = listener(timeout=3, phrase_limit=5)
            else:
                converted = listener(timeout=3, phrase_limit=5, sound=False)
            converted = ' '.join(token for token in converted.split()
                                 if token.lower() not in filler)
            if converted and split(converted):
                break  # split() relays conditions(); True means sleep.
            speaker.runAndWait()
        except (UnknownValueError, RequestError, WaitTimeoutError):
            pass
def time_converter(seconds: float) -> str:
    """Convert a duration in seconds to a days/hours/minutes/seconds phrase.

    Args:
        seconds: Takes number of seconds as argument.

    Returns:
        str:
            Seconds converted to days or hours or minutes or seconds.
            A zero-second duration yields ``'0 seconds'`` (previously the
            function fell through every branch and returned ``None``,
            violating the declared return type).
    """
    days = round(seconds // 86400)
    seconds = round(seconds % (24 * 3600))  # remainder within the last day
    hours = round(seconds // 3600)
    seconds %= 3600
    minutes = round(seconds // 60)
    seconds %= 60
    if days:
        return f'{days} days, {hours} hours, {minutes} minutes, and {seconds} seconds'
    elif hours:
        return f'{hours} hours, {minutes} minutes, and {seconds} seconds'
    elif minutes:
        return f'{minutes} minutes, and {seconds} seconds'
    else:
        # Fix: explicit fallback so 0 seconds returns a string, not None.
        return f'{seconds} seconds'
def conditions(converted: str) -> bool:
    """Conditions function is used to check the message processed.

    Uses the keywords to do a regex match and trigger the appropriate function
    which has dedicated task. This is the central dispatcher: one long
    if/elif chain matching the recognized statement against keyword groups.

    Args:
        converted: Takes the voice recognized statement as argument.

    Returns:
        bool:
            Boolean True only when asked to sleep for conditioned sleep message.
            (Other paths return False or fall through returning None.)
    """
    sys.stdout.write(f'\r{converted}')
    converted_lower = converted.lower()
    todo_checks = ['to do', 'to-do', 'todo']
    if any(word in converted_lower for word in keywords.date()) and \
            not any(word in converted_lower for word in keywords.avoid()):
        current_date()
    elif any(word in converted_lower for word in keywords.time()) and \
            not any(word in converted_lower for word in keywords.avoid()):
        # Capitalized (or dotted) words are collected as a place name.
        place = ''
        for word in converted.split():
            if word[0].isupper():
                place += word + ' '
            elif '.' in word:
                place += word + ' '
        if place:
            current_time(place)
        else:
            current_time()
    elif any(word in converted_lower for word in keywords.weather()) and \
            not any(word in converted_lower for word in keywords.avoid()):
        place = ''
        for word in converted.split():
            if word[0].isupper():
                place += word + ' '
            elif '.' in word:
                place += word + ' '
        # Future-tense hints route to the forecast variant.
        weather_cond = ['tomorrow', 'day after', 'next week', 'tonight', 'afternoon', 'evening']
        if any(match in converted_lower for match in weather_cond):
            if place:
                weather_condition(msg=converted, place=place)
            else:
                weather_condition(msg=converted)
        elif place:
            weather(place)
        else:
            weather()
    elif any(word in converted_lower for word in keywords.system_info()):
        system_info()
    # note: matched against the raw (case-sensitive) text so 'IP' stays upper.
    elif any(word in converted for word in keywords.ip_info()) or 'IP' in converted.split():
        if 'public' in converted_lower:
            if not internet_checker():
                speaker.say("You are not connected to the internet sir!")
                return False
            if ssid := get_ssid():
                ssid = f'for the connection {ssid} '
            else:
                ssid = ''
            # Two public-IP providers tried in order.
            if public_ip := json_load(urlopen('http://ipinfo.io/json')).get('ip'):
                output = f"My public IP {ssid}is {public_ip}"
            elif public_ip := json_loads(urlopen('http://ip.jsontest.com').read()).get('ip'):
                output = f"My public IP {ssid}is {public_ip}"
            else:
                output = 'I was unable to fetch the public IP sir!'
        else:
            ip_address = vpn_checker().split(':')[-1]
            output = f"My local IP address for {gethostname()} is {ip_address}"
        sys.stdout.write(f'\r{output}')
        speaker.say(output)
    elif any(word in converted_lower for word in keywords.wikipedia()):
        wikipedia_()
    elif any(word in converted_lower for word in keywords.news()):
        news()
    elif any(word in converted_lower for word in keywords.report()):
        report()
    elif any(word in converted_lower for word in keywords.robinhood()):
        robinhood()
    elif any(word in converted_lower for word in keywords.repeat()):
        repeater()
    elif any(word in converted_lower for word in keywords.location()):
        location()
    elif any(word in converted_lower for word in keywords.locate()):
        locate(converted)
    elif any(word in converted_lower for word in keywords.gmail()):
        gmail()
    elif any(word in converted_lower for word in keywords.meaning()):
        meaning(converted.split()[-1])
    # To-do branches also require an explicit to-do phrase to avoid false hits.
    elif any(word in converted_lower for word in keywords.delete_todo()) and \
            any(word in converted_lower for word in todo_checks):
        delete_todo()
    elif any(word in converted_lower for word in keywords.list_todo()):
        todo()
    elif any(word in converted_lower for word in keywords.add_todo()) and \
            any(word in converted_lower for word in todo_checks):
        add_todo()
    elif any(word in converted_lower for word in keywords.delete_db()):
        delete_db()
    elif any(word in converted_lower for word in keywords.create_db()):
        create_db()
    elif any(word in converted_lower for word in keywords.distance()) and \
            not any(word in converted_lower for word in keywords.avoid()):
        """the loop below differentiates between two places and one place with two words
        eg: New York will be considered as one word and New York and Las Vegas will be considered as two words"""
        check = converted.split()  # str to list
        places = []
        for word in check:
            if word[0].isupper() or '.' in word:  # looks for words that start with uppercase
                try:
                    next_word = check[check.index(word) + 1]  # looks if words after an uppercase word is also one
                    if next_word[0].isupper():
                        places.append(f"{word + ' ' + check[check.index(word) + 1]}")
                    else:
                        if word not in ' '.join(places):
                            places.append(word)
                except IndexError:  # catches exception on lowercase word after an upper case word
                    if word not in ' '.join(places):
                        places.append(word)
        """the condition below assumes two different words as two places but not including two words starting upper case
        right next to each other"""
        if len(places) >= 2:
            start = places[0]
            end = places[1]
        elif len(places) == 1:
            start = None
            end = places[0]
        else:
            start, end = None, None
        distance(start, end)
    elif any(word in converted_lower for word in conversation.form()):
        speaker.say("I am a program, I'm without form.")
    elif any(word in converted_lower for word in keywords.geopy()):
        # tries to look for words starting with an upper case letter
        place = ''
        for word in converted.split():
            if word[0].isupper():
                place += word + ' '
            elif '.' in word:
                place += word + ' '
        # if no words found starting with an upper case letter, fetches word after the keyword 'is' eg: where is Chicago
        if not place:
            keyword = 'is'
            before_keyword, keyword, after_keyword = converted.partition(keyword)
            place = after_keyword.replace(' in', '').strip()
        locate_places(place.strip())
    elif any(word in converted_lower for word in keywords.directions()):
        place = ''
        for word in converted.split():
            if word[0].isupper():
                place += word + ' '
            elif '.' in word:
                place += word + ' '
        place = place.replace('I ', '').strip()
        if place:
            directions(place)
        else:
            speaker.say("I can't take you to anywhere without a location sir!")
            directions(place=None)
    elif any(word in converted_lower for word in keywords.webpage()) and \
            not any(word in converted_lower for word in keywords.avoid()):
        converted = converted.replace(' In', 'in').replace(' Co. Uk', 'co.uk')
        # host is a generator of dotted tokens (candidate hostnames).
        host = (word for word in converted.split() if '.' in word)
        webpage(host)
    elif any(word in converted_lower for word in keywords.kill_alarm()):
        kill_alarm()
    elif any(word in converted_lower for word in keywords.alarm()):
        alarm(converted_lower)
    elif any(word in converted_lower for word in keywords.google_home()):
        google_home()
    elif any(word in converted_lower for word in keywords.jokes()):
        jokes()
    elif any(word in converted_lower for word in keywords.reminder()):
        reminder(converted_lower)
    elif any(word in converted_lower for word in keywords.notes()):
        notes()
    elif any(word in converted_lower for word in keywords.github()):
        auth = HTTPBasicAuth(git_user, git_pass)
        response = get('https://api.github.com/user/repos?type=all&per_page=100', auth=auth).json()
        result, repos, total, forked, private, archived, licensed = [], [], 0, 0, 0, 0, 0
        for i in range(len(response)):
            total += 1
            forked += 1 if response[i]['fork'] else 0
            private += 1 if response[i]['private'] else 0
            archived += 1 if response[i]['archived'] else 0
            licensed += 1 if response[i]['license'] else 0
            repos.append({response[i]['name'].replace('_', ' ').replace('-', ' '): response[i]['clone_url']})
        if 'how many' in converted:
            speaker.say(f'You have {total} repositories sir, out of which {forked} are forked, {private} are private, '
                        f'{licensed} are licensed, and {archived} archived.')
        else:
            # side-effecting comprehension: collects clone URLs whose repo name
            # matches any spoken word (deduplicated via the membership check).
            [result.append(clone_url) if clone_url not in result and re.search(rf'\b{word}\b', repo.lower()) else None
             for word in converted_lower.split() for item in repos for repo, clone_url in item.items()]
            if result:
                github(target=result)
            else:
                speaker.say("Sorry sir! I did not find that repo.")
    elif any(word in converted_lower for word in keywords.txt_message()):
        # all digit groups in the statement joined as a phone number
        number = '-'.join([str(s) for s in re.findall(r'\b\d+\b', converted)])
        send_sms(number)
    elif any(word in converted_lower for word in keywords.google_search()):
        phrase = converted.split('for')[-1] if 'for' in converted else None
        google_search(phrase)
    elif any(word in converted_lower for word in keywords.tv()):
        television(converted)
    elif any(word in converted_lower for word in keywords.apps()):
        apps(converted.split()[-1])
    elif any(word in converted_lower for word in keywords.music()):
        if 'speaker' in converted_lower:
            music(converted)
        else:
            music()
    elif any(word in converted_lower for word in keywords.volume()):
        if 'mute' in converted_lower:
            level = 0
        elif 'max' in converted_lower or 'full' in converted_lower:
            level = 100
        else:
            level = re.findall(r'\b\d+\b', converted)  # gets integers from string as a list
            level = int(level[0]) if level else 50  # converted to int for volume
        volume_controller(level)
        speaker.say(f"{choice(ack)}!")
    elif any(word in converted_lower for word in keywords.face_detection()):
        face_recognition_detection()
    elif any(word in converted_lower for word in keywords.speed_test()):
        speed_test()
    elif any(word in converted_lower for word in keywords.bluetooth()):
        bluetooth(phrase=converted_lower)
    elif any(word in converted_lower for word in keywords.brightness()) and 'lights' not in converted_lower:
        speaker.say(choice(ack))
        if 'set' in converted_lower or re.findall(r'\b\d+\b', converted_lower):
            level = re.findall(r'\b\d+\b', converted_lower)  # gets integers from string as a list
            if not level:
                level = ['50']  # pass as list for brightness, as args must be iterable
            Thread(target=set_brightness, args=level).start()
        elif 'decrease' in converted_lower or 'reduce' in converted_lower or 'lower' in converted_lower or \
                'dark' in converted_lower or 'dim' in converted_lower:
            Thread(target=decrease_brightness).start()
        elif 'increase' in converted_lower or 'bright' in converted_lower or 'max' in converted_lower or \
                'brighten' in converted_lower or 'light up' in converted_lower:
            Thread(target=increase_brightness).start()
    elif any(word in converted_lower for word in keywords.lights()):
        if not vpn_checker().startswith('VPN'):
            lights(converted=converted_lower)
    # NOTE(review): 'keywords.guard_enable() or keywords.guard_disable()'
    # evaluates to guard_enable() alone whenever that list is non-empty, so
    # the disable keywords are likely never matched here -- confirm intent.
    elif any(word in converted_lower for word in keywords.guard_enable() or keywords.guard_disable()):
        if any(word in converted_lower for word in keywords.guard_enable()):
            logger.info('Enabled Security Mode')
            speaker.say(f"Enabled security mode sir! I will look out for potential threats and keep you posted. "
                        f"Have a nice {part_of_day()}, and enjoy yourself sir!")
            speaker.runAndWait()
            guard()
    elif any(word in converted_lower for word in keywords.flip_a_coin()):
        playsound('indicators/coin.mp3')
        sleep(0.5)
        speaker.say(f"""{choice(['You got', 'It landed on', "It's"])} {choice(['heads', 'tails'])} sir""")
    elif any(word in converted_lower for word in keywords.facts()):
        speaker.say(getFact(False))
    elif any(word in converted_lower for word in keywords.meetings()):
        if os.path.isfile('meetings'):
            meeting_reader()
        else:
            if os.environ.get('called_by_offline'):
                speaker.say("Meetings file is not ready yet. Please try again in a minute or two.")
                return False
            # fetch the calendar on a worker thread with a 60 s budget
            meeting = ThreadPool(processes=1).apply_async(func=meetings)
            speaker.say("Please give me a moment sir! Let me check your calendar.")
            speaker.runAndWait()
            try:
                speaker.say(meeting.get(timeout=60))
            except ThreadTimeoutError:
                logger.error('Unable to read the calendar within 60 seconds.')
                speaker.say("I wasn't able to read your calendar within the set time limit sir!")
            speaker.runAndWait()
    elif any(word in converted_lower for word in keywords.voice_changer()):
        voice_changer(converted)
    elif any(word in converted_lower for word in keywords.system_vitals()):
        system_vitals()
    elif any(word in converted_lower for word in keywords.vpn_server()):
        if vpn_server_check():
            speaker.say('An operation for VPN Server is already in progress sir! Please wait and retry.')
        elif 'start' in converted_lower or 'trigger' in converted_lower or 'initiate' in converted_lower or \
                'enable' in converted_lower or 'spin up' in converted_lower:
            Thread(target=vpn_server, args=['START']).start()
            speaker.say('VPN Server has been initiated sir! Login details will be sent to you shortly.')
        elif 'stop' in converted_lower or 'shut' in converted_lower or 'close' in converted_lower or \
                'disable' in converted_lower:
            Thread(target=vpn_server, args=['STOP']).start()
            speaker.say('VPN Server will be shutdown sir!')
        else:
            speaker.say("I don't understand the request sir! You can ask me to enable or disable the VPN server.")
    elif any(word in converted_lower for word in keywords.personal_cloud()):
        if 'enable' in converted_lower or 'initiate' in converted_lower or 'kick off' in converted_lower or \
                'start' in converted_lower:
            Thread(target=personal_cloud.enable).start()
            speaker.say("Personal Cloud has been triggered sir! I will send the login details to your phone number "
                        "once the server is up and running.")
        elif 'disable' in converted_lower or 'stop' in converted_lower:
            Thread(target=personal_cloud.disable).start()
            speaker.say(choice(ack))
        else:
            speaker.say("I didn't quite get that sir! Please tell me if I should enable or disable your server.")
    elif any(word in converted_lower for word in conversation.greeting()):
        speaker.say('I am spectacular. I hope you are doing fine too.')
    elif any(word in converted_lower for word in conversation.capabilities()):
        speaker.say('There is a lot I can do. For example: I can get you the weather at any location, news around '
                    'you, meanings of words, launch applications, create a to-do list, check your emails, get your '
                    'system configuration, tell your investment details, locate your phone, find distance between '
                    'places, set an alarm, play music on smart devices around you, control your TV, tell a joke, send'
                    ' a message, set reminders, scan and clone your GitHub repositories, and much more. Time to ask,.')
    elif any(word in converted_lower for word in conversation.languages()):
        speaker.say("Tricky question!. I'm configured in python, and I can speak English.")
    elif any(word in converted_lower for word in conversation.whats_up()):
        speaker.say("My listeners are up. There is nothing I cannot process. So ask me anything..")
    elif any(word in converted_lower for word in conversation.what()):
        speaker.say("I'm just a pre-programmed virtual assistant, trying to become a natural language UI.")
    elif any(word in converted_lower for word in conversation.who()):
        speaker.say("I am Jarvis. A virtual assistant designed by Mr.Raauv.")
    elif any(word in converted_lower for word in conversation.about_me()):
        speaker.say("I am Jarvis. A virtual assistant designed by Mr.Raauv.")
        speaker.say("I'm just a pre-programmed virtual assistant, trying to become a natural language UI.")
        speaker.say("I can seamlessly take care of your daily tasks, and also help with most of your work!")
    elif any(word in converted_lower for word in keywords.sleep()):
        if 'pc' in converted_lower or 'computer' in converted_lower or 'imac' in converted_lower or \
                'screen' in converted_lower:
            pc_sleep()
        else:
            # Sentry mode: the only path that returns True (breaks renew()).
            speaker.say("Activating sentry mode, enjoy yourself sir!")
            if greet_check:
                greet_check.pop('status')
            return True
    elif any(word in converted_lower for word in keywords.restart()):
        if 'pc' in converted_lower or 'computer' in converted_lower or 'imac' in converted_lower:
            logger.info(f'JARVIS::Restart for {host_info("model")} has been requested.')
            restart(target='PC')
        else:
            logger.info('JARVIS::Self reboot has been requested.')
            if 'quick' in converted_lower or 'fast' in converted_lower:
                restart(quick=True)
            else:
                restart()
    elif any(word in converted_lower for word in keywords.kill()) and \
            not any(word in converted_lower for word in keywords.avoid()):
        raise KeyboardInterrupt
    elif any(word in converted_lower for word in keywords.shutdown()):
        shutdown()
    elif any(word in converted_lower for word in keywords.chatbot()):
        chatter_bot()
    else:
        logger.info(f'Received the unrecognized lookup parameter: {converted}')
        Thread(target=unrecognized_dumper, args=[converted]).start()  # writes to training_data.yaml in a thread
        # Fallback chain: wolfram alpha -> maps lookup -> google; each returns
        # truthy when it could NOT handle the phrase, so all three must fail
        # before the default browser search below fires.
        if alpha(converted):
            if google_maps(converted):
                if google(converted):
                    # if none of the conditions above are met, opens a google search on default browser
                    sys.stdout.write(f"\r{converted}")
                    if google_maps.has_been_called:
                        google_maps.has_been_called = False
                        speaker.say("I have also opened a google search for your request.")
                    else:
                        speaker.say(f"I heard {converted}. Let me look that up.")
                        speaker.runAndWait()
                        speaker.say("I have opened a google search for your request.")
                    search = str(converted).replace(' ', '+')
                    unknown_url = f"https://www.google.com/search?q={search}"
                    web_open(unknown_url)
def unrecognized_dumper(converted: str) -> None:
    """If none of the conditions are met, converted text is written to a yaml file.

    Args:
        converted: Takes the voice recognized statement as argument.

    Notes:
        - Appends to ``training_data.yaml`` only when the phrase is not already present (avoids duplication).
        - Creates the file on first use.
    """
    train_file = {'Uncategorized': converted}
    if os.path.isfile('training_data.yaml'):
        # context manager closes the file handle; the original read leaked it
        with open(r'training_data.yaml', 'r') as reader:
            content = reader.read()
        for key, value in train_file.items():
            if str(value) not in content:  # avoids duplication in yaml file
                dict_file = [{key: [value]}]
                with open(r'training_data.yaml', 'a') as writer:
                    yaml_dump(dict_file, writer)
    else:
        for key, value in train_file.items():
            train_file = [{key: [value]}]
            with open(r'training_data.yaml', 'w') as writer:
                yaml_dump(train_file, writer)
def location_services(device: AppleDevice) -> Union[None, Tuple[str, str, str]]:
    """Gets the current location of an apple device.

    Args:
        device: Passed when locating a particular apple device.

    Returns:
        None or Tuple[str, str, str]:
        - On success, returns ``current latitude``, ``current longitude`` and ``location`` information as a ``dict``.
        - On failure, calls the ``restart()`` or ``terminator()`` function depending on the error.
    """
    try:
        # tries with icloud api to get your device's location for precise location services
        if not device:
            device = device_selector()
        raw_location = device.location()
        # noinspection PyUnresolvedReferences,PyProtectedMember
        # frame inspection: only when called from locate() does a missing location return sentinel strings
        # instead of raising, so locate() can report the device as offline
        if not raw_location and sys._getframe(1).f_code.co_name == 'locate':
            return 'None', 'None', 'None'
        elif not raw_location:
            raise PyiCloudAPIResponseException(reason=f'Unable to retrieve location for {device}')
        else:
            current_lat_ = raw_location['latitude']
            current_lon_ = raw_location['longitude']
    except (PyiCloudAPIResponseException, PyiCloudFailedLoginException):
        # uses latitude and longitude information from your IP's client when unable to connect to icloud
        icloud_error = sys.exc_info()[0]
        logger.error(f'Unable to retrieve location::{icloud_error.__name__}\n{format_exc()}')  # include traceback
        current_lat_ = st.results.client['lat']
        current_lon_ = st.results.client['lon']
        speaker.say("I have trouble accessing the i-cloud API, so I'll be using your I.P address to get your location. "
                    "Please note that this may not be accurate enough for location services.")
        speaker.runAndWait()
    except requests_exceptions.ConnectionError:
        # no internet at all: report, print runtime stats and shut the assistant down
        current_lat_, current_lon_ = None, None
        sys.stdout.write('\rBUMMER::Unable to connect to the Internet')
        speaker.say("I was unable to connect to the internet. Please check your connection settings and retry.")
        speaker.runAndWait()
        sys.stdout.write(f"\rMemory consumed: {size_converter(0)}"
                         f"\nTotal runtime: {time_converter(perf_counter())}")
        terminator()
    try:
        # Uses the latitude and longitude information and converts to the required address.
        locator = geo_locator.reverse(f'{current_lat_}, {current_lon_}', language='en')
        return current_lat_, current_lon_, locator.raw['address']
    except (GeocoderUnavailable, GeopyError):
        logger.error('Error retrieving address from latitude and longitude information. Initiating self reboot.')
        speaker.say('Received an error while retrieving your address sir! I think a restart should fix this.')
        restart(quick=True)
def report() -> None:
    """Initiates a list of functions, that I tend to check first thing in the morning."""
    sys.stdout.write("\rStarting today's report")
    report.has_been_called = True
    # order matters: date, time, weather, tasks, mail, stocks, then the news
    for segment in (current_date, current_time, weather, todo, gmail, robinhood, news):
        segment()
    report.has_been_called = False
def current_date() -> None:
    """Says today's date and adds the current time in speaker queue if report or time_travel function was called."""
    day_and_month = datetime.now().strftime("%A, %B")
    ordinal_day = engine().ordinal(datetime.now().strftime("%d"))
    current_year = datetime.now().strftime("%Y")
    event = celebrate()
    # the year is skipped for the time-travel flow, spoken otherwise
    if time_travel.has_been_called:
        spoken_date = day_and_month + ordinal_day
    else:
        spoken_date = day_and_month + ordinal_day + ', ' + current_year
    speaker.say(f"It's {spoken_date}")
    if event == 'Birthday':
        speaker.say(f"It's also your {event} sir!")
    elif event:
        speaker.say(f"It's also {event} sir!")
    # queue the lead-in so current_time() follows naturally in report/time-travel mode
    if report.has_been_called or time_travel.has_been_called:
        speaker.say('The current time is, ')
def current_time(place: str = None) -> None:
    """Says current time at the requested location if any, else with respect to the current timezone.

    Args:
        place: Takes name of the place as argument.
    """
    if not place:
        # no place given: speak the local time and bail out early
        speaker.say(f'{datetime.now().strftime("%I:%M %p")}.')
        return
    tf = TimezoneFinder()
    place_tz = geo_locator.geocode(place)
    coordinates = place_tz.latitude, place_tz.longitude
    located = geo_locator.reverse(coordinates, language='en')
    address = located.raw['address']
    city, state = address.get('city'), address.get('state')
    time_location = f'{city} {state}'.replace('None', '') if city or state else place
    zone = tf.timezone_at(lat=place_tz.latitude, lng=place_tz.longitude)
    datetime_zone = datetime.now(timezone(zone))
    date_tz = datetime_zone.strftime("%A, %B %d, %Y")
    time_tz = datetime_zone.strftime("%I:%M %p")
    # mention the date only when the remote timezone is on a different calendar day
    if date_tz == datetime.now().strftime("%A, %B %d, %Y"):
        speaker.say(f'The current time in {time_location} is {time_tz}.')
    else:
        date_tz = datetime_zone.strftime("%A, %B %d")
        speaker.say(f'The current time in {time_location} is {time_tz}, on {date_tz}.')
def webpage(target: str or list) -> None:
    """Opens up a webpage using the default browser to the target host.

    If no target received, will ask for user confirmation. If no '.' in the phrase heard, phrase will default to .com.

    Args:
        target: Receives the webpage that has to be opened as an argument.
    """
    try:
        host = list(target)  # accepts any iterable of hostnames; raises TypeError for None
    except TypeError:
        host = None
    if not host:
        speaker.say("Which website shall I open sir?")
        speaker.runAndWait()
        converted = listener(3, 4)
        if converted != 'SR_ERROR':
            if 'exit' in converted or 'quit' in converted or 'Xzibit' in converted:
                return
            elif '.' in converted and len(converted.split()) == 1:
                # fix: the original tested len(list(converted)) == 1, which counts characters,
                # so this single-word-hostname branch was effectively unreachable
                _target = (word for word in converted.split() if '.' in word)
                webpage(_target)
            else:
                converted = converted.lower().replace(' ', '')
                target_ = [f"{converted}" if '.' in converted else f"{converted}.com"]
                webpage(target_)
    else:
        for web in host:
            web_url = f"https://{web}"
            web_open(web_url)
        speaker.say(f"I have opened {host}")
def weather(place: str = None) -> None:
    """Says weather at any location if a specific location is mentioned.

    Says weather at current location by getting IP using reverse geocoding if no place is received.

    Args:
        place: Takes the location name as an optional argument.
    """
    sys.stdout.write('\rGetting your weather info')
    if place:
        # geocode the requested place, then reverse-geocode for a structured address (city/state)
        desired_location = geo_locator.geocode(place)
        coordinates = desired_location.latitude, desired_location.longitude
        located = geo_locator.reverse(coordinates, language='en')
        data = located.raw
        address = data['address']
        city = address['city'] if 'city' in address.keys() else None
        state = address['state'] if 'state' in address.keys() else None
        lat = located.latitude
        lon = located.longitude
    else:
        # fall back to pre-computed location info for the current machine
        city, state = location_info['city'], location_info['state']
        lat = current_lat
        lon = current_lon
    api_endpoint = "http://api.openweathermap.org/data/2.5/"
    weather_url = f'{api_endpoint}onecall?lat={lat}&lon={lon}&exclude=minutely,hourly&appid={weather_api}'
    r = urlopen(weather_url)  # sends request to the url created
    response = json_loads(r.read())  # loads the response in a json
    weather_location = f'{city} {state}'.replace('None', '') if city != state else city or state
    # temperatures come back in Kelvin; converted to Fahrenheit via temperature.k2f below
    temp = response['current']['temp']
    condition = response['current']['weather'][0]['description']
    feels_like = response['current']['feels_like']
    maxi = response['daily'][0]['temp']['max']
    high = int(round(temperature.k2f(maxi), 2))
    mini = response['daily'][0]['temp']['min']
    low = int(round(temperature.k2f(mini), 2))
    temp_f = int(round(temperature.k2f(temp), 2))
    temp_feel_f = int(round(temperature.k2f(feels_like), 2))
    sunrise = datetime.fromtimestamp(response['daily'][0]['sunrise']).strftime("%I:%M %p")
    sunset = datetime.fromtimestamp(response['daily'][0]['sunset']).strftime("%I:%M %p")
    if time_travel.has_been_called:
        # morning-report mode: add a clothing/activity suggestion keyed off the "feels like" temperature.
        # NOTE(review): the stray double-quotes inside some suggestion strings look like typos — confirm
        # whether they are intentional before changing spoken output.
        # NOTE(review): 40 and 60 °F fall into none of the ranges below — confirm the gaps are intended.
        if 'rain' in condition or 'showers' in condition:
            feeling = 'rainy'
            weather_suggest = 'You might need an umbrella" if you plan to head out.'
        elif temp_feel_f < 40:
            feeling = 'freezing'
            weather_suggest = 'Perhaps" it is time for winter clothing.'
        elif temp_feel_f in range(41, 60):
            feeling = 'cool'
            weather_suggest = 'I think a lighter jacket would suffice" if you plan to head out.'
        elif temp_feel_f in range(61, 75):
            feeling = 'optimal'
            weather_suggest = 'It might be a perfect weather for a hike, or perhaps a walk.'
        elif temp_feel_f in range(75, 85):
            feeling = 'warm'
            weather_suggest = 'It is a perfect weather for some outdoor entertainment.'
        elif temp_feel_f > 85:
            feeling = 'hot'
            weather_suggest = "I would not recommend thick clothes today."
        else:
            feeling, weather_suggest = '', ''
        wind_speed = response['current']['wind_speed']
        if wind_speed > 10:
            output = f'The weather in {city} is a {feeling} {temp_f}°, but due to the current wind conditions ' \
                     f'(which is {wind_speed} miles per hour), it feels like {temp_feel_f}°. {weather_suggest}'
        else:
            output = f'The weather in {city} is a {feeling} {temp_f}°, and it currently feels like {temp_feel_f}°. ' \
                     f'{weather_suggest}'
    elif place or not report.has_been_called:
        # ad-hoc query: short summary without sunrise/sunset
        output = f'The weather in {weather_location} is {temp_f}°F, with a high of {high}, and a low of {low}. ' \
                 f'It currently feels like {temp_feel_f}°F, and the current condition is {condition}.'
    else:
        # report mode at current location: full summary including sunrise/sunset
        output = f'The weather in {weather_location} is {temp_f}°F, with a high of {high}, and a low of {low}. ' \
                 f'It currently feels Like {temp_feel_f}°F, and the current condition is {condition}. ' \
                 f'Sunrise at {sunrise}. Sunset at {sunset}.'
    # append any active weather alert (only the first one returned by the API)
    if 'alerts' in response:
        alerts = response['alerts'][0]['event']
        start_alert = datetime.fromtimestamp(response['alerts'][0]['start']).strftime("%I:%M %p")
        end_alert = datetime.fromtimestamp(response['alerts'][0]['end']).strftime("%I:%M %p")
    else:
        alerts, start_alert, end_alert = None, None, None
    if alerts and start_alert and end_alert:
        output += f' You have a weather alert for {alerts} between {start_alert} and {end_alert}'
    sys.stdout.write(f"\r{output}")
    speaker.say(output)
def weather_condition(msg: str, place: str = None) -> None:
    """Weather report when phrase has conditions like tomorrow, day after, next week and specific part of the day etc.

    Notes:
        - ``weather_condition()`` uses conditional blocks to fetch keywords and determine the output.

    Args:
        place: Name of place where the weather information is needed.
        msg: Takes the voice recognized statement as argument.
    """
    if place:
        # geocode the requested place, then reverse-geocode for a structured address (city/state)
        desired_location = geo_locator.geocode(place)
        coordinates = desired_location.latitude, desired_location.longitude
        located = geo_locator.reverse(coordinates, language='en')
        data = located.raw
        address = data['address']
        city = address['city'] if 'city' in address.keys() else None
        state = address['state'] if 'state' in address.keys() else None
        lat = located.latitude
        lon = located.longitude
    else:
        city, state = location_info['city'], location_info['state']
        lat = current_lat
        lon = current_lon
    api_endpoint = "http://api.openweathermap.org/data/2.5/"
    weather_url = f'{api_endpoint}onecall?lat={lat}&lon={lon}&exclude=minutely,hourly&appid={weather_api}'
    r = urlopen(weather_url)  # sends request to the url created
    response = json_loads(r.read())  # loads the response in a json
    weather_location = f'{city} {state}' if city and state else place
    # pick the index into the API's 'daily' array based on the phrase
    if 'tonight' in msg:
        key = 0
        tell = 'tonight'
    elif 'day after' in msg:
        key = 2
        tell = 'day after tomorrow '
    elif 'tomorrow' in msg:
        key = 1
        tell = 'tomorrow '
    elif 'next week' in msg:
        key = -1
        next_week = datetime.fromtimestamp(response['daily'][-1]['dt']).strftime("%A, %B %d")
        tell = f"on {' '.join(next_week.split()[0:-1])} {engine().ordinal(next_week.split()[-1])}"
    else:
        key = 0
        tell = 'today '
    # pick the part-of-day bucket used by the API's temp/feels_like dicts
    if 'morning' in msg:
        when = 'morn'
        tell += 'morning'
    elif 'evening' in msg:
        when = 'eve'
        tell += 'evening'
    elif 'tonight' in msg:
        when = 'night'
    elif 'night' in msg:
        when = 'night'
        tell += 'night'
    else:
        when = 'day'
        tell += ''
    if 'alerts' in response:
        alerts = response['alerts'][0]['event']
        start_alert = datetime.fromtimestamp(response['alerts'][0]['start']).strftime("%I:%M %p")
        end_alert = datetime.fromtimestamp(response['alerts'][0]['end']).strftime("%I:%M %p")
    else:
        alerts, start_alert, end_alert = None, None, None
    temp = response['daily'][key]['temp'][when]
    feels_like = response['daily'][key]['feels_like'][when]
    condition = response['daily'][key]['weather'][0]['description']
    sunrise = response['daily'][key]['sunrise']
    sunset = response['daily'][key]['sunset']
    maxi = response['daily'][key]['temp']['max']
    # fix: was hard-coded response['daily'][1] — reported tomorrow's low regardless of the requested day
    mini = response['daily'][key]['temp']['min']
    high = int(round(temperature.k2f(maxi), 2))
    low = int(round(temperature.k2f(mini), 2))
    temp_f = int(round(temperature.k2f(temp), 2))
    temp_feel_f = int(round(temperature.k2f(feels_like), 2))
    sunrise = datetime.fromtimestamp(sunrise).strftime("%I:%M %p")
    sunset = datetime.fromtimestamp(sunset).strftime("%I:%M %p")
    # fix: "fee like" typo in the spoken/printed output
    output = f'The weather in {weather_location} {tell} would be {temp_f}°F, with a high ' \
             f'of {high}, and a low of {low}. But due to {condition} it will feel like it is {temp_feel_f}°F. ' \
             f'Sunrise at {sunrise}. Sunset at {sunset}. '
    if alerts and start_alert and end_alert:
        output += f'There is a weather alert for {alerts} between {start_alert} and {end_alert}'
    sys.stdout.write(f'\r{output}')
    speaker.say(output)
def system_info() -> None:
    """Gets the system configuration."""
    # humanize disk usage figures for the root volume
    disk_total, disk_used, disk_free = (size_converter(figure) for figure in disk_usage("/"))
    ram = size_converter(virtual_memory().total).replace('.0', '')
    ram_used = size_converter(virtual_memory().percent).replace(' B', ' %')
    sys_config = (f"You're running {platform().split('.')[0]}, "
                  f"with {cpu_count(logical=False)} physical cores and {cpu_count(logical=True)} logical cores. "
                  f"Your physical drive capacity is {disk_total}. You have used up {disk_used} of space. "
                  f"Your free space is {disk_free}. Your RAM capacity is {ram}. "
                  f"You are currently utilizing {ram_used} of your memory.")
    sys.stdout.write(f'\r{sys_config}')
    speaker.say(sys_config)
def wikipedia_() -> None:
    """Gets any information from wikipedia using it's API."""
    speaker.say("Please tell the keyword.")
    speaker.runAndWait()
    keyword = listener(3, 5)
    if keyword != 'SR_ERROR':
        if any(word in keyword.lower() for word in keywords.exit()):
            return
        else:
            sys.stdout.write(f'\rGetting your info from Wikipedia API for {keyword}')
            try:
                result = summary(keyword)
            except wiki_exceptions.DisambiguationError as e:  # checks for the right keyword in case of 1+ matches
                sys.stdout.write(f'\r{e}')
                speaker.say('Your keyword has multiple results sir. Please pick any one displayed on your screen.')
                speaker.runAndWait()
                keyword1 = listener(3, 5)
                result = summary(keyword1) if keyword1 != 'SR_ERROR' else None
            except wiki_exceptions.PageError:
                speaker.say(f"I'm sorry sir! I didn't get a response for the phrase: {keyword}. Try again!")
                wikipedia_()
                # fix: without this return, execution fell through after the retry and crashed
                # with an AttributeError on result.split() since result was never assigned
                return
            if not result:
                # disambiguation retry failed (SR_ERROR) — nothing to read out
                return
            # stops with two sentences before reading whole passage
            formatted = '. '.join(result.split('. ')[0:2]) + '.'
            speaker.say(formatted)
            speaker.say("Do you want me to continue sir?")  # gets confirmation to read the whole passage
            speaker.runAndWait()
            response = listener(3, 3)
            if response != 'SR_ERROR':
                if any(word in response.lower() for word in keywords.ok()):
                    speaker.say('. '.join(result.split('. ')[3:-1]))
    else:
        sys.stdout.write("\r")
        speaker.say("I'm sorry sir, I didn't get your response.")
def news(news_source: str = 'fox') -> None:
    """Says news around the user's location.

    Args:
        news_source: Source from where the news has to be fetched. Defaults to ``fox``.
    """
    sys.stdout.write(f'\rGetting news from {news_source} news.')
    client = NewsApiClient(api_key=news_api)
    try:
        headlines = client.get_top_headlines(sources=f'{news_source}-news')
    except newsapi_exception.NewsAPIException:
        headlines = None
        speaker.say("I wasn't able to get the news sir! I think the News API broke, you may try after sometime.")
    if headlines:
        speaker.say("News around you!")
        for article in headlines['articles']:
            speaker.say(article['title'])
        # offline mode stops after queueing the titles; interactive mode gets a sign-off line
        if os.environ.get('called_by_offline'):
            return
        speaker.say("That's the end of news around you.")
    if report.has_been_called or time_travel.has_been_called:
        speaker.runAndWait()
def apps(keyword: str or None) -> None:
    """Launches the requested application and if Jarvis is unable to find the app, asks for the app name from the user.

    Args:
        keyword: Gets app name as an argument to launch the application.
    """
    offline = os.environ.get('called_by_offline')
    ignore = ['app', 'application']
    if not keyword or keyword in ignore:
        speaker.say("Which app shall I open sir?")
        if offline:
            return
        speaker.runAndWait()
        keyword = listener(3, 4)
        if keyword != 'SR_ERROR':
            if 'exit' in keyword or 'quit' in keyword or 'Xzibit' in keyword:
                return
        else:
            speaker.say("I didn't quite get that. Try again.")
            apps(None)
            # fix: without this return, the outer frame continued with keyword == 'SR_ERROR'
            # and scanned /Applications for the literal error string
            return
    v = (check_output("ls /Applications/", shell=True))
    apps_ = (v.decode('utf-8').split('\n'))
    app_check = False
    for app in apps_:
        # NOTE(review): keyword is used as a regex pattern; phrases with special characters
        # (e.g. 'c++') may misbehave — confirm whether re.escape() is desired here
        if re.search(keyword, app, flags=re.IGNORECASE) is not None:
            keyword = app
            app_check = True
            break
    if not app_check:
        speaker.say(f"I did not find the app {keyword}. Try again.")
        if offline:
            return
        apps(None)
        return  # the recursive call handles the retry end to end
    else:
        app_status = os.system(f"open /Applications/'{keyword}' > /dev/null 2>&1")
        keyword = keyword.replace('.app', '')
        if app_status == 256:  # 'open' exit status 1 reported through os.system
            speaker.say(f"I'm sorry sir! I wasn't able to launch {keyword}. "
                        f"You might need to check its permissions.")
        else:
            speaker.say(f"I have opened {keyword}")
def robinhood() -> None:
    """Gets investment details from robinhood API."""
    sys.stdout.write('\rGetting your investment details.')
    client = Robinhood()
    client.login(username=robinhood_user, password=robinhood_pass, qr_code=robinhood_qr)
    holdings = client.positions()['results']
    # summarise the portfolio via the gatherer helper
    stock_value = RobinhoodGatherer().watcher(client, holdings)
    sys.stdout.write(f'\r{stock_value}')
    speaker.say(stock_value)
    sys.stdout.write("\r")
def repeater() -> None:
    """Repeats whatever is heard."""
    speaker.say("Please tell me what to repeat.")
    speaker.runAndWait()
    keyword = listener(3, 10)
    if keyword == 'SR_ERROR':
        return
    sys.stdout.write(f'\r{keyword}')
    # stay silent on exit phrases, echo everything else
    if not ('exit' in keyword or 'quit' in keyword or 'Xzibit' in keyword):
        speaker.say(f"I heard {keyword}")
def chatter_bot() -> None:
    """Initiates chatter bot.

    Trains the ChatterBot english corpus on first use (artifacts: ``db.sqlite3`` and the user's
    ``nltk_data`` directory) and reuses the stored database on subsequent runs. The training
    artifacts are deleted when the user says an exit keyword.
    """
    file1 = 'db.sqlite3'
    file2 = f"/Users/{os.environ.get('USER')}/nltk_data"
    if os.path.isfile(file1) and os.path.isdir(file2):
        # artifacts present: load without re-training
        bot = ChatBot("Chatterbot", storage_adapter="chatterbot.storage.SQLStorageAdapter")
    else:
        speaker.say('Give me a moment while I train the module.')
        speaker.runAndWait()
        bot = ChatBot("Chatterbot", storage_adapter="chatterbot.storage.SQLStorageAdapter")
        trainer = ChatterBotCorpusTrainer(bot)
        trainer.train("chatterbot.corpus.english")
        speaker.say('The chat-bot is ready. You may start a conversation now.')
    speaker.runAndWait()
    keyword = listener(3, 5)
    if keyword != 'SR_ERROR':
        if any(word in keyword.lower() for word in keywords.exit()):
            speaker.say('Let me remove the training modules.')
            # removes the sqlite db and nltk data so the next launch re-trains from scratch
            os.system('rm db* > /dev/null 2>&1')
            os.system(f'rm -rf {file2}')
        else:
            response = bot.get_response(keyword)
            # NOTE(review): 'What is AI?' is presumably ChatterBot's fallback reply — treated as "no answer"
            if response == 'What is AI?':
                speaker.say(f'The chat bot is unable to get a response for the phrase, {keyword}. Try something else.')
            else:
                speaker.say(f'{response}')
            speaker.runAndWait()
            chatter_bot()  # keep the conversation going until an exit keyword is heard
def device_selector(converted: str = None) -> AppleDevice:
    """Selects a device using the received input string.

    See Also:
        - Opens an html table with the index value and name of device.
        - When chosen an index value, the device name will be returned.

    Args:
        converted: Takes the voice recognized statement as argument.

    Returns:
        AppleDevice:
        Returns the selected device from the class ``AppleDevice``
    """
    icloud_api = PyiCloudService(icloud_user, icloud_pass)
    devices = [device for device in icloud_api.devices]
    target_device = None
    if converted:
        # todo: remove this and automate the work some how, consider loading a mapping file upon start up
        # fix: the "fifth" entry previously listed '4th' (copy-paste of the fourth entry), so saying
        # "4th" matched both keys and — with last-match-wins below — selected device 5 instead of 4.
        nth = {"first or 1st or number one or 1": 1, "second or 2nd or two or 2": 2, "third or 3rd or three or 3": 3,
               "fourth or 4th or four or 4": 4, "fifth or 5th or five or 5": 5, "sixth or 6th or six or 6": 6,
               "seventh or 7th or seven or 7": 7, "eighth or 8th or eight or 8": 8, "ninth or 9th or nine or 9": 9,
               "tenth or 10th or ten or 10": 10, "eleventh or 11th or eleven or 11": 11,
               "twelfth or 12th or twelve or 12": 12}
        inject_data = ''
        for index, device in enumerate(devices):
            # fix: rows previously closed <td> cells with </th>
            inject_data += f"""<tr>\n\t<td>{index + 1}</td>\n\t<td>{device}</td>\n</tr>\n"""
        inject_data += """</table>\n</body>\n</html>"""
        styling = """<head>\n<style>table {\n\tfont-family: arial, sans-serif;\n\tborder-collapse: collapse;
        width: 100%;\n}\n\ntd, th {\n\tborder: 1px solid #dddddd;\n\ttext-align: left;\n\tpadding: 8px;\n}\n\n
        tr:nth-child(even) {\n\tbackground-color: #dddddd;\n}\n</style>\n</head>"""
        html_data = f"""<!DOCTYPE html>\n<html>\n{styling}\n<body>\n<h2>Choose an index value:</h2>\n<table>\n\t
        <tr>\n\t<th>Index</th>\n\t<th>Device Info</th>\n\t</tr>\n\t{inject_data}"""
        devices_file = 'devices.html'
        with open(devices_file, 'w') as file:
            file.write(html_data)
        web_open('file:///' + os.getcwd() + '/' + devices_file)
        speaker.say('Choose an option from your screen sir!')
        speaker.runAndWait()
        converted = listener(5, 5)
        os.remove(devices_file) if os.path.isfile(devices_file) else None
        if converted != 'SR_ERROR':
            # intentionally no break: e.g. "11" also substring-matches '1', and the LAST match
            # must win so that "11" resolves to device 11 rather than device 1
            for key, value in nth.items():
                for k in key.split(' or '):
                    if k in converted:
                        target_device = icloud_api.devices[value - 1]  # index in html and nth dict are incremented by 1
    else:
        # no phrase given: default to the device whose name matches this machine's hostname
        target_device = [device for device in devices if device.get('name') == gethostname() or
                         gethostname() == device.get('name') + '.local'][0]
    return target_device if target_device else icloud_api.iphone
def location() -> None:
    """Gets the user's current location."""
    spot = location_info
    speaker.say(f"You're at {spot['city']} {spot['state']}, in {spot['country']}")
def locate(converted: str, no_repeat: bool = False) -> None:
    """Locates an apple device using icloud api for python.

    Args:
        no_repeat: A place holder flag switched during ``recursion`` so that, ``Jarvis`` doesn't repeat himself.
        converted: Takes the voice recognized statement as argument and extracts device name from it.
    """
    target_device = device_selector(converted)
    sys.stdout.write(f"\rLocating your {target_device}")
    if no_repeat:
        # retry pass: the device already rang on the first pass, only re-ask the question
        speaker.say("Would you like to get the location details?")
    else:
        target_device.play_sound()
        before_keyword, keyword, after_keyword = str(target_device).partition(':')  # partitions the hostname info
        speaker.say(f"Your {before_keyword} should be ringing now sir!")
        speaker.runAndWait()
        speaker.say("Would you like to get the location details?")
    speaker.runAndWait()
    phrase = listener(3, 3)
    if phrase == 'SR_ERROR':
        if no_repeat:
            return
        speaker.say("I didn't quite get that. Try again.")
        locate(converted=converted, no_repeat=True)
    else:
        if any(word in phrase.lower() for word in keywords.ok()):
            # location_services returns the sentinel 'None' strings when the device is offline
            ignore_lat, ignore_lon, location_info_ = location_services(target_device)
            lookup = str(target_device).split(':')[0].strip()
            if location_info_ == 'None':
                speaker.say(f"I wasn't able to locate your {lookup} sir! It is probably offline.")
            else:
                # spell out the zipcode digit by digit for clearer TTS output
                post_code = '"'.join(list(location_info_['postcode'].split('-')[0]))
                iphone_location = f"Your {lookup} is near {location_info_['road']}, {location_info_['city']} " \
                                  f"{location_info_['state']}. Zipcode: {post_code}, {location_info_['country']}"
                speaker.say(iphone_location)
                stat = target_device.status()
                bat_percent = f"Battery: {round(stat['batteryLevel'] * 100)} %, " if stat['batteryLevel'] else ''
                device_model = stat['deviceDisplayName']
                phone_name = stat['name']
                speaker.say(f"Some more details. {bat_percent} Name: {phone_name}, Model: {device_model}")
                speaker.say("I can also enable lost mode. Would you like to do it?")
                speaker.runAndWait()
                phrase = listener(3, 3)
                if any(word in phrase.lower() for word in keywords.ok()):
                    message = 'Return my phone immediately.'
                    target_device.lost_device(number=icloud_recovery, text=message)
                    speaker.say("I've enabled lost mode on your phone.")
        else:
            speaker.say("No action taken sir!")
def music(device: str = None) -> None:
    """Scans music directory in the user profile for ``.mp3`` files and plays using default player.

    Args:
        device: Takes device name as argument.
    """
    sys.stdout.write("\rScanning music files...")
    # walk the user's Music directory and keep only the mp3 files
    mp3_files = [os.path.join(root, file_)
                 for root, _, files in os.walk(f"{home}/Music")
                 for file_ in files if os.path.splitext(file_)[1] == '.mp3']
    chosen = choice(mp3_files)
    if device:
        google_home(device, chosen)
    else:
        call(["open", chosen])
        sys.stdout.write("\r")
        speaker.say("Enjoy your music sir!")
def gmail() -> None:
    """Reads unread emails from the gmail account for which the credentials are stored in env variables."""
    sys.stdout.write("\rFetching unread emails..")
    offline = os.environ.get('called_by_offline')
    try:
        mail = IMAP4_SSL('imap.gmail.com')  # connects to imaplib
        mail.login(gmail_user, gmail_pass)
        mail.list()
        mail.select('inbox')  # choose inbox
    except TimeoutError as TimeOut:
        logger.error(TimeOut)
        speaker.say("I wasn't able to check your emails sir. You might need to check to logs.")
        return
    return_code, messages = mail.search(None, 'UNSEEN')  # looks for unread emails
    if return_code == 'OK':
        n = len(messages[0].split())
    else:
        speaker.say("I'm unable access your email sir.")
        return
    if n == 0:
        speaker.say("You don't have any emails to catch up sir")
        return
    else:
        # NOTE(review): with this nesting, offline callers never reach the read-out loop below,
        # which makes the trailing 'if not offline' guard on runAndWait redundant — confirm intent
        if not offline:
            speaker.say(f'You have {n} unread emails sir. Do you want me to check it?')
            speaker.runAndWait()
            response = listener(3, 3)
            if response != 'SR_ERROR':
                if any(word in response.lower() for word in keywords.ok()):
                    for nm in messages[0].split():
                        ignore, mail_data = mail.fetch(nm, '(RFC822)')
                        for response_part in mail_data:
                            if isinstance(response_part, tuple):  # checks for type(response_part)
                                original_email = message_from_bytes(response_part[1])
                                # decode MIME-encoded sender name (text before the ' <address>' part)
                                sender = make_header(decode_header((original_email['From']).split(' <')[0]))
                                subject = make_header(decode_header(original_email['Subject'])) \
                                    if original_email['Subject'] else None
                                if subject:
                                    subject = ''.join(str(subject).splitlines())
                                raw_receive = (original_email['Received'].split(';')[-1]).strip()
                                # NOTE(review): the +2h shift hard-codes a conversion from US Pacific
                                # offsets to the user's local time — confirm against the deployment timezone
                                if '(PDT)' in raw_receive:
                                    datetime_obj = datetime.strptime(raw_receive, "%a, %d %b %Y %H:%M:%S -0700 (PDT)") \
                                        + timedelta(hours=2)
                                elif '(PST)' in raw_receive:
                                    datetime_obj = datetime.strptime(raw_receive, "%a, %d %b %Y %H:%M:%S -0800 (PST)") \
                                        + timedelta(hours=2)
                                else:
                                    sys.stdout.write(f'\rEmail from {sender} has a weird time stamp. Please check.')
                                    datetime_obj = datetime.now()  # sets to current date if PST or PDT are not found
                                received_date = datetime_obj.strftime("%Y-%m-%d")
                                current_date_ = datetime.today().date()
                                yesterday = current_date_ - timedelta(days=1)
                                # replaces current date with today or yesterday
                                if received_date == str(current_date_):
                                    receive = datetime_obj.strftime("Today, at %I:%M %p")
                                elif received_date == str(yesterday):
                                    receive = datetime_obj.strftime("Yesterday, at %I:%M %p")
                                else:
                                    receive = datetime_obj.strftime("on %A, %B %d, at %I:%M %p")
                                sys.stdout.write(f'\rReceived:{receive}\tSender: {sender}\tSubject: {subject}')
                                speaker.say(f"You have an email from, {sender}, with subject, {subject}, {receive}")
                                speaker.runAndWait() if not offline else None
def meaning(keyword: str or None) -> None:
    """Gets meaning for a word skimmed from the user statement using PyDictionary.

    Args:
        keyword: Takes a keyword as argument for which the meaning was requested.
    """
    offline = os.environ.get('called_by_offline')
    dictionary = PyDictionary()
    if keyword == 'word':
        keyword = None
    if keyword is None:
        # no keyword extracted from the phrase: ask for one and recurse with the answer
        speaker.say("Please tell a keyword.")
        speaker.runAndWait()
        response = listener(3, 3)
        if response != 'SR_ERROR' and not any(word in response.lower() for word in keywords.exit()):
            meaning(keyword=response)
        return
    definition = dictionary.meaning(keyword)
    if not definition:
        if offline:
            return
        speaker.say("Keyword should be a single word sir! Try again")
        meaning(None)
        return
    # read out the first two meanings per part of speech
    for index, (part_of_speech, senses) in enumerate(definition.items()):
        article = 'an' if part_of_speech[0] in ('A', 'E', 'I', 'O', 'U') else 'a'
        repeat = 'also' if index else ''
        mean = ', '.join(senses[0:2])
        speaker.say(f'{keyword} is {repeat} {article} {part_of_speech}, which means {mean}.')
    if offline:
        return
    speaker.say(f'Do you wanna know how {keyword} is spelled?')
    speaker.runAndWait()
    response = listener(3, 3)
    if any(word in response.lower() for word in keywords.ok()):
        for letter in keyword.lower():
            speaker.say(letter)
        speaker.runAndWait()
    return
def create_db() -> None:
    """Creates a database for to-do list by calling the ``create_db`` function in ``database`` module."""
    speaker.say(database.create_db())
    # resume whichever caller requested the database, resetting its flag before re-entry
    for caller in (todo, add_todo):
        if caller.has_been_called:
            caller.has_been_called = False
            caller()
            break
def todo(no_repeat: bool = False) -> None:
    """Says the item and category stored in the to-do list.

    Args:
        no_repeat: A place holder flag switched during ``recursion`` so that, ``Jarvis`` doesn't repeat himself.
    """
    sys.stdout.write("\rLooking for to-do database..")
    if not os.path.isfile(file_name) and (time_travel.has_been_called or report.has_been_called):
        # morning-report flows silently skip a missing database instead of offering to create one
        pass
    elif not os.path.isfile(file_name):
        if os.environ.get('called_by_offline'):
            # fix: grammar in the spoken message ("Your don't" -> "You don't")
            speaker.say("You don't have any items in your to-do list sir!")
            return
        if no_repeat:
            speaker.say("Would you like to create a database for your to-do list?")
        else:
            speaker.say("You don't have a database created for your to-do list sir. Would you like to spin up one now?")
        speaker.runAndWait()
        key = listener(3, 3)
        if key != 'SR_ERROR':
            if any(word in key.lower() for word in keywords.ok()):
                todo.has_been_called = True  # create_db() uses this flag to re-enter todo() after creation
                sys.stdout.write("\r")
                create_db()
            else:
                return
        else:
            if no_repeat:
                return
            speaker.say("I didn't quite get that. Try again.")
            todo(no_repeat=True)
    else:
        sys.stdout.write("\rQuerying DB for to-do list..")
        result = {}
        for category, item in database.downloader():
            # condition below makes sure one category can have multiple items without repeating category for each item
            if category not in result:
                result.update({category: item})  # creates dict for category and item if category is not found in result
            else:
                result[category] = result[category] + ', ' + item  # updates category if already found in result
        sys.stdout.write("\r")
        if result:
            if os.environ.get('called_by_offline'):
                speaker.say(json_dumps(result))
                return
            speaker.say('Your to-do items are')
            for category, item in result.items():  # browses dictionary and stores result in response and says it
                response = f"{item}, in {category} category."
                speaker.say(response)
                sys.stdout.write(f"\r{response}")
        elif report.has_been_called and not time_travel.has_been_called:
            speaker.say("You don't have any tasks in your to-do list sir.")
        elif time_travel.has_been_called:
            pass
        else:
            speaker.say("You don't have any tasks in your to-do list sir.")
    if report.has_been_called or time_travel.has_been_called:
        speaker.runAndWait()
def add_todo() -> None:
    """Adds new items to the to-do list.

    Notes:
        - Offers to create the database first when ``file_name`` (tasks.db) is missing.
        - Recurses until the user declines to add another item.
    """
    sys.stdout.write("\rLooking for to-do database..")
    # if database file is not found calls create_db()
    if not os.path.isfile(file_name):
        sys.stdout.write("\r")
        speaker.say("You don't have a database created for your to-do list sir.")
        speaker.say("Would you like to spin up one now?")
        speaker.runAndWait()
        key = listener(3, 3)
        if key != 'SR_ERROR':
            if any(word in key.lower() for word in keywords.ok()):
                add_todo.has_been_called = True
                sys.stdout.write("\r")
                create_db()
            else:
                return
        # NOTE(review): on 'SR_ERROR' execution falls through and still asks for an
        # item even though no database was created - confirm this is intended.
    speaker.say("What's your plan sir?")
    speaker.runAndWait()
    item = listener(3, 5)
    if item != 'SR_ERROR':
        if 'exit' in item or 'quit' in item or 'Xzibit' in item:
            speaker.say('Your to-do list has been left intact sir.')
        else:
            sys.stdout.write(f"\rItem: {item}")
            speaker.say(f"I heard {item}. Which category you want me to add it to?")
            speaker.runAndWait()
            category = listener(3, 3)
            # an unrecognized category defaults to 'Unknown' instead of aborting
            if category == 'SR_ERROR':
                category = 'Unknown'
            if 'exit' in category or 'quit' in category or 'Xzibit' in category:
                speaker.say('Your to-do list has been left intact sir.')
            else:
                sys.stdout.write(f"\rCategory: {category}")
                # passes the category and item to uploader() in helper_functions/database.py which updates the database
                response = database.uploader(category, item)
                speaker.say(response)
                speaker.say("Do you want to add anything else to your to-do list?")
                speaker.runAndWait()
                category_continue = listener(3, 3)
                # 'SR_ERROR' won't match keywords.ok(), so it is treated like a "no"
                if any(word in category_continue.lower() for word in keywords.ok()):
                    add_todo()
                else:
                    speaker.say('Alright')
def delete_todo() -> None:
    """Deletes items from an existing to-do list."""
    sys.stdout.write("\rLooking for to-do database..")
    if not os.path.isfile(file_name):
        speaker.say("You don't have a database created for your to-do list sir.")
        return
    speaker.say("Which one should I remove sir?")
    speaker.runAndWait()
    item = listener(3, 5)
    if item == 'SR_ERROR':
        return
    if 'exit' in item or 'quit' in item or 'Xzibit' in item:
        return
    response = database.deleter(item)
    # a response starting with 'Looks' means the item wasn't matched for deletion
    if not response.startswith('Looks'):
        speaker.say(response)
        return
    # no match: show the message and retry from the top
    sys.stdout.write(f'\r{response}')
    speaker.say(response)
    speaker.runAndWait()
    delete_todo()
def delete_db() -> None:
    """Deletes the ``tasks.db`` database file after getting confirmation.

    Notes:
        - Does nothing beyond a spoken notice when the database file is missing.
        - A speech-recognition failure leaves the database untouched.
    """
    if not os.path.isfile(file_name):
        speaker.say('I did not find any database sir.')
        return
    # removed the redundant `else` after `return` and the trailing bare `return`
    speaker.say(f'{choice(confirmation)} delete your database?')
    speaker.runAndWait()
    response = listener(3, 3)
    if response == 'SR_ERROR':  # recognition failed; bail silently, same as before
        return
    if any(word in response.lower() for word in keywords.ok()):
        os.remove(file_name)
        speaker.say("I've removed your database sir.")
    else:
        speaker.say("Your database has been left intact sir.")
def distance(starting_point: str = None, destination: str = None) -> None:
    """Calculates distance between two locations.

    Notes:
        - If starting point is None, Jarvis takes the current location as starting point.
        - If destination is None, Jarvis will ask for a destination from the user.

    Args:
        starting_point: Takes the starting place name as an optional argument.
        destination: Takes the destination place name as optional argument.
    """
    if not destination:
        speaker.say("Destination please?")
        if os.environ.get('called_by_offline'):
            return
        speaker.runAndWait()
        destination = listener(3, 4)
        if destination == 'SR_ERROR':
            return  # fix: don't geocode the recognition-error sentinel
        if len(destination.split()) > 2:
            speaker.say("I asked for a destination sir, not a sentence. Try again.")
            distance()
            return  # fix: the retry handled the request; don't continue with the sentence
        if 'exit' in destination or 'quit' in destination or 'Xzibit' in destination:
            return
    if starting_point:
        # if starting_point is received gets latitude and longitude of that location
        desired_start = geo_locator.geocode(starting_point)
        sys.stdout.write(f"\r{desired_start.address} **")
        start = desired_start.latitude, desired_start.longitude
        start_check = None
    else:
        # else gets latitude and longitude information of current location
        start = (current_lat, current_lon)
        start_check = 'My Location'
    sys.stdout.write("::TO::" if starting_point else "\r::TO::")  # plain conditional value, not side-effect ternary
    desired_location = geo_locator.geocode(destination)
    if desired_location:
        end = desired_location.latitude, desired_location.longitude
    else:
        # NOTE(review): when geocoding fails this indexes the destination STRING,
        # yielding its first two characters - geodesic() below will likely fail; confirm.
        end = destination[0], destination[1]
    miles = round(geodesic(start, end).miles)  # calculates miles from starting point to destination
    sys.stdout.write(f"** {desired_location.address} - {miles}")
    if directions.has_been_called:
        # calculates drive time using d = s/t and distance calculation is only if location is same country
        directions.has_been_called = False
        avg_speed = 60
        t_taken = miles / avg_speed
        if miles < avg_speed:
            drive_time = int(t_taken * 60)
            speaker.say(f"It might take you about {drive_time} minutes to get there sir!")
        else:
            drive_time = ceil(t_taken)
            if drive_time == 1:
                speaker.say(f"It might take you about {drive_time} hour to get there sir!")
            else:
                speaker.say(f"It might take you about {drive_time} hours to get there sir!")
    elif start_check:
        speaker.say(f"Sir! You're {miles} miles away from {destination}.")
        if not locate_places.has_been_called:  # promotes using locate_places() function
            speaker.say(f"You may also ask where is {destination}")
    else:
        speaker.say(f"{starting_point} is {miles} miles away from {destination}.")
    return
def locate_places(place: str or None) -> None:
    """Gets location details of a place.

    Args:
        place: Takes a place name as argument.
    """
    offline = os.environ.get('called_by_offline')
    if not place:
        if offline:
            speaker.say('I need a location to get you the details sir!')
            return
        speaker.say("Tell me the name of a place!")
        speaker.runAndWait()
        converted = listener(3, 4)
        if converted == 'SR_ERROR':
            return  # fix: previously fell through and geocoded place=None
        if 'exit' in converted or 'quit' in converted or 'Xzibit' in converted:
            return
        place = ''  # fix: was never initialized, so `place += word` raised TypeError on None
        # capitalized words (and dotted abbreviations) are assumed to be the place name
        for word in converted.split():
            if word[0].isupper():
                place += word + ' '
            elif '.' in word:
                place += word + ' '
        place = place.strip()  # strip trailing space so the `in` comparisons below work (matches directions())
        if not place:
            # fall back to whatever follows "is" in the sentence
            keyword = 'is'
            before_keyword, keyword, after_keyword = converted.partition(keyword)
            place = after_keyword.replace(' in', '').strip()
    try:
        destination_location = geo_locator.geocode(place)
        coordinates = destination_location.latitude, destination_location.longitude
        located = geo_locator.reverse(coordinates, language='en')
        data = located.raw
        address = data['address']
        # dict.get() replaces the mixed `'key' in address` / `.keys()` lookups
        county = address.get('county')
        city = address.get('city')
        state = address.get('state')
        country = address.get('country')
        if place in country:
            speaker.say(f"{place} is a country")
        elif place in (city or county):
            speaker.say(f"{place} is in {state}" if country == location_info['country'] else f"{place} is in "
                                                                                            f"{state} in {country}")
        elif place in state:
            speaker.say(f"{place} is a state in {country}")
        elif (city or county) and state and country:
            speaker.say(f"{place} is in {city or county}, {state}" if country == location_info['country']
                        else f"{place} is in {city or county}, {state}, in {country}")
        if offline:
            return
        locate_places.has_been_called = True
    except (TypeError, AttributeError):
        speaker.say(f"{place} is not a real place on Earth sir! Try again.")
        if offline:
            return
        locate_places(place=None)
        return  # fix: don't fall through to distance() with the unresolvable place
    distance(starting_point=None, destination=place)
def directions(place: str or None) -> None:
    """Opens google maps for a route between starting and destination.

    Uses reverse geocoding to calculate latitude and longitude for both start and destination.

    Args:
        place: Takes a place name as argument.
    """
    if not place:
        speaker.say("You might want to give a location.")
        speaker.runAndWait()
        converted = listener(3, 4)
        if converted == 'SR_ERROR':
            return  # fix: previously left place=None and crashed on `'exit' in place`
        place = ''
        # capitalized words (and dotted abbreviations) are assumed to be the place name
        for word in converted.split():
            if word[0].isupper():
                place += word + ' '
            elif '.' in word:
                place += word + ' '
        place = place.replace('I ', '').strip()
    if not place:
        speaker.say("I can't take you to anywhere without a location sir!")
        directions(place=None)
        return  # fix: the retry above already handled the request
    if 'exit' in place or 'quit' in place or 'Xzibit' in place:
        return
    destination_location = geo_locator.geocode(place)
    coordinates = destination_location.latitude, destination_location.longitude
    located = geo_locator.reverse(coordinates, language='en')
    data = located.raw
    address = data['address']
    end_country = address['country'] if 'country' in address else None
    end = f"{located.latitude},{located.longitude}"
    start_country = location_info['country']
    start = current_lat, current_lon
    maps_url = f'https://www.google.com/maps/dir/{start}/{end}/'
    web_open(maps_url)
    speaker.say("Directions on your screen sir!")
    if start_country and end_country:
        # drive-time estimate only makes sense within the same country
        if re.match(start_country, end_country, flags=re.IGNORECASE):
            directions.has_been_called = True
            distance(starting_point=None, destination=place)
        else:
            speaker.say("You might need a flight to get there!")
    return
def alarm(msg: str) -> None:
    """Passes hour, minute and am/pm to ``Alarm`` class which initiates a thread for alarm clock in the background.

    Args:
        msg: Takes the voice recognized statement as argument and extracts time from it.
    """
    extracted_time = re.findall(r'([0-9]+:[0-9]+\s?(?:a.m.|p.m.:?))', msg) or \
        re.findall(r'([0-9]+\s?(?:a.m.|p.m.:?))', msg) or re.findall(r'([0-9]+\s?(?:am|pm:?))', msg)
    if extracted_time:
        extracted_time = extracted_time[0]
        am_pm = extracted_time.split()[-1]
        # normalize meridian once for the lock-file name (a duplicated, no-op second
        # replacement has been removed)
        am_pm = str(am_pm).replace('a.m.', 'AM').replace('p.m.', 'PM')
        alarm_time = extracted_time.split()[0]
        if ":" in extracted_time:
            hour = int(alarm_time.split(":")[0])
            minute = int(alarm_time.split(":")[-1])
        else:
            hour = int(alarm_time.split()[0])
            minute = 0
        # makes sure hour and minutes are two digits
        hour, minute = f"{hour:02}", f"{minute:02}"
        if int(hour) <= 12 and int(minute) <= 59:
            # fix: close the lock file handle instead of leaking it
            with open(f'alarm/{hour}_{minute}_{am_pm}.lock', 'a'):
                pass  # the Alarm thread watches for this lock file
            Alarm(hour, minute, am_pm).start()
            if 'wake' in msg.lower().strip():
                speaker.say(f"{choice(ack)}! I will wake you up at {hour}:{minute} {am_pm}.")
            else:
                speaker.say(f"{choice(ack)}! Alarm has been set for {hour}:{minute} {am_pm}.")
            sys.stdout.write(f"\rAlarm has been set for {hour}:{minute} {am_pm} sir!")
        else:
            speaker.say(f"An alarm at {hour}:{minute} {am_pm}? Are you an alien? "
                        f"I don't think a time like that exists on Earth.")
    else:
        speaker.say('Please tell me a time sir!')
        if os.environ.get('called_by_offline'):
            return
        speaker.runAndWait()
        converted = listener(3, 4)
        if converted != 'SR_ERROR':
            if 'exit' in converted or 'quit' in converted or 'Xzibit' in converted:
                return
            else:
                alarm(converted)
def kill_alarm() -> None:
    """Removes lock file to stop the alarm which rings only when the certain lock file is present.

    Notes:
        - ``alarm_state`` is the list of lock files currently present.
    """
    # plain comprehension replaces the side-effect list-comp + side-effect ternary;
    # '.keep' is the directory keeper and '.DS_Store' is macOS metadata
    alarm_state = [file for file in os.listdir('alarm') if file not in ('.keep', '.DS_Store')]
    if not alarm_state:
        speaker.say("You have no alarms set sir!")
    elif len(alarm_state) == 1:
        # lock file names follow the HH_MM_AM.lock format written by alarm()
        hour, minute, am_pm = alarm_state[0][0:2], alarm_state[0][3:5], alarm_state[0][6:8]
        os.remove(f"alarm/{alarm_state[0]}")
        speaker.say(f"Your alarm at {hour}:{minute} {am_pm} has been silenced sir!")
    else:
        sys.stdout.write(f"\r{', '.join(alarm_state).replace('.lock', '')}")
        speaker.say("Please let me know which alarm you want to remove. Current alarms on your screen sir!")
        speaker.runAndWait()
        converted = listener(3, 4)
        if converted != 'SR_ERROR':
            alarm_time = converted.split()[0]
            am_pm = converted.split()[-1]
            if ":" in converted:
                hour = int(alarm_time.split(":")[0])
                minute = int(alarm_time.split(":")[-1])
            else:
                hour = int(alarm_time.split()[0])
                minute = 0
            hour, minute = f"{hour:02}", f"{minute:02}"
            am_pm = str(am_pm).replace('a.m.', 'AM').replace('p.m.', 'PM')
            if os.path.exists(f'alarm/{hour}_{minute}_{am_pm}.lock'):
                os.remove(f"alarm/{hour}_{minute}_{am_pm}.lock")
                speaker.say(f"Your alarm at {hour}:{minute} {am_pm} has been silenced sir!")
            else:
                speaker.say(f"I wasn't able to find an alarm at {hour}:{minute} {am_pm}. Try again.")
                kill_alarm()
def comma_separator(list_: list) -> str:
    """Joins list elements into a natural-language comma series.

    Args:
        list_: Takes a list of elements as an argument.

    Returns:
        str:
        Comma separated list of elements.
    """
    if len(list_) > 2:
        # "a, b, and c" - Oxford comma before the final element
        return f"{', '.join(list_[:-1])}, and {list_[-1]}"
    # zero/one element passes through; two become "a, and b"
    return ', and '.join(list_)
def google_home(device: str = None, file: str = None) -> None:
    """Uses ``socket lib`` to extract ip address and scan ip range for google home devices.

    Notes:
        - Can also play music on multiple devices at once.

    See Also:
        - Changes made to google-home-push module:
            1. Modified the way local IP is received: https://github.com/deblockt/google-home-push/pull/7
            2. Instead of commenting/removing the final print statement on: site-packages/googlehomepush/__init__.py
                - I have used ``sys.stdout = open(os.devnull, 'w')`` to suppress any print statements.
                - To enable this again at a later time use ``sys.stdout = sys.__stdout__``
        - When music is played and immediately stopped/tasked the google home device, it is most likely to except.
            - Broken Pipe error. This usually happens when a socket is written after it is fully closed.
            - This error occurs when one end of the connection tries sending data while the other has closed the connection.
            - This can simply be ignored or handled adding the code below in socket module (NOT PREFERRED).

        .. code-block:: python

            except IOError as error:
                import errno
                if error.errno != errno.EPIPE:
                    sys.stdout.write(error)

    Args:
        device: Name of the google home device on which the music has to be played.
        file: Scanned audio file to be played.
    """
    network_id = vpn_checker()
    if network_id.startswith('VPN'):
        return
    if not os.environ.get('called_by_offline'):
        speaker.say('Scanning your IP range for Google Home devices sir!')
        sys.stdout.write('\rScanning your IP range for Google Home devices..')
        speaker.runAndWait()
    network_id = '.'.join(network_id.split('.')[0:3])

    def ip_scan(host_id: int) -> Tuple[str, str]:
        """Scans the IP range using the received args as host id in an IP address.

        Args:
            host_id: Host ID passed in a multi-threaded fashion to scan for google home devices in IP range.

        Returns:
            Tuple(str, str):
            Device name and it's IP address.
        """
        try:
            device_info = GoogleHome(host=f"{network_id}.{host_id}").cc
            device_info = str(device_info)
            device_name = device_info.split("'")[3]
            device_ip = device_info.split("'")[1]
            # port = sample.split("'")[2].split()[1].replace(',', '')
            return device_name, device_ip
        except ChromecastConnectionError:
            pass

    # multi-threaded scan keeps total time under ~10 seconds (a sequential scan takes minutes)
    devices = []
    with ThreadPoolExecutor(max_workers=100) as executor:  # fix: comment now matches the code - 100 workers
        for info in executor.map(ip_scan, range(1, 101)):  # scans host IDs 1 to 100 (eg: 192.168.1.1 to 192.168.1.100)
            devices.append(info)  # this includes all the NoneType values returned by unassigned host IDs
    devices = dict([i for i in devices if i])  # removes None values and converts list to dictionary of name and ip pair
    if not device or not file:
        sys.stdout.write("\r")
        speaker.say(f"You have {len(devices)} devices in your IP range sir! {comma_separator(list(devices.keys()))}. "
                    f"You can choose one and ask me to play some music on any of these.")
        return
    chosen = [value for key, value in devices.items() if key.lower() in device.lower()]
    if not chosen:
        speaker.say("I don't see any matching devices sir!. Let me help you.")
        google_home()
        return  # fix: previously fell through and announced playback on zero devices
    for target in chosen:
        file_url = serve_file(file, "audio/mp3")  # serves the file on local host and generates the play url
        sys.stdout.write("\r")
        sys.stdout = open(os.devnull, 'w')  # suppresses print statement from "googlehomepush/__init.py__"
        GoogleHome(host=target).play(file_url, "audio/mp3")
        sys.stdout = sys.__stdout__  # removes print statement's suppression above
    if len(chosen) == 1:  # plain conditional instead of a side-effect ternary
        speaker.say("Enjoy your music sir!")
    else:
        speaker.say(f"That's interesting, you've asked me to play on {len(chosen)} devices at a time. "
                    f"I hope you'll enjoy this sir.")
    speaker.runAndWait()
def jokes() -> None:
    """Uses jokes lib to say chucknorris jokes."""
    joke_source = choice([geek, icanhazdad, chucknorris, icndb])
    speaker.say(joke_source())
def reminder(converted: str) -> None:
    """Passes hour, minute, am/pm and reminder message to Reminder class which initiates a thread for reminder.

    Args:
        converted: Takes the voice recognized statement as argument and extracts the time and message from it.
    """
    # the reminder message is whatever follows ' to ' or ' about ', preferring the
    # variant bounded by ' at ' so the time isn't swallowed into the message
    message = re.search(' to (.*) at ', converted) or re.search(' about (.*) at ', converted)
    if not message:
        message = re.search(' to (.*)', converted) or re.search(' about (.*)', converted)
        if not message:
            speaker.say('Reminder format should be::Remind me to do something, at some time.')
            sys.stdout.write('\rReminder format should be::Remind ME to do something, AT some time.')
            return
    # time is either HH:MM or a bare hour, followed by an a.m./p.m. marker
    extracted_time = re.findall(r'([0-9]+:[0-9]+\s?(?:a.m.|p.m.:?))', converted) or re.findall(
        r'([0-9]+\s?(?:a.m.|p.m.:?))', converted)
    if not extracted_time:
        if os.environ.get('called_by_offline'):
            speaker.say('Reminder format should be::Remind me to do something, at some time.')
            return
        speaker.say("When do you want to be reminded sir?")
        speaker.runAndWait()
        converted = listener(3, 4)
        if converted != 'SR_ERROR':
            extracted_time = re.findall(r'([0-9]+:[0-9]+\s?(?:a.m.|p.m.:?))', converted) or re.findall(
                r'([0-9]+\s?(?:a.m.|p.m.:?))', converted)
        else:
            return
    if message and extracted_time:
        to_about = 'about' if 'about' in converted else 'to'
        message = message.group(1).strip()
        extracted_time = extracted_time[0]
        am_pm = extracted_time.split()[-1]
        am_pm = str(am_pm).replace('a.m.', 'AM').replace('p.m.', 'PM')
        alarm_time = extracted_time.split()[0]
        if ":" in extracted_time:
            hour = int(alarm_time.split(":")[0])
            minute = int(alarm_time.split(":")[-1])
        else:
            hour = int(alarm_time.split()[0])
            minute = 0
        # makes sure hour and minutes are two digits
        hour, minute = f"{hour:02}", f"{minute:02}"
        if int(hour) <= 12 and int(minute) <= 59:
            # the lock file name encodes time and message; the Reminder thread watches for it
            open(f'reminder/{hour}_{minute}_{am_pm}|{message.replace(" ", "_")}.lock', 'a')
            Reminder(hour, minute, am_pm, message).start()
            speaker.say(f"{choice(ack)}! I will remind you {to_about} {message}, at {hour}:{minute} {am_pm}.")
            sys.stdout.write(f"\r{message} at {hour}:{minute} {am_pm}")
        else:
            speaker.say(f"A reminder at {hour}:{minute} {am_pm}? Are you an alien? "
                        f"I don't think a time like that exists on Earth.")
    else:
        speaker.say('Reminder format should be::Remind me to do something, at some time.')
        sys.stdout.write('Reminder format should be::Remind ME to do something, AT some time.')
        return
def google_maps(query: str) -> bool:
    """Uses google's places api to get places near by or any particular destination.

    This function is triggered when the words in user's statement doesn't match with any predefined functions.

    Args:
        query: Takes the voice recognized statement as argument.

    Returns:
        bool:
        Boolean True if google's maps API is unable to fetch consumable results.
    """
    maps_url = "https://maps.googleapis.com/maps/api/place/textsearch/json?"
    response = get(maps_url + 'query=' + query + '&key=' + maps_api)
    collection = response.json()['results']
    required = []
    for element in collection:  # iterate results directly instead of indexing via range(len(...))
        try:
            name = element['name']
            rating = element['rating']
            full_address = element['formatted_address']
            geometry = element['geometry']['location']
            address = re.search('(.*)Rd|(.*)Ave|(.*)St |(.*)St,|(.*)Blvd|(.*)Ct', full_address)
            address = address.group().replace(',', '')
            new_dict = {"Name": name, "Rating": rating, "Address": address, "Location": geometry, "place": full_address}
            required.append(new_dict)
        except (AttributeError, KeyError):
            # skip results without a rating or a parsable street address
            pass
    if required:
        required = sorted(required, key=lambda sort: sort['Rating'], reverse=True)
    else:
        return True
    results = len(required)
    if results != 1:  # plain conditional instead of a side-effect ternary
        speaker.say(f"I found {results} results sir!")
    start = current_lat, current_lon
    n = 0
    for item in required:
        item['Address'] = item['Address'].replace(' N ', ' North ').replace(' S ', ' South ').replace(' E ', ' East ') \
            .replace(' W ', ' West ').replace(' Rd', ' Road').replace(' St', ' Street').replace(' Ave', ' Avenue') \
            .replace(' Blvd', ' Boulevard').replace(' Ct', ' Court')
        # noinspection PyTypeChecker,PyUnresolvedReferences
        latitude, longitude = item['Location']['lat'], item['Location']['lng']
        end = f"{latitude},{longitude}"
        far = round(geodesic(start, end).miles)
        miles = f'{far} miles' if far > 1 else f'{far} mile'
        n += 1
        if results == 1:
            option = 'only option I found is'
            next_val = "Do you want to head there sir?"
        elif n <= 2:
            option = f'{engine().ordinal(n)} option is'
            next_val = "Do you want to head there sir?"
        elif n <= 5:
            option = 'next option would be'
            next_val = "Would you like to try that?"
        else:
            option = 'other'
            next_val = 'How about that?'
        speaker.say(f"The {option}, {item['Name']}, with {item['Rating']} rating, "
                    f"on{''.join([j for j in item['Address'] if not j.isdigit()])}, which is approximately "
                    f"{miles} away.")
        speaker.say(f"{next_val}")
        sys.stdout.write(f"\r{item['Name']} -- {item['Rating']} -- "
                         f"{''.join([j for j in item['Address'] if not j.isdigit()])}")
        speaker.runAndWait()
        converted = listener(3, 3)
        if converted != 'SR_ERROR':
            if 'exit' in converted or 'quit' in converted or 'Xzibit' in converted:
                break
            elif any(word in converted.lower() for word in keywords.ok()):
                maps_url = f'https://www.google.com/maps/dir/{start}/{end}/'
                web_open(maps_url)
                speaker.say("Directions on your screen sir!")
                return False
            elif results == 1:
                return False
            elif n == results:
                speaker.say("I've run out of options sir!")
                return False
            else:
                continue
        else:
            google_maps.has_been_called = True
            return False
def notes() -> None:
    """Listens to the user and saves everything to a ``notes.txt`` file."""
    converted = listener(5, 10)
    if converted == 'SR_ERROR':
        return
    if 'exit' in converted or 'quit' in converted or 'Xzibit' in converted:
        return
    # each note is appended with a date and time stamp header
    with open(r'notes.txt', 'a') as writer:
        writer.write(f"{datetime.now().strftime('%A, %B %d, %Y')}\n{datetime.now().strftime('%I:%M %p')}\n"
                     f"{converted}\n")
def github(target: list) -> None:
    """Clones the github repository matched with existing repository in conditions function.

    Asks confirmation if the results are more than 1 but less than 3 else asks to be more specific.

    Args:
        target: Takes repository name as argument which has to be cloned.
    """
    if len(target) == 1:
        os.system(f"cd {home} && git clone -q {target[0]}")
        cloned = target[0].split('/')[-1].replace('.git', '')
        speaker.say(f"I've cloned {cloned} on your home directory sir!")
        return
    elif len(target) <= 3:
        newest = [new.split('/')[-1] for new in target]
        sys.stdout.write(f"\r{', '.join(newest)}")
        speaker.say(f"I found {len(target)} results. On your screen sir! Which one shall I clone?")
        speaker.runAndWait()
        converted = listener(3, 5)
        if converted != 'SR_ERROR':
            if any(word in converted.lower() for word in keywords.exit()):
                return
            if 'first' in converted.lower():
                item = 1
            elif 'second' in converted.lower():
                item = 2
            elif 'third' in converted.lower():
                item = 3
            else:
                speaker.say("Only first second or third can be accepted sir! Try again!")
                github(target)
                return  # fix: previously fell through with item=None and raised TypeError
            if item > len(target):  # fix: guard an ordinal beyond the result count (IndexError)
                speaker.say("Only first second or third can be accepted sir! Try again!")
                github(target)
                return
            # fix: ordinals are 1-based but the list is 0-indexed; `target[item]`
            # cloned the wrong repository
            os.system(f"cd {home} && git clone -q {target[item - 1]}")
            cloned = target[item - 1].split('/')[-1].replace('.git', '')
            speaker.say(f"I've cloned {cloned} on your home directory sir!")
    else:
        speaker.say(f"I found {len(target)} repositories sir! You may want to be more specific.")
def notify(user: str, password: str, number: str, body: str) -> None:
    """Send text message through SMS gateway of destination number.

    References:
        Uses `gmail-connector <https://pypi.org/project/gmail-connector/>`__ to send the SMS.

    Args:
        user: Gmail username to authenticate SMTP lib.
        password: Gmail password to authenticate SMTP lib.
        number: Phone number stored as env var.
        body: Content of the message.
    """
    # messages to the owner's own number carry a different subject line
    if number == phone_number:
        subject = "Message from Jarvis"
    else:
        subject = "Jarvis::Message from Vignesh"
    messenger = Messenger(gmail_user=user, gmail_pass=password, phone_number=number, subject=subject,
                          message=f'\n\n{body}')
    messenger.send_sms()
def send_sms(number: int or None) -> None:
    """Sends a message to the number received.

    If no number was received, it will ask for a number, looks if it is 10 digits and then sends a message.

    Args:
        number: Phone number to which the message has to be sent.
    """
    if not number:
        speaker.say("Please tell me a number sir!")
        speaker.runAndWait()
        number = listener(3, 5)
        if number == 'SR_ERROR':
            return
        if 'exit' in number or 'quit' in number or 'Xzibit' in number:
            return
        sys.stdout.write(f'\rNumber: {number}')
    # fix: the 10-digit validation previously ran only on the unrecognized-speech
    # branch; it now applies to both typed and spoken numbers. str() also protects
    # re.findall against an int argument.
    if len(''.join([str(s) for s in re.findall(r'\b\d+\b', str(number))])) != 10:
        sys.stdout.write(f'\r{number}')
        speaker.say("I don't think that's a right number sir! Phone numbers are 10 digits. Try again!")
        send_sms(number=None)
        return  # fix: the retry above handled the request
    speaker.say("What would you like to send sir?")
    speaker.runAndWait()
    body = listener(3, 5)
    if body != 'SR_ERROR':
        sys.stdout.write(f'\r{body}::to::{number}')
        speaker.say(f'{body} to {number}. Do you want me to proceed?')
        speaker.runAndWait()
        converted = listener(3, 3)
        if converted != 'SR_ERROR':
            if not any(word in converted.lower() for word in keywords.ok()):
                speaker.say("Message will not be sent sir!")
            else:
                notify(user=gmail_user, password=gmail_pass, number=number, body=body)
                speaker.say("Message has been sent sir!")
            return
# noinspection PyUnboundLocalVariable
def television(converted: str) -> None:
    """Controls all actions on a TV (LG Web OS).

    Notes:
        - In the ``__main__`` method tv is set to None.
        - Jarvis will try to ping the TV and then power it on if the host is unreachable initially.
        - Once the tv is turned on, the TV class is also initiated and assigned to tv variable.

    Args:
        converted: Takes the voice recognized statement as argument.
    """
    # phrase_exc keeps the original casing (minus the literal 'TV') so app/source
    # names can be extracted from their capitalized words below
    phrase_exc = converted.replace('TV', '')
    phrase = phrase_exc.lower()

    # 'tv_status = lambda: os.system(f"ping ....) #noqa' is an alternate but adhering to pep 8 best practice using a def
    def tv_status():
        """Pings the tv and returns the status. 0 if able to ping, 256 if unable to ping."""
        return os.system(f"ping -c 1 -t 1 {tv_ip} >/dev/null")  # pings TV IP and returns 0 if host is reachable

    if vpn_checker().startswith('VPN'):
        return
    elif ('turn off' in phrase or 'shutdown' in phrase or 'shut down' in phrase) and tv_status() != 0:
        # asked to power off a TV that is already unreachable - nothing to do
        speaker.say("I wasn't able to connect to your TV sir! I guess your TV is powered off already.")
        return
    elif tv_status() != 0:
        Thread(target=wake, args=[tv_mac]).start()  # turns TV on in a thread
        speaker.say("Looks like your TV is powered off sir! Let me try to turn it back on!")
        speaker.runAndWait()  # speaks the message to buy some time while the TV is connecting to network
    if tv_status() != 0:  # checks if TV is reachable even before trying to launch the TV connector
        speaker.say("I wasn't able to connect to your TV sir! Please make sure you are on the "
                    "same network as your TV, and your TV is connected to a power source.")
        return
    if not tv_state.get('status'):
        try:
            tv = TV(ip_address=tv_ip, client_key=tv_client_key)
        except ConnectionResetError as error:
            logger.error(f"Failed to connect to the TV. {error}")
            speaker.say("I was unable to connect to the TV sir! It appears to be a connection issue. "
                        "You might want to try again later.")
            return
        tv_state['status'] = True
        if 'turn on' in phrase or 'connect' in phrase:
            speaker.say("TV features have been integrated sir!")
            return
    if tv_state.get('status'):
        # NOTE(review): 'tv' is bound locally only when the branch above ran in THIS
        # call; if tv_state['status'] was set by an earlier call, 'tv' presumably
        # resolves from an outer scope (hence the noinspection hint) - confirm.
        if 'turn on' in phrase or 'connect' in phrase:
            speaker.say('Your TV is already powered on sir!')
        elif 'increase' in phrase:
            tv.increase_volume()
            speaker.say(f'{choice(ack)}!')
        elif 'decrease' in phrase or 'reduce' in phrase:
            tv.decrease_volume()
            speaker.say(f'{choice(ack)}!')
        elif 'mute' in phrase:
            tv.mute()
            speaker.say(f'{choice(ack)}!')
        elif 'pause' in phrase or 'hold' in phrase:
            tv.pause()
            speaker.say(f'{choice(ack)}!')
        elif 'resume' in phrase or 'play' in phrase:
            tv.play()
            speaker.say(f'{choice(ack)}!')
        elif 'rewind' in phrase:
            tv.rewind()
            speaker.say(f'{choice(ack)}!')
        elif 'forward' in phrase:
            tv.forward()
            speaker.say(f'{choice(ack)}!')
        elif 'stop' in phrase:
            tv.stop()
            speaker.say(f'{choice(ack)}!')
        elif 'set' in phrase:
            # concatenates all digit groups in the phrase into a volume value
            vol = int(''.join([str(s) for s in re.findall(r'\b\d+\b', phrase_exc)]))
            sys.stdout.write(f'\rRequested volume: {vol}')
            if vol:
                tv.set_volume(vol)
                speaker.say(f"I've set the volume to {vol}% sir.")
            else:
                speaker.say(f"{vol} doesn't match the right format sir!")
        elif 'volume' in phrase:
            speaker.say(f"The current volume on your TV is, {tv.get_volume()}%")
        elif 'app' in phrase or 'application' in phrase:
            sys.stdout.write(f'\r{tv.list_apps()}')
            speaker.say('App list on your screen sir!')
            speaker.runAndWait()
            sleep(5)
        elif 'open' in phrase or 'launch' in phrase:
            # app name is assumed to be the capitalized words in the phrase
            app_name = ''
            for word in phrase_exc.split():
                if word[0].isupper():
                    app_name += word + ' '
            if not app_name:
                speaker.say("I didn't quite get that.")
            else:
                try:
                    tv.launch_app(app_name.strip())
                    speaker.say(f"I've launched {app_name} on your TV sir!")
                except ValueError:
                    speaker.say(f"I didn't find the app {app_name} on your TV sir!")
        elif "what's" in phrase or 'currently' in phrase:
            speaker.say(f'{tv.current_app()} is running on your TV.')
        elif 'change' in phrase or 'source' in phrase:
            # source name is assumed to be the capitalized words in the phrase
            tv_source = ''
            for word in phrase_exc.split():
                if word[0].isupper():
                    tv_source += word + ' '
            if not tv_source:
                speaker.say("I didn't quite get that.")
            else:
                try:
                    tv.set_source(tv_source.strip())
                    speaker.say(f"I've changed the source to {tv_source}.")
                except ValueError:
                    speaker.say(f"I didn't find the source {tv_source} on your TV sir!")
        elif 'shutdown' in phrase or 'shut down' in phrase or 'turn off' in phrase:
            Thread(target=tv.shutdown).start()
            speaker.say(f'{choice(ack)}! Turning your TV off.')
            tv_state.pop('status')  # next call will re-establish the connection
        else:
            speaker.say("I didn't quite get that.")
    else:
        converted = converted.replace('my', 'your').replace('please', '').replace('will you', '').strip()
        speaker.say(f"I'm sorry sir! I wasn't able to {converted}, as the TV state is unknown!")
def alpha(text: str) -> bool:
    """Uses wolfram alpha API to fetch results for uncategorized phrases heard.

    Args:
        text: Takes the voice recognized statement as argument.

    Raises:
        Broad ``Exception`` clause indicating that the Full Results API did not find an input parameter while parsing.

    Returns:
        bool:
        Boolean True if wolfram alpha API is unable to fetch consumable results.
        Implicitly returns None (falsy) after a result was spoken successfully.
    """
    alpha_client = Think(app_id=think_id)
    # noinspection PyBroadException
    try:
        res = alpha_client.query(text)
    except Exception:  # the client raises a plain Exception when the query can't be parsed
        return True
    if res['@success'] == 'false':
        return True
    else:
        try:
            response = next(res.results).text  # first pod of the results generator
            response = response.replace('\n', '. ').strip()
            sys.stdout.write(f'\r{response}')
            if response == '(no data available)':
                return True
            speaker.say(response)
        except (StopIteration, AttributeError):
            # no consumable pods in the result set
            return True
def google(query: str, suggestion_count: int = 0) -> bool:
    """Uses Google's search engine parser and gets the first result that shows up on a google search.

    Notes:
        - If it is unable to get the result, Jarvis sends a request to ``suggestqueries.google.com``
        - This is to rephrase the query and then looks up using the search engine parser once again.
        - ``suggestion_count`` is used to limit the number of times suggestions are used.
        - ``suggestion_count`` is also used to make sure the suggestions and parsing don't run on an infinite loop.
        - This happens when ``google`` gets the exact search as suggested ones which failed to fetch results earlier.

    Args:
        suggestion_count: Integer value that keeps incrementing when ``Jarvis`` looks up for suggestions.
        query: Takes the voice recognized statement as argument.

    Returns:
        bool:
            Boolean ``True`` if google search engine is unable to fetch consumable results.
    """
    search_engine = GoogleSearch()
    results = []
    try:
        google_results = search_engine.search(query, cache=False)
        a = {"Google": google_results}
        results = [result['titles'] for k, v in a.items() for result in v]
    except NoResultsOrTrafficError:
        suggest_url = "http://suggestqueries.google.com/complete/search"
        params = {
            "client": "firefox",
            "q": query,
        }
        r = get(suggest_url, params)
        if not r:
            return True
        try:
            suggestion = r.json()[1][1]
            suggestion_count += 1
            if suggestion_count >= 3:  # avoids infinite suggestions over the same suggestion
                speaker.say(r.json()[1][0].replace('=', ''))  # picks the closest match and opens a google search
                speaker.runAndWait()
                return False
            else:
                # fix: propagate the recursive lookup's status; the original discarded it and always
                # fell through to the empty-results path below
                return google(suggestion, suggestion_count)
        except IndexError:
            return True
    if results:
        # fix: rebuild instead of list.remove() while iterating, which skipped adjacent short entries
        results = [result for result in results if len(result.split()) >= 3]  # removes results with dummy words
    else:
        return False
    if results:
        results = results[0:3]  # picks top 3 (first appeared on Google)
        results.sort(key=lambda x: len(x.split()), reverse=True)  # sorts in reverse by the word count of each sentence
        output = results[0]  # picks the top most result
        if '\n' in output:
            required = output.split('\n')
            modify = required[0].strip()
            split_val = ' '.join(splitter(modify.replace('.', 'rEpLaCInG')))
            sentence = split_val.replace(' rEpLaCInG ', '.')
            repeats = []
            [repeats.append(word) for word in sentence.split() if word not in repeats]
            refined = ' '.join(repeats)
            output = refined + required[1] + '.' + required[2]
            output = output.replace('\\', ' or ')
        # strip date-like fragments (e.g. "Jan 5, 2021") from the spoken answer
        match = re.search(r'(\w{3},|\w{3}) (\d,|\d|\d{2},|\d{2}) \d{4}', output)
        if match:
            output = output.replace(match.group(), '')
        output = output.replace('\\', ' or ')
        sys.stdout.write(f'\r{output}')
        speaker.say(output)
        speaker.runAndWait()
        return False
    else:
        return True
def google_search(phrase: str or None) -> None:
    """Opens up a google search for the phrase received. If nothing was received, gets phrase from user.

    Args:
        phrase: Takes the voice recognized statement as argument.
    """
    if not phrase:
        speaker.say("Please tell me the search phrase.")
        speaker.runAndWait()
        converted = listener(3, 5)
        if converted == 'SR_ERROR':
            # fix: speech was not recognized; the original fell through and googled the literal string "None"
            return
        if 'exit' in converted or 'quit' in converted or 'xzibit' in converted or 'cancel' in converted:
            return
        phrase = converted.lower()
    search = str(phrase).replace(' ', '+')
    unknown_url = f"https://www.google.com/search?q={search}"
    web_open(unknown_url)
    speaker.say(f"I've opened up a google search for: {phrase}.")
def volume_controller(level: int = 50) -> None:
    """Controls volume from the numbers received. Defaults to 50%.

    The default value makes the signature match the documented behavior; the original docstring
    promised a 50% default that the parameter did not actually have.

    Args:
        level: Level of volume to which the system has to set.
    """
    sys.stdout.write("\r")
    level = round((8 * level) / 100)  # scale the 0-100 percentage down to the 0-8 value passed to "set Volume"
    os.system(f'osascript -e "set Volume {level}"')
def face_recognition_detection() -> None:
    """Initiates face recognition script and looks for images stored in named directories within ``train`` directory.

    Flow:
        - Tries to recognize a face against known images under ``train/``.
        - Falls back to plain face detection when recognition finds no match.
        - Offers to save a newly detected face under a spoken name so it can be recognized later.
    """
    sys.stdout.write("\r")
    train_dir = 'train'
    # create the training directory on first run; no-op when it already exists
    os.mkdir(train_dir) if not os.path.isdir(train_dir) else None
    speaker.say('Initializing facial recognition. Please smile at the camera for me.')
    speaker.runAndWait()
    sys.stdout.write('\rLooking for faces to recognize.')
    try:
        result = Face().face_recognition()
    except BlockingIOError:
        # camera device could not be opened (busy or permission denied)
        logger.error('Unable to access the camera.')
        speaker.say("I was unable to access the camera. Facial recognition can work only when cameras are "
                    "present and accessible.")
        return
    if not result:
        sys.stdout.write('\rLooking for faces to detect.')
        speaker.say("No faces were recognized. Switching on to face detection.")
        speaker.runAndWait()
        result = Face().face_detection()
        if not result:
            sys.stdout.write('\rNo faces were recognized nor detected.')
            speaker.say('No faces were recognized. nor detected. Please check if your camera is working, '
                        'and look at the camera when you retry.')
            return
        sys.stdout.write('\rNew face has been detected. Like to give it a name?')
        speaker.say('I was able to detect a face, but was unable to recognize it.')
        # NOTE(review): 'cv2_open.jpg' is presumably written by Face().face_detection() — confirm
        os.system('open cv2_open.jpg')
        speaker.say("I've taken a photo of you. Preview on your screen. Would you like to give it a name, "
                    "so that I can add it to my database of known list? If you're ready, please tell me a name, "
                    "or simply say exit.")
        speaker.runAndWait()
        phrase = listener(3, 5)
        if any(word in phrase.lower() for word in keywords.ok()):
            sys.stdout.write(f"\r{phrase}")
            phrase = phrase.replace(' ', '_')
            # creates a named directory if it is not found already else simply ignores
            os.system(f'cd {train_dir} && mkdir {phrase}') if not os.path.exists(f'{train_dir}/{phrase}') else None
            c_time = datetime.now().strftime("%I_%M_%p")
            img_name = f"{phrase}_{c_time}.jpg"  # adds current time to image name to avoid overwrite
            os.rename('cv2_open.jpg', img_name)  # renames the files
            os.system(f"mv {img_name} {train_dir}/{phrase}")  # move files into named directory within train_dir
            speaker.say(f"Image has been saved as {img_name}. I will be able to recognize {phrase} in the future.")
        else:
            os.remove('cv2_open.jpg')
            speaker.say("I've deleted the image.")
    else:
        # recognition matched a known directory name
        speaker.say(f'Hi {result}! How can I be of service to you?')
def speed_test() -> None:
    """Initiates speed test and says the ping rate, download and upload speed."""
    # Reverse-geocode the speedtest client's coordinates to announce where the test runs from.
    located = geo_locator.reverse(st.lat_lon, language='en')
    address = located.raw['address']
    city, state = address.get('city'), address.get('state')
    isp = st.results.client.get('isp').replace(',', '').replace('.', '')
    sys.stdout.write(f"\rStarting speed test with your ISP: {isp}. Location: {city}, {state}")
    speaker.say(f"Starting speed test sir! I.S.P: {isp}. Location: {city} {state}")
    speaker.runAndWait()
    st.download() and st.upload()  # upload runs only when download returns a truthy value
    ping = round(st.results.ping)
    down = size_converter(st.results.download)
    up = size_converter(st.results.upload)
    sys.stdout.write(f'\rPing: {ping}m/s\tDownload: {down}\tUpload: {up}')
    speaker.say(f'Ping rate: {ping} milli seconds.')
    speaker.say(f'Download speed: {down} per second.')
    speaker.say(f'Upload speed: {up} per second.')
def connector(phrase: str, targets: dict) -> bool:
    """Scans bluetooth devices in range and establishes connection with the matching device in phrase.

    Args:
        phrase: Takes the spoken phrase as an argument.
        targets: Takes a dictionary of scanned devices as argument.

    Returns:
        bool:
            Boolean True or False based on connection status.
    """
    attempted = False
    for device in targets:
        if not device['name']:
            continue  # nameless devices cannot be matched against the phrase
        device['name'] = normalize("NFKD", device['name'])
        # a device matches when any spoken word appears in its (normalized) name
        if not any(re.search(word, device['name'], flags=re.IGNORECASE) for word in phrase.split()):
            continue
        attempted = True
        if 'disconnect' in phrase:
            output = getoutput(f"blueutil --disconnect {device['address']}")
            if not output:
                sys.stdout.write(f"\rDisconnected from {device['name']}")
                sleep(2)  # included a sleep here, so it avoids voice swapping between devices
                speaker.say(f"Disconnected from {device['name']} sir!")
            else:
                speaker.say(f"I was unable to disconnect {device['name']} sir!. "
                            f"Perhaps it was never connected.")
        elif 'connect' in phrase:
            output = getoutput(f"blueutil --connect {device['address']}")
            if not output:
                sys.stdout.write(f"\rConnected to {device['name']}")
                sleep(2)  # included a sleep here, so it avoids voice swapping between devices
                speaker.say(f"Connected to {device['name']} sir!")
            else:
                speaker.say(f"Unable to connect {device['name']} sir!, please make sure the device is "
                            f"turned on and ready to pair.")
        break  # only the first matching device is acted upon
    return attempted
def bluetooth(phrase: str) -> None:
    """Find and connect to bluetooth devices near by.

    Args:
        phrase: Takes the voice recognized statement as argument.
    """
    if 'turn off' in phrase or 'power off' in phrase:
        call("blueutil --power 0", shell=True)
        sys.stdout.write('\rBluetooth has been turned off')
        speaker.say("Bluetooth has been turned off sir!")
        return
    if 'turn on' in phrase or 'power on' in phrase:
        call("blueutil --power 1", shell=True)
        sys.stdout.write('\rBluetooth has been turned on')
        speaker.say("Bluetooth has been turned on sir!")
        return
    if 'disconnect' in phrase and ('bluetooth' in phrase or 'devices' in phrase):
        # power-cycling the radio drops every active connection at once
        call("blueutil --power 0", shell=True)
        sleep(2)
        call("blueutil --power 1", shell=True)
        speaker.say('All bluetooth devices have been disconnected sir!')
        return
    sys.stdout.write('\rScanning paired Bluetooth devices')
    paired_devices = json_loads(getoutput("blueutil --paired --format json"))
    if connector(phrase=phrase, targets=paired_devices):
        return
    sys.stdout.write('\rScanning UN-paired Bluetooth devices')
    speaker.say('No connections were established sir, looking for un-paired devices.')
    speaker.runAndWait()
    unpaired_devices = json_loads(getoutput("blueutil --inquiry --format json"))
    if unpaired_devices:
        connector(phrase=phrase, targets=unpaired_devices)
    else:
        speaker.say('No un-paired devices found sir! You may want to be more precise.')
def increase_brightness() -> None:
    """Increases the brightness to maximum in macOS."""
    # 32 presses of the brightness-up key (key code 144) cover the full range from any starting level
    presses = 0
    while presses < 32:
        os.system("""osascript -e 'tell application "System Events"' -e 'key code 144' -e ' end tell'""")
        presses += 1
def decrease_brightness() -> None:
    """Decreases the brightness to bare minimum in macOS."""
    # 32 presses of the brightness-down key (key code 145) cover the full range from any starting level
    presses = 0
    while presses < 32:
        os.system("""osascript -e 'tell application "System Events"' -e 'key code 145' -e ' end tell'""")
        presses += 1
def set_brightness(level: int) -> None:
    """Set brightness to a custom level.

    Since Jarvis uses in-built apple script, the only way to achieve this is to set the brightness
    to bare minimum and increase [*]% from there or vice-versa.

    Args:
        level: Percentage of brightness to be set.
    """
    steps_up = round((32 * int(level)) / 100)  # convert the percentage into a number of key presses out of 32
    # drive brightness all the way down first so the increase starts from a known floor
    for _ in range(32):
        os.system("""osascript -e 'tell application "System Events"' -e 'key code 145' -e ' end tell'""")
    for _ in range(steps_up):
        os.system("""osascript -e 'tell application "System Events"' -e 'key code 144' -e ' end tell'""")
def lights(converted: str) -> None:
    """Controller for smart lights.

    Routes the spoken phrase to a room-specific (or whole-house) list of MagicHome controllers
    and applies power, color, preset or brightness changes.

    Args:
        converted: Takes the voice recognized statement as argument.
    """

    def light_switch():
        """Says a message if the physical switch is toggled off."""
        speaker.say("I guess your light switch is turned off sir! I wasn't able to read the device. "
                    "Try toggling the switch and ask me to restart myself!")

    def turn_off(host: str):
        """Turns off the device.

        Args:
            host: Takes target device IP address as an argument.
        """
        # NOTE(review): device_type 1 vs 2 across these helpers presumably selects the controller
        # protocol (RGB vs RGB+WW) — confirm against the MagicHomeApi implementation
        controller = MagicHomeApi(device_ip=host, device_type=1, operation='Turn Off')
        controller.turn_off()

    def warm(host: str):
        """Sets lights to warm/yellow.

        Args:
            host: Takes target device IP address as an argument.
        """
        controller = MagicHomeApi(device_ip=host, device_type=1, operation='Warm Lights')
        controller.update_device(r=0, g=0, b=0, warm_white=255)

    def cool(host: str):
        """Sets lights to cool/white.

        Args:
            host: Takes target device IP address as an argument.
        """
        controller = MagicHomeApi(device_ip=host, device_type=2, operation='Cool Lights')
        controller.update_device(r=255, g=255, b=255, warm_white=255, cool_white=255)

    def preset(host: str, value: int):
        """Changes light colors to preset values.

        Args:
            host: Takes target device IP address as an argument.
            value: Preset value extracted from list of verified values.
        """
        controller = MagicHomeApi(device_ip=host, device_type=2, operation='Preset Values')
        controller.send_preset_function(preset_number=value, speed=101)

    def lumen(host: str, warm_lights: bool, rgb: int = 255):
        """Sets lights to custom brightness.

        Args:
            host: Takes target device IP address as an argument.
            warm_lights: Boolean value if lights have been set to warm or cool.
            rgb: Red, Green and Blue values to alter the brightness.
        """
        if warm_lights:
            controller = MagicHomeApi(device_ip=host, device_type=1, operation='Custom Brightness')
            controller.update_device(r=255, g=255, b=255, warm_white=rgb)
        else:
            controller = MagicHomeApi(device_ip=host, device_type=2, operation='Custom Brightness')
            controller.update_device(r=255, g=255, b=255, warm_white=rgb, cool_white=rgb)

    # pick the target host list; an empty per-room list means the physical switch is off
    if 'hallway' in converted:
        if not (light_host_id := hallway_ip):
            light_switch()
            return
    elif 'kitchen' in converted:
        if not (light_host_id := kitchen_ip):
            light_switch()
            return
    elif 'bedroom' in converted:
        if not (light_host_id := bedroom_ip):
            light_switch()
            return
    else:
        # no room mentioned: operate on every configured light in the house
        light_host_id = hallway_ip + kitchen_ip + bedroom_ip
    lights_count = len(light_host_id)
    plural = 'lights!' if lights_count > 1 else 'light!'
    if 'turn on' in converted or 'cool' in converted or 'white' in converted:
        # leaving warm mode: drop the flag so lumen() later targets the cool-white channel
        warm_light.pop('status') if warm_light.get('status') else None
        tone = 'white' if 'white' in converted else 'cool'
        speaker.say(f'{choice(ack)}! Turning on {lights_count} {plural}') if 'turn on' in converted else \
            speaker.say(f'{choice(ack)}! Setting {lights_count} {plural} to {tone}!')
        with ThreadPoolExecutor(max_workers=lights_count) as executor:
            executor.map(cool, light_host_id)
    elif 'turn off' in converted:
        speaker.say(f'{choice(ack)}! Turning off {lights_count} {plural}')
        with ThreadPoolExecutor(max_workers=lights_count) as executor:
            executor.map(turn_off, light_host_id)
    elif 'warm' in converted or 'yellow' in converted:
        warm_light['status'] = True
        speaker.say(f'{choice(ack)}! Setting {lights_count} {plural} to yellow!') if 'yellow' in converted else \
            speaker.say(f'Sure sir! Setting {lights_count} {plural} to warm!')
        with ThreadPoolExecutor(max_workers=lights_count) as executor:
            executor.map(warm, light_host_id)
    elif 'red' in converted:
        speaker.say(f"{choice(ack)}! I've changed {lights_count} {plural} to red!")
        for light_ip in light_host_id:
            preset(host=light_ip, value=preset_values['red'])
    elif 'blue' in converted:
        speaker.say(f"{choice(ack)}! I've changed {lights_count} {plural} to blue!")
        for light_ip in light_host_id:
            preset(host=light_ip, value=preset_values['blue'])
    elif 'green' in converted:
        speaker.say(f"{choice(ack)}! I've changed {lights_count} {plural} to green!")
        for light_ip in light_host_id:
            preset(host=light_ip, value=preset_values['green'])
    elif 'set' in converted or 'percentage' in converted or '%' in converted or 'dim' in converted \
            or 'bright' in converted:
        if 'bright' in converted:
            level = 100
        elif 'dim' in converted:
            level = 50
        else:
            # use the first number spoken, defaulting to full brightness when none is found
            if level := re.findall(r'\b\d+\b', converted):
                level = int(level[0])
            else:
                level = 100
        speaker.say(f"{choice(ack)}! I've set {lights_count} {plural} to {level}%!")
        level = round((255 * level) / 100)  # map the percentage onto the 0-255 channel value
        for light_ip in light_host_id:
            lumen(host=light_ip, warm_lights=warm_light.get('status'), rgb=level)
    else:
        speaker.say(f"I didn't quite get that sir! What do you want me to do to your {plural}?")
def vpn_checker() -> str:
    """Uses simple check on network id to see if it is connected to local host or not.

    Returns:
        str:
            Private IP address of host machine, prefixed with ``VPN:`` when a VPN connection is detected.
    """
    # connecting a UDP socket sends no packets; it only resolves the outbound interface address
    socket_ = socket(AF_INET, SOCK_DGRAM)
    try:
        socket_.connect(("8.8.8.8", 80))
        ip_address = socket_.getsockname()[0]
    finally:
        socket_.close()  # fix: close even if connect() raises, instead of leaking the socket
    # local addresses here are expected to start with 192.x or 127.x; anything else implies a VPN
    if not ip_address.startswith(('192', '127')):  # idiom: tuple startswith instead of bitwise | on booleans
        ip_address = 'VPN:' + ip_address
        info = json_load(urlopen('http://ipinfo.io/json'))
        sys.stdout.write(f"\rVPN connection is detected to {info.get('ip')} at {info.get('city')}, "
                         f"{info.get('region')} maintained by {info.get('org')}")
        speaker.say("You have your VPN turned on. Details on your screen sir! Please note that none of the home "
                    "integrations will work with VPN enabled.")
    return ip_address
def celebrate() -> str:
    """Function to look if the current date is a holiday or a birthday.

    Returns:
        str:
            A string of the event observed today (``None`` implicitly when nothing is celebrated).
    """
    today_ = datetime.today().date()
    day_month = datetime.now().strftime("%d-%B")
    us_event = CountryHoliday('US').get(today_)  # checks if the current date is a US holiday
    tn_event = CountryHoliday('IND', prov='TN', state='TN').get(today_)  # checks if Indian (esp TN) holiday
    if tn_event:
        return tn_event
    if us_event and 'Observed' not in us_event:
        return us_event
    if day_month == birthday:
        return 'Birthday'
def time_travel() -> None:
    """Triggered only from ``activator()`` to give a quick update on the user's daily routine.

    Sequence: greeting, date, time, weather, meetings (weekday mornings only), todo list,
    unread gmail, then an optional news read-out. At night only the greeting and any
    celebration wish are spoken.
    """
    part_day = part_of_day()
    meeting = None
    # weekday morning with no cached 'meetings' file: fetch meetings in the background while speaking
    if not os.path.isfile('meetings') and part_day == 'Morning' and datetime.now().strftime('%A') not in \
            ['Saturday', 'Sunday']:
        meeting = ThreadPool(processes=1).apply_async(func=meetings)
    speaker.say(f"Good {part_day} Vignesh.")
    if part_day == 'Night':
        if event := celebrate():
            speaker.say(f'Happy {event}!')
        return
    current_date()
    current_time()
    weather()
    speaker.runAndWait()
    # prefer the cached meetings file; otherwise wait (up to 30s) on the background fetch started above
    if os.path.isfile('meetings') and part_day == 'Morning' and datetime.now().strftime('%A') not in \
            ['Saturday', 'Sunday']:
        meeting_reader()
    elif meeting:
        try:
            speaker.say(meeting.get(timeout=30))
        except ThreadTimeoutError:
            pass  # skip terminate, close and join thread since the motive is to skip meetings info in case of a timeout
    todo()
    gmail()
    speaker.say('Would you like to hear the latest news?')
    speaker.runAndWait()
    phrase = listener(3, 3)
    if any(word in phrase.lower() for word in keywords.ok()):
        news()
    # NOTE(review): presumably reset so the caller's dispatch logic can re-trigger this — confirm
    # where `has_been_called` is read
    time_travel.has_been_called = False
def guard() -> None:
    """Security Mode will enable camera and microphone in the background.

    Notes:
        - If any speech is recognized or a face is detected, there will another thread triggered to send notifications.
        - Notifications will be triggered only after 5 minutes of previous notification.
    """
    import cv2  # local import so the heavy dependency loads only when guard mode is requested
    cam_source, cam = None, None
    for i in range(0, 3):
        cam = cv2.VideoCapture(i)  # tries thrice to choose the camera for which Jarvis has access
        if cam is None or not cam.isOpened() or cam.read() == (False, None):
            pass
        else:
            cam_source = i  # source for security cam is chosen
            cam.release()
            break
    if cam_source is None:
        # no usable camera: notify via SMS but keep running in audio-only mode
        cam_error = 'Guarding mode disabled as I was unable to access any of the cameras.'
        logger.error(cam_error)
        response = Messenger(gmail_user=gmail_user, gmail_pass=gmail_pass, phone_number=phone_number,
                             subject="IMPORTANT::Guardian mode faced an exception.", message=cam_error).send_sms()
        if response.get('ok') and response.get('status') == 200:
            logger.info('SMS notification has been sent.')
        else:
            logger.error(f'Unable to send SMS notification.\n{response}')
    scale_factor = 1.1  # Parameter specifying how much the image size is reduced at each image scale.
    min_neighbors = 5  # Parameter specifying how many neighbors each candidate rectangle should have, to retain it.
    notified, date_extn, converted = None, None, None
    while True:
        # Listens for any recognizable speech and saves it to a notes file
        try:
            sys.stdout.write("\rSECURITY MODE")
            listened = recognizer.listen(source, timeout=3, phrase_time_limit=10)
            converted = recognizer.recognize_google(listened)
            converted = converted.replace('Jarvis', '').strip()
            sys.stdout.write(f"\r{converted}")
        except (UnknownValueError, RequestError, WaitTimeoutError):
            pass  # silence or speech-API failure: fall through and keep watching the camera
        if converted and any(word.lower() in converted.lower() for word in keywords.guard_disable()):
            logger.info('Disabled security mode')
            speaker.say(f'Welcome back sir! Good {part_of_day()}.')
            if os.path.exists(f'threat/{date_extn}.jpg'):
                speaker.say("We had a potential threat sir! Please check your email to confirm.")
            speaker.runAndWait()
            sys.stdout.write('\rDisabled Security Mode')
            break
        elif converted:
            logger.info(f'Conversation::{converted}')
        if cam_source is not None:
            # captures images and keeps storing it to a folder
            validation_video = cv2.VideoCapture(cam_source)
            cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
            ignore, image = validation_video.read()
            scale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            faces = cascade.detectMultiScale(scale, scale_factor, min_neighbors)
            date_extn = f"{datetime.now().strftime('%B_%d_%Y_%I_%M_%S_%p')}"
            try:
                # NOTE(review): this appears to rely on truth-testing a multi-element detection array
                # raising ValueError (i.e. ValueError == face present, empty result == no face) — confirm
                if faces:
                    pass
            except ValueError:
                # log level set to critical because this is a known exception when try check 'if faces'
                cv2.imwrite(f'threat/{date_extn}.jpg', image)
                logger.info(f'Image of detected face stored as {date_extn}.jpg')
            if not os.path.exists(f'threat/{date_extn}.jpg'):
                date_extn = None
        # if no notification was sent yet or if a phrase or face is detected notification thread will be triggered
        if (not notified or float(time() - notified) > 300) and (converted or date_extn):
            notified = time()
            Thread(target=threat_notify, args=(converted, date_extn)).start()
def threat_notify(converted: str, date_extn: str or None) -> None:
    """Sends an SMS and email notification in case of a threat.

    References:
        Uses `gmail-connector <https://pypi.org/project/gmail-connector/>`__ to send the SMS and email.

    Args:
        converted: Takes the voice recognized statement as argument.
        date_extn: Name of the attachment file which is the picture of the intruder.
    """
    timestamp = f"{datetime.now().strftime('%B %d, %Y %I:%M %p')}"
    subject_line = f'Intruder Alert on {timestamp}'
    if converted:
        sms_response = Messenger(gmail_user=gmail_user, gmail_pass=gmail_pass, phone_number=phone_number,
                                 subject="!!INTRUDER ALERT!!", message=f"{timestamp}\n{converted}").send_sms()
        email_body = f"""<html><head></head><body><h2>Conversation of Intruder:</h2><br>{converted}<br><br>
        <h2>Attached is a photo of the intruder.</h2>"""
    else:
        sms_response = Messenger(gmail_user=gmail_user, gmail_pass=gmail_pass, phone_number=phone_number,
                                 subject="!!INTRUDER ALERT!!",
                                 message=f"{timestamp}\nCheck your email for more information.").send_sms()
        email_body = """<html><head></head><body><h2>No conversation was recorded,
        but attached is a photo of the intruder.</h2>"""
    if sms_response.get('ResponseMetadata').get('HTTPStatusCode') == 200:
        logger.info('SMS notification has been sent.')
    else:
        logger.error(f'Unable to send SMS notification.\n{sms_response}')
    if not date_extn:
        return  # no captured image: nothing to email
    attachment_path = f'threat/{date_extn}.jpg'
    email_response = SendEmail(gmail_user=gmail_user, gmail_pass=gmail_pass,
                               recipient=robinhood_user, subject=subject_line, body=email_body,
                               attachment=attachment_path).send_email()
    if email_response.get('ok'):
        logger.info('Email has been sent!')
    else:
        logger.error(f"Email dispatch failed with response: {email_response.get('body')}\n")
def offline_communicator_initiate() -> None:
    """Initiates Jarvis API and Ngrok for requests from external sources if they aren't running already.

    Notes:
        - ``forever_ngrok.py`` is a simple script that triggers ngrok connection in the port ``4483``.
        - The connection is tunneled through a public facing URL which is used to make ``POST`` requests to Jarvis API.
        - ``uvicorn`` command launches JarvisAPI ``fast.py`` using the same port ``4483``
    """
    target_scripts = ['forever_ngrok.py', 'uvicorn']
    running = {script: False for script in target_scripts}
    for script in target_scripts:
        # grep the process table for a live instance of the script, ignoring the grep/shell entries themselves
        process_entries = check_output(f"ps -ef | grep {script}", shell=True).decode('utf-8').split('\n')
        for entry in process_entries:
            if entry and 'grep' not in entry and '/bin/sh' not in entry:
                running[script] = True
                if script == 'forever_ngrok.py':
                    logger.info('An instance of ngrok connection for offline communicator is running already.')
                else:
                    logger.info('An instance of uvicorn application for offline communicator is running already.')
    if not running['forever_ngrok.py']:
        logger.info('Initiating ngrok connection for offline communicator.')
        initiator = f'cd {home}/JarvisHelper && source venv/bin/activate && export ENV=1 && python3 {target_scripts[0]}'
        apple_script('Terminal').do_script(initiator)
    if not running['uvicorn']:
        logger.info('Initiating FastAPI for offline listener.')
        offline_script = f'cd {os.getcwd()} && source venv/bin/activate && cd api && ' \
                         f'export offline_phrase={offline_phrase} && ' \
                         'uvicorn fast:app --reload --port=4483'
        apple_script('Terminal').do_script(offline_script)
def offline_communicator() -> None:
    """Reads ``offline_request`` file generated by `fast.py <https://git.io/JBPFQ>`__ containing request sent via API.

    See Also:
        To replicate a working model for offline communicator:
            - Run ``ngrok`` on port 4483 or any desired port.
            - The port number should match with the one in `fast.py <https://git.io/JBPFQ>`__
            - To "log" the response and send it out as notification, I made some changes to the pyttsx3 module. (below)
            - I also stop the response from being spoken.
            - ``voice_changer()`` is called as the voice property is reset when ``speaker.stop()`` is used.

    Changes in `pyttsx3`:
        - Created a global variable in ``say()`` -> ``pyttsx3/engine.py`` (before proxy) and store the response.
        - Created a new method and return the global variable which I created in ``say()``
        - The new method (``vig()`` in this case) is called to get the response which is sent as SMS notification.
        - Doing so, avoids changes to all functions within ``conditions()`` to notify the response from Jarvis.

    Env Vars:
        - ``offline_phrase`` - Unique phrase to authenticate the requests coming from an external source.

    Notes:
        More cool stuff:
            - I have linked the ngrok ``public_url`` tunnelling the FastAPI to a JavaScript on my webpage.
            - When a request is submitted, the JavaScript makes a POST call to the API.
            - The API does the authentication and creates the ``offline_request`` file if authenticated.
            - Check it out: `JarvisOffline <https://thevickypedia.com/jarvisoffline>`__

    Warnings:
        - Restarts quietly in case of a ``RuntimeError`` however, the offline request will still be executed.
        - This happens when ``speaker`` is stopped while another loop of speaker is in progress by regular interaction.
    """
    while True:
        if os.path.isfile('offline_request'):
            with open('offline_request', 'r') as off_request:
                command = off_request.read()
            logger.info(f'Received offline input::{command}')
            response = None
            try:
                if command:
                    os.remove('offline_request')  # consume the request before executing it
                    os.environ['called_by_offline'] = '1'  # Write env var so some function can use it
                    split(command)  # route the command through the regular conditions pipeline
                    del os.environ['called_by_offline']  # deletes the env var
                    sys.stdout.write('\r')
                    # custom pyttsx3 hook (see docstring) returning the text that would have been spoken
                    response = speaker.vig()
                else:
                    response = 'Received a null request. Please try to resend it'
                current_time_ = datetime.now(timezone('US/Central'))
                dt_string = current_time_.strftime("%A, %B %d, %Y %I:%M:%S %p")
                with open('offline_response', 'w') as off_response:
                    off_response.write(dt_string + '\n\n' + response)
                speaker.stop()
                voice_changer()  # speaker.stop() resets the voice property, so restore it
            except RuntimeError:
                # re-queue the request so it is retried after the quiet restart
                if command and not response:
                    with open('offline_request', 'w') as off_request:
                        off_request.write(command)
                logger.error(f'Received a RuntimeError while executing offline request.\n{format_exc()}')
                restart(quiet=True, quick=True)
        if STOPPER.get('status'):
            break
def meeting_reader() -> None:
    """Speaks meeting information that ``meeting_gatherer()`` stored in a file named 'meetings'.

    If the file is not available, meeting information is directly fetched from the ``meetings()`` function.
    """
    with open('meetings', 'r') as meetings_file:
        gathered_info = meetings_file.read()
    sys.stdout.write(f'\r{gathered_info}')
    speaker.say(gathered_info)
def meeting_gatherer() -> None:
    """Gets return value from ``meetings()`` and writes it to file named ``meetings``.

    This function runs in a dedicated thread every 30 minutes to avoid wait time when meetings information is requested.
    """
    logger.info('Meeting gather has been initiated.')
    while True:
        # fix: the original compared with `<` which deleted files NEWER than 30 minutes and kept stale
        # ones, contradicting its own comment; compare with `>` so only stale caches are removed
        if os.path.isfile('meetings') and int(datetime.now().timestamp()) - int(os.stat('meetings').st_mtime) > 1_800:
            os.remove('meetings')  # removes the file if it is older than 30 minutes
        data = meetings()
        if data.startswith('You'):
            # `with` closes the file; the original's explicit close() inside the block was redundant
            with open('meetings', 'w') as gatherer:
                gatherer.write(data)
        elif data == "The calendar Office is unavailable sir!":
            break
        if STOPPER.get('status'):
            break
        sleep(900)
def meetings(meeting_file: str = 'calendar.scpt') -> str:
    """Uses ``applescript`` to fetch events/meetings from local Calendar (including subscriptions) or Microsoft Outlook.

    Args:
        meeting_file: Takes applescript filename as argument. Defaults to calendar.scpt unless an alternate is passed.

    Returns:
        str:
            - On success, returns a message saying which meeting is scheduled at what time.
            - If no events, returns a message saying there are no events in the next 12 hours.
            - On failure, returns a message saying Jarvis was unable to read calendar/outlook.
    """
    args = [1, 3]  # NOTE(review): positional arguments consumed by the applescript — confirm their meaning there
    source_app = meeting_file.replace('.scpt', '')
    failure = None
    process = Popen(['/usr/bin/osascript', meeting_file] + [str(arg) for arg in args], stdout=PIPE, stderr=PIPE)
    out, err = process.communicate()
    os.system(f'git checkout -- {meeting_file}')  # Undo the unspecified changes done by ScriptEditor
    if error := process.returncode:  # stores process.returncode in error if process.returncode is not 0
        err_msg = err.decode('UTF-8')
        err_code = err_msg.split()[-1].strip()  # osascript errors end with a parenthesized code
        if err_code == '(-1728)':  # If the calendar named 'Office' in unavailable in the calendar application
            logger.error("Calendar, 'Office' is unavailable.")
            return "The calendar Office is unavailable sir!"
        elif err_code == '(-1712)':  # If an event takes 2+ minutes, the Apple Event Manager reports a time-out error.
            failure = f"{source_app}/event took an unusually long time to respond/complete.\nInclude, " \
                      f"'with timeout of 300 seconds' to your {meeting_file} right after the " \
                      f"'tell application {source_app}' step and 'end timeout' before the 'end tell' step."
        elif err_code in ['(-10810)', '(-609)', '(-600)']:  # If unable to launch the app or app terminates.
            apps(f'Launch {meeting_file}')
        if not failure:
            failure = f"Unable to read {source_app} - [{error}]\n{err_msg}"
        logger.error(failure)
        failure = failure.replace('"', '')  # An identifier can’t go after this “"”
        os.system(f"""osascript -e 'display notification "{failure}" with title "Jarvis"'""")
        return f"I was unable to read your {source_app} sir! Please make sure it is in sync."
    events = out.decode().strip()
    if not events or events == ',':
        return "You don't have any meetings in the next 12 hours sir!"
    # the script output interleaves names and ", date <time>" tokens; tag separators so both split cleanly
    events = events.replace(', date ', ' rEpLaCInG ')
    event_time = events.split('rEpLaCInG')[1:]
    event_name = events.split('rEpLaCInG')[0].split(', ')
    event_name = [i.strip() for n, i in enumerate(event_name) if i not in event_name[n + 1:]]  # remove duplicates
    count = len(event_time)
    [event_name.remove(e) for e in event_name if len(e) <= 5] if count != len(event_name) else None
    meeting_status = f'You have {count} meetings in the next 12 hours sir! ' if count > 1 else ''
    events = {}
    for i in range(count):
        if i < len(event_name):
            event_time[i] = re.search(' at (.*)', event_time[i]).group(1).strip()
            dt_string = datetime.strptime(event_time[i], '%I:%M:%S %p')
            event_time[i] = dt_string.strftime('%I:%M %p')  # drop the seconds for a cleaner spoken time
            events.update({event_name[i]: event_time[i]})
    # announce meetings in chronological order
    ordered_data = sorted(events.items(), key=lambda x: datetime.strptime(x[1], '%I:%M %p'))
    for index, meeting in enumerate(ordered_data):
        if count == 1:
            meeting_status += f"You have a meeting at {meeting[1]} sir! {meeting[0].upper()}. "
        else:
            meeting_status += f"{meeting[0]} at {meeting[1]}, " if index + 1 < len(ordered_data) else \
                f"{meeting[0]} at {meeting[1]}."
    return meeting_status
def system_vitals() -> None:
    """Reads system vitals on MacOS and speaks them out.

    See Also:
        - Jarvis will suggest a reboot if the system uptime is more than 2 days.
        - If confirmed, invokes `restart <https://thevickypedia.github.io/Jarvis/#jarvis.restart>`__ function.
    """
    if not root_password:
        speaker.say("You haven't provided a root password for me to read system vitals sir! "
                    "Add the root password as an environment variable for me to read.")
        return

    version = host_info(required='version')
    model = host_info(required='model')
    cpu_temp, gpu_temp, fan_speed, output = None, None, None, ""
    if version >= 12:  # smc information is available only on 12+ versions (tested on 11.3, 12.1 and 16.1 versions)
        critical_info = [each.strip() for each in (os.popen(
            f'echo {root_password} | sudo -S powermetrics --samplers smc -i1 -n1'
        )).read().split('\n') if each != '']
        sys.stdout.write('\r')
        for info in critical_info:
            # NOTE: 'str.strip(chars)' removes a *character set*, not a prefix, and could
            # chew characters off the value itself; split once on ': ' to isolate the value.
            if 'CPU die temperature' in info:
                cpu_temp = info.split(': ', 1)[-1].replace(' C', '').strip()
            if 'GPU die temperature' in info:
                gpu_temp = info.split(': ', 1)[-1].replace(' C', '').strip()
            if 'Fan' in info:
                fan_speed = info.split(': ', 1)[-1].replace(' rpm', '').strip()
    else:
        # older macOS: pull the fan speed out of a one-shot spindump report
        fan_speed = check_output(
            f'echo {root_password} | sudo -S spindump 1 1 -file /tmp/spindump.txt > /dev/null 2>&1;grep "Fan speed" '
            '/tmp/spindump.txt;sudo rm /tmp/spindump.txt', shell=True).decode('utf-8')

    if cpu_temp:
        cpu = f'Your current average CPU temperature is {format_nos(temperature.c2f(extract_nos(cpu_temp)))}°F. '
        output += cpu
        speaker.say(cpu)
    if gpu_temp:
        gpu = f'GPU temperature is {format_nos(temperature.c2f(extract_nos(gpu_temp)))}°F. '
        output += gpu
        speaker.say(gpu)
    if fan_speed:
        fan = f'Current fan speed is {format_nos(extract_nos(fan_speed))} RPM. '
        output += fan
        speaker.say(fan)

    restart_time = datetime.fromtimestamp(boot_time())
    second = (datetime.now() - restart_time).total_seconds()
    restart_time = datetime.strftime(restart_time, "%A, %B %d, at %I:%M %p")
    restart_duration = time_converter(seconds=second)
    output += f'Restarted on: {restart_time} - {restart_duration} ago from now.'
    if os.environ.get('called_by_offline'):
        speaker.say(output)
        return
    sys.stdout.write(f'\r{output}')
    speaker.say(f'Your {model} was last booted on {restart_time}. '
                f'Current boot time is: {restart_duration}.')
    if second >= 172_800:  # 2 days in seconds
        if boot_extreme := re.search('(.*) days', restart_duration):
            warn = int(boot_extreme.group().replace(' days', '').strip())
            speaker.say(f'Sir! your {model} has been running continuously for more than {warn} days. You must '
                        f'consider a reboot for better performance. Would you like me to restart it for you sir?')
            speaker.runAndWait()
            response = listener(3, 3)
            if any(word in response.lower() for word in keywords.ok()):
                logger.info(f'JARVIS::Restarting {host_info("model")}')
                restart(target='PC_Proceed')
def get_ssid() -> str:
    """Gets SSID of the network connected.

    Returns:
        str:
            WiFi or Ethernet SSID, or ``None`` when the lookup fails.
    """
    process = Popen(
        ['/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport', '-I'],
        stdout=PIPE)
    out, err = process.communicate()
    if error := process.returncode:
        logger.error(f"Failed to fetch SSID with exit code: {error}\n{err}")
        return None  # previously fell through and tried to parse (possibly empty) output
    # split each 'key: value' line exactly once so values containing ': ' (or lines
    # without it) cannot break the dict() construction
    # noinspection PyTypeChecker
    return dict(map(str.strip, info.split(': ', 1)) for info in
                out.decode('utf-8').split('\n')[:-1] if ': ' in info).get('SSID')
class PersonalCloud:
    """Controller for `Personal Cloud <https://github.com/thevickypedia/personal_cloud>`__.

    >>> PersonalCloud

    References:
        `PersonalCloud README.md <https://github.com/thevickypedia/personal_cloud/blob/main/README.md>`__

    See Also:
        PersonalCloud integration requires Admin privileges for the default ``Terminal``.

        Step 1:
            - Mac OS 10.14.* and higher - System Preferences -> Security & Privacy -> Privacy -> Full Disk Access
            - Mac OS 10.13.* and lower - System Preferences -> Security & Privacy -> Privacy -> Accessibility
        Step 2:
            Unlock for admin privileges. Click on the "+" icon. Select Applications -> Utilities -> Terminal
    """

    @staticmethod
    def get_port() -> int:
        """Chooses a TCP PORT number dynamically that is not being used to ensure we don't rely on a single port.

        - Well-Known ports: 0 to 1023
        - Registered ports: 1024 to 49151
        - Dynamically available: 49152 to 65535

        See Also:
            - Alternate to active_sessions ->
            - ``check_output(f"echo {PASSWORD} | sudo -S lsof -PiTCP -sTCP:LISTEN 2>&1;", shell=True).decode('utf-8')``

        Returns:
            int:
                Randomly chosen port number that is not in use.
        """
        active_sessions = check_output("netstat -anvp tcp | awk 'NR<3 || /LISTEN/' 2>&1;", shell=True).decode('utf-8')
        # netstat prints ports as strings (e.g. '*.49152'); cast to int so the membership
        # test against the randomly drawn int below can actually match
        active_ports = [int(port) for port in
                        (row.split()[3].split('.')[-1] for index, row in enumerate(active_sessions.split('\n'))
                         if row and index > 1) if port.isdigit()]
        while True:  # keep drawing until a free port is found; a single draw could collide and return None
            port = randrange(49152, 65535)
            if port not in active_ports:
                return port

    @staticmethod
    def delete_repo() -> None:
        """Called during enable and disable to delete any existing bits for a clean start next time."""
        os.system(f'rm -rf {home}/personal_cloud')  # delete repo for a fresh start

    # noinspection PyUnresolvedReferences
    @staticmethod
    def enable() -> None:
        """Enables `personal cloud <https://github.com/thevickypedia/personal_cloud>`__.

        Notes:
            - Clones ``personal_cloud`` repo in a dedicated Terminal.
            - Creates a virtual env and installs the requirements within it (ETA: ~20 seconds)
            - If ``personal_cloud_host`` env var is provided, Jarvis will mount the drive if connected to the device.
            - Sets env vars required for the personal cloud.
            - Generates random username and passphrase for login info.
            - Triggers personal cloud using another Terminal session.
            - Sends an SMS with ``endpoint``, ``username`` and ``password`` to the ``phone_number``.
        """
        personal_cloud.delete_repo()
        initial_script = f"cd {home} && git clone -q https://github.com/thevickypedia/personal_cloud.git && " \
                         f"cd personal_cloud && python3 -m venv venv && source venv/bin/activate && " \
                         f"pip3 install -r requirements.txt"

        try:
            apple_script('Terminal').do_script(initial_script)
        except (CommandError, EventError) as ps_error:
            logger.error(ps_error)
            notify(user=offline_receive_user, password=offline_receive_pass, number=phone_number,
                   body="Sir! I was unable to trigger your personal cloud due to lack of permissions.\n"
                        "Please check the log file.")
            return

        personal_cloud_port = personal_cloud.get_port()
        personal_cloud_username = ''.join(choices(ascii_letters, k=10))
        personal_cloud_password = ''.join(choices(ascii_letters + digits, k=10))
        # keep the raw env value: wrapping it in quotes here made the isdir() check below
        # test a path containing literal quote characters (and 'None' when unset), so it always failed
        personal_cloud_host = os.environ.get('personal_cloud_host')

        # export PORT for both ngrok and exec scripts as they will be running in different Terminal sessions
        ngrok_script = f"cd {home}/personal_cloud && export port={personal_cloud_port} && " \
                       f"source venv/bin/activate && cd helper_functions && python3 ngrok.py"
        # the '&&' separator after the host_path export was missing, producing a malformed command
        exec_script = f"export host_path='{personal_cloud_host}' && " if personal_cloud_host and \
            os.path.isdir(personal_cloud_host) else ''
        exec_script += f"export port={personal_cloud_port} && " \
                       f"export username={personal_cloud_username} && " \
                       f"export password={personal_cloud_password} && " \
                       f"export gmail_user={gmail_user} && " \
                       f"export gmail_pass={gmail_pass} && " \
                       f"export recipient={icloud_user} && " \
                       f"cd {home}/personal_cloud && source venv/bin/activate && python3 authserver.py"

        cloned_path = f'{home}/personal_cloud'
        while True:  # wait for the requirements to be installed after the repo was cloned
            packages = [path.stem.split('-')[0] for path in Path(cloned_path).glob('**/site-packages/*')]
            if packages and not [req for req in [pkg.partition('==')[0] for pkg in
                                                 Path(f'{cloned_path}/requirements.txt').read_text().splitlines()] if
                                 req not in packages]:
                sleep(5)  # give some breathing time for indexing
                apple_script('Terminal').do_script(exec_script)
                apple_script('Terminal').do_script(ngrok_script)
                break
            sleep(1)  # avoid a busy spin while pip is still installing

        while True:  # wait for the endpoint url (as file) to get generated within personal_cloud
            if os.path.exists(f'{cloned_path}/helper_functions/url'):
                with open(f'{cloned_path}/helper_functions/url', 'r') as file:
                    url = file.read()  # commit # dfc37853dfe232e268843cbe53719bd9a09903c4 on personal_cloud
                if url.startswith('http'):
                    notify(user=offline_receive_user, password=offline_receive_pass, number=phone_number,
                           body=f"URL: {url}\nUsername: {personal_cloud_username}\nPassword: {personal_cloud_password}")
                else:
                    notify(user=offline_receive_user, password=offline_receive_pass, number=phone_number,
                           body="Unable to start ngrok! Please check the logs for more information.")
                break
            sleep(1)  # avoid a busy spin while ngrok is starting up

    @staticmethod
    def disable() -> None:
        """Kills `authserver.py <https://git.io/JchR5>`__ and `ngrok.py <https://git.io/JchBu>`__ to stop hosting.

        This eliminates the hassle of passing args and handling threads.
        """
        pid_check = check_output("ps -ef | grep 'authserver.py\\|ngrok.py'", shell=True)
        pid_list = pid_check.decode('utf-8').split('\n')
        for pid_info in pid_list:
            # NOTE(review): the 'or' keeps any line missing either '/bin/sh' or 'grep';
            # 'and' may have been intended — left as-is to preserve behavior, confirm before changing
            if pid_info and 'Library' in pid_info and ('/bin/sh' not in pid_info or 'grep' not in pid_info):
                os.system(f'kill -9 {pid_info.split()[1]} >/dev/null 2>&1')  # redirects stderr output to stdout
        personal_cloud.delete_repo()
def vpn_server_check() -> bool:
    """Checks if an instance of VPN Server is running.

    Returns:
        bool:
            Returns ``True`` if an instance of ``vpn.py`` is currently running.
    """
    pid_check = check_output("ps -ef | grep vpn.py", shell=True)
    pid_list = pid_check.decode('utf-8').split('\n')
    for id_ in pid_list:
        # skip the 'grep' process itself and the '/bin/sh -c' wrapper spawned by shell=True
        if id_ and 'grep' not in id_ and '/bin/sh' not in id_:
            return True
    return False  # annotated '-> bool'; previously fell through and returned None
def vpn_server(operation: str) -> None:
    """Automator to ``START`` or ``STOP`` the VPN portal.

    Args:
        operation: Takes ``START`` or ``STOP`` as an argument.

    See Also:
        - Check Read Me in `vpn-server <https://git.io/JzCbi>`__ for more information.
        - Pulls the latest version before starting the server.
        - Adds an env var ``ENV: Jarvis`` so, ``vpn-server`` can log the details in a log file.
    """
    # assemble the shell pipeline step by step, then run it in one shot
    steps = (
        f'cd {home}/vpn-server',
        'git pull --quiet',
        'source venv/bin/activate',
        'export ENV=Jarvis',
        f'python vpn.py {operation}',
        'exit',
    )
    os.system(' && '.join(steps))
def internet_checker() -> Union[Speedtest, bool]:
    """Uses speed test api to check for internet connection.

    Returns:
        ``Speedtest`` or bool:
            - On success, returns Speedtest module.
            - On failure, returns boolean False.
    """
    try:
        instance = Speedtest()
    except ConfigRetrievalError:
        # Speedtest could not download its config, i.e. no internet
        return False
    return instance
def morning() -> None:
    """Checks for the current time of the day and day of the week to trigger a series of morning messages."""
    clock = datetime.now()
    # weekdays only; the seconds check narrows the trigger window to the first 10 seconds
    # of the minute, limiting repeated announcements while the clock still reads 07:00
    if clock.strftime('%A') not in ['Saturday', 'Sunday'] and int(clock.strftime('%S')) < 10:
        speaker.say("Good Morning. It's 7 AM.")
        # toggle the shared flag around the weather() call (flag is read elsewhere)
        time_travel.has_been_called = True
        weather()
        time_travel.has_been_called = False
        volume_controller(level=100)  # raise volume for the announcement...
        speaker.runAndWait()
        volume_controller(level=50)  # ...then restore the default 50%
def sentry_mode() -> None:
    """Listens forever and invokes ``activator()`` when heard something. Stops when ``STOPPER`` flag is set to ``True``.

    References:
        Invokes `morning <https://thevickypedia.github.io/Jarvis/#jarvis.morning>`__ function at 7 AM.
    """
    while True:
        if datetime.now().strftime("%I:%M %p") == '07:00 AM':
            morning()
        if STOPPER.get('status'):  # set by exit_process()/restart() to break out of the loop
            break
        sys.stdout.write("\rSentry Mode")
        try:
            # 'source' is the module-level Microphone stream opened in __main__
            activator(recognizer.recognize_google(recognizer.listen(source=source, phrase_time_limit=5)))
        except (UnknownValueError, WaitTimeoutError, RequestError):
            sys.stdout.write("\r")  # nothing intelligible heard; clear the status line and keep listening
        except RuntimeError:
            logger.error(f'Received a RuntimeError while executing regular interaction.\n{format_exc()}')
            restart(quiet=True, quick=True)
        except KeyboardInterrupt:
            exit_process()
            terminator()
def activator(key_original: str) -> None:
    """When invoked by ``sentry_mode()``, checks for the right keyword to wake up and gets into action.

    Args:
        key_original: Takes the processed string from ``sentry_mode()`` as input.
    """
    key = key_original.lower()
    key_split = key.split()
    if 'jarvis' not in key and 'buddy' not in key:
        return  # not addressed to the assistant; go back to listening

    logger.info(f'Woke up for: "{key_original}"')
    Thread(target=playsound, args=['indicators/acknowledgement.mp3']).start()  # audible ack in the background
    sys.stdout.write("\r")
    time_of_day = ['morning', 'night', 'afternoon', 'after noon', 'evening', 'goodnight']
    wake_up_words = ['look alive', 'wake up', 'wakeup', 'show time', 'showtime', 'time to work', 'spin up']
    if [word for word in key_split if word in time_of_day]:
        time_travel.has_been_called = True
        if 'night' in key_split or 'goodnight' in key_split:
            Thread(target=pc_sleep).start()
        time_travel()
    elif 'you there' in key:
        speaker.say(f'{choice(wake_up1)}')
        initialize()
    elif any(word in key for word in wake_up_words):
        speaker.say(f'{choice(wake_up2)}')
        initialize()
    elif key == 'jarvis':
        speaker.say(f'{choice(wake_up3)}')
        initialize()
    else:
        # strip the wake word(s) and treat whatever remains as a command
        converted = ' '.join([i for i in key_original.split() if i.lower() not in ['buddy', 'jarvis']])
        if converted:
            split(converted.strip())
        else:
            speaker.say(f'{choice(wake_up3)}')
            initialize()
    try:
        speaker.runAndWait()
    except RuntimeError:
        restart(quiet=True, quick=True)  # recover when the speech loop is already running
def size_converter(byte_size: int) -> str:
    """Gets the current memory consumed and converts it to human friendly format.

    Args:
        byte_size: Receives byte size as argument.

    Returns:
        str:
            Converted understandable size.
    """
    if not byte_size:
        # no size given: fall back to this process's peak resident set size
        from resource import RUSAGE_SELF, getrusage
        byte_size = getrusage(RUSAGE_SELF).ru_maxrss
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    exponent = int(floor(log(byte_size, 1024)))
    scaled = round(byte_size / (1024 ** exponent), 2)
    return f'{scaled} {units[exponent]}'
def exit_message() -> str:
    """Variety of exit messages based on day of week and time of day.

    Returns:
        str:
            A greeting bye message.
    """
    now = datetime.now()
    am_pm = now.strftime("%p")  # current part of day (AM/PM)
    hour = int(now.strftime("%I"))  # current hour on a 12-hour clock
    day = now.strftime("%A")  # current day
    early_afternoon = hour == 12 or hour < 3  # 12:xx PM through 2:xx PM
    if am_pm == 'AM':
        exit_msg = f"Have a nice day, and happy {day}." if hour < 10 else f"Enjoy your {day}."
    elif early_afternoon and day in weekend:
        exit_msg = "Have a nice afternoon, and enjoy your weekend."
    elif early_afternoon:
        exit_msg = "Have a nice afternoon."
    elif hour < 6 and day in weekend:
        exit_msg = "Have a nice evening, and enjoy your weekend."
    elif hour < 6:
        exit_msg = "Have a nice evening."
    elif day in weekend:
        exit_msg = "Have a nice night, and enjoy your weekend."
    else:
        exit_msg = "Have a nice night."
    if event := celebrate():
        exit_msg += f'\nAnd by the way, happy {event}'
    return exit_msg
def terminator() -> None:
    """Exits the process with specified status without calling cleanup handlers, flushing stdio buffers, etc.

    Using this, eliminates the hassle of forcing multiple threads to stop.
    """
    # os._exit skips atexit handlers and tears down daemon threads immediately
    # noinspection PyUnresolvedReferences,PyProtectedMember
    os._exit(0)
def remove_files() -> None:
    """Function that deletes multiple files when called during exit operation.

    Warnings:
        Deletes:
            - all ``.lock`` files created for alarms and reminders.
            - ``location.yaml`` file, to recreate a new one next time around.
            - ``meetings`` file, to recreate a new one next time around.
    """
    # plain loops instead of throwaway list comprehensions used purely for side effects
    for directory in ('alarm', 'reminder'):
        for file in os.listdir(directory):
            if file != '.keep':
                os.remove(os.path.join(directory, file))
    for file in ('location.yaml', 'meetings'):
        if os.path.isfile(file):
            os.remove(file)
def exit_process() -> None:
    """Function that holds the list of operations done upon exit."""
    STOPPER['status'] = True  # signals sentry_mode()'s loop to stop
    logger.info('JARVIS::Stopping Now::STOPPER flag has been set to True')
    reminders = {}
    alarms = [file for file in os.listdir('alarm') if file != '.keep' and file != '.DS_Store']
    # reminder lock files are named '<time>|<message>.lock'; split into time -> message pairs
    for file in os.listdir('reminder'):
        if file != '.keep' and file != '.DS_Store':
            split_val = file.replace('.lock', '').split('|')
            reminders.update({split_val[0]: split_val[-1]})
    if reminders:
        logger.info(f'JARVIS::Deleting Reminders - {reminders}')
        if len(reminders) == 1:
            speaker.say('You have a pending reminder sir!')
        else:
            speaker.say(f'You have {len(reminders)} pending reminders sir!')
        for key, value in reminders.items():
            # underscores in filenames stand in for spaces/colons; rebuild speakable text
            speaker.say(f"{value.replace('_', ' ')} at "
                        f"{key.replace('_', ':').replace(':PM', ' PM').replace(':AM', ' AM')}")
    if alarms:
        logger.info(f'JARVIS::Deleting Alarms - {alarms}')
        alarms = ', and '.join(alarms) if len(alarms) != 1 else ''.join(alarms)
        alarms = alarms.replace('.lock', '').replace('_', ':').replace(':PM', ' PM').replace(':AM', ' AM')
        sys.stdout.write(f"\r{alarms}")
        speaker.say(f'You have a pending alarm at {alarms} sir!')
    if reminders or alarms:
        speaker.say('This will be removed while shutting down!')
    speaker.say('Shutting down now sir!')
    speaker.say(exit_message())
    try:
        speaker.runAndWait()
    except RuntimeError:
        logger.error(f'Received a RuntimeError while self terminating.\n{format_exc()}')
    remove_files()  # purge lock files and cached yaml/meetings for a clean next start
    sys.stdout.write(f"\rMemory consumed: {size_converter(0)}"
                     f"\nTotal runtime: {time_converter(perf_counter())}")
def extract_nos(input_: str) -> float:
    """Extracts number part from a string.

    Args:
        input_: Takes string as an argument.

    Returns:
        float:
            Float values.
    """
    # every run of digits is joined with a single dot, so "11.3" -> 11.3 and "14,2" -> 14.2
    digit_groups = re.findall(r"\d+", input_)
    return float(".".join(digit_groups))
def format_nos(input_: float) -> int:
    """Removes ``.0`` float values.

    Args:
        input_: Int if found, else returns the received float value.

    Returns:
        int:
            Formatted integer.
    """
    # only whole-number floats are narrowed to int; everything else passes through untouched
    if isinstance(input_, float) and input_.is_integer():
        return int(input_)
    return input_
def extract_str(input_: str) -> str:
    """Extracts strings from the received input.

    Args:
        input_: Takes a string as argument.

    Returns:
        str:
            A string after removing special characters.
    """
    blocked = {',', '.', '?', '-', ';', '!', ':'}  # punctuation stripped along with digits
    return ''.join(char for char in input_ if not char.isdigit() and char not in blocked)
def host_info(required: str) -> Union[str, float]:
    """Gets both the model and version of the hosted device.

    Args:
        required: model or version

    Returns:
        str or float:
            Model or version of the machine based on the arg received.
    """
    device = (check_output("sysctl hw.model", shell=True)).decode('utf-8').split('\n')  # gets model info
    result = list(filter(None, device))[0]  # removes empty string ('\n')
    # 'str.strip("hwmodel ")' removed a *character set* and chewed trailing letters off the
    # model name (e.g. 'MacBookPro' became 'MacBookPr'); drop the prefix once instead
    model = extract_str(result).replace('hwmodel', '', 1).strip()
    version = extract_nos(''.join(device))
    if required == 'model':
        return model
    elif required == 'version':
        return version
def pc_sleep() -> None:
    """Locks the host device using osascript and reduces brightness to bare minimum."""
    Thread(target=decrease_brightness).start()
    # os.system("""osascript -e 'tell app "System Events" to sleep'""")  # requires restarting Jarvis manually
    # ctrl+cmd+q locks the screen instead of sleeping, so Jarvis keeps running
    os.system("""osascript -e 'tell application "System Events" to keystroke "q" using {control down, command down}'""")
    if not (report.has_been_called or time_travel.has_been_called):
        speaker.say(choice(ack))  # acknowledge only when invoked directly, not via report/time_travel
def stop_terminal() -> None:
    """Uses pid to kill terminals as terminals await user confirmation interrupting shutdown/restart."""
    process_table = check_output("ps -ef | grep 'iTerm\\|Terminal'", shell=True).decode('utf-8')
    for row in process_table.split('\n'):
        # ignore empty rows, the grep itself, and login shells
        if row and 'Applications' in row and '/usr/bin/login' not in row:
            os.system(f'kill -9 {row.split()[1]} >/dev/null 2>&1')  # redirects stderr output to stdout
# noinspection PyUnresolvedReferences,PyProtectedMember
def restart(target: str = None, quiet: bool = False, quick: bool = False) -> None:
    """Restart triggers ``restart.py`` which in turn starts Jarvis after 5 seconds.

    Notes:
        - Doing this changes the PID to avoid any Fatal Errors occurred by long running threads.
        - restart(PC) will restart the machine after getting confirmation.

    Warnings:
        - ``restart(target=!PC)`` will restart the machine without getting any approval as the confirmation is requested
        - in `system_vitals <https://thevickypedia.github.io/Jarvis/#jarvis.system_vitals>`__.
        - This is done only when the system vitals are read, and the uptime is more than 2 days.

    Args:
        target:
            - ``None``: Restarts Jarvis to reset PID
            - ``PC``: Restarts the machine after getting confirmation.
        quiet: If a boolean ``True`` is passed, a silent restart will be performed.
        quick:
            - If a boolean ``True`` is passed, local IP values are stored in ``.env`` file for quick re-use.
            - Converts ``hallway_ip``, ``bedroom_ip`` and ``kitchen_ip`` into a string before storing it as env vars.
            - Doesn't convert ``tv_ip`` as a string as it is already one.
    """
    offline = os.environ.get('called_by_offline')
    if target:
        if offline:
            speaker.say(f"ERROR::Cannot restart {host_info(required='model')} via offline communicator.")
            return
        if target == 'PC':
            speaker.say(f'{choice(confirmation)} restart your {host_info("model")}?')
            speaker.runAndWait()
            converted = listener(3, 3)
        else:
            converted = 'yes'  # any non-'PC' target (e.g. 'PC_Proceed') skips confirmation
        if any(word in converted.lower() for word in keywords.ok()):
            stop_terminal()
            call(['osascript', '-e', 'tell app "System Events" to restart'])
            raise KeyboardInterrupt
        else:
            speaker.say("Machine state is left intact sir!")
            return
    STOPPER['status'] = True  # stop sentry_mode()'s loop before replacing the process
    logger.info('JARVIS::Restarting Now::STOPPER flag has been set.')
    logger.info(f'Called by {sys._getframe(1).f_code.co_name}')  # record which function requested the restart
    sys.stdout.write(f"\rMemory consumed: {size_converter(0)}\tTotal runtime: {time_converter(perf_counter())}")
    if not quiet:
        try:
            speaker.say('Restarting now sir! I will be up and running momentarily.')
            speaker.runAndWait() if not offline else None
        except RuntimeError:
            logger.error(f'Received a RuntimeError while restarting.\n{format_exc()}')
    if quick:
        # persist discovered device IPs so the next start can skip the local network scan
        set_key(dotenv_path='.env', key_to_set='hallway_ip', value_to_set=str(hallway_ip)) if hallway_ip else None
        set_key(dotenv_path='.env', key_to_set='bedroom_ip', value_to_set=str(bedroom_ip)) if bedroom_ip else None
        set_key(dotenv_path='.env', key_to_set='kitchen_ip', value_to_set=str(kitchen_ip)) if kitchen_ip else None
        set_key(dotenv_path='.env', key_to_set='tv_ip', value_to_set=tv_ip) if tv_ip else None
        set_key(dotenv_path='.env', key_to_set='tv_mac', value_to_set=tv_mac) if tv_mac else None
    os.system('python3 restart.py')
    exit(1)  # Don't call terminator() as, os._exit(1) in that func will kill the background threads running in parallel
def shutdown(proceed: bool = False) -> None:
    """Gets confirmation and turns off the machine.

    Args:
        proceed: Boolean value whether or not to get confirmation.
    """
    if proceed:
        converted = 'yes'
    else:
        speaker.say(f"{choice(confirmation)} turn off the machine?")
        speaker.runAndWait()
        converted = listener(3, 3)
    if converted == 'SR_ERROR':
        return  # speech recognition failed; do nothing
    if any(word in converted.lower() for word in keywords.ok()):
        stop_terminal()
        call(['osascript', '-e', 'tell app "System Events" to shut down'])
        raise KeyboardInterrupt
    speaker.say("Machine state is left intact sir!")
def voice_changer(change: str = None) -> None:
    """Defaults to a particular voice module.

    Alternatively the user can choose from a variety of voices available for that particular device.

    Args:
        change: Initiates changing voices with the volume ID given in statement.
    """
    alter_msg = 0  # tracks how many alternatives have been offered, to vary the prompt wording
    voices = speaker.getProperty("voices")  # gets the list of voices available
    # noinspection PyTypeChecker,PyUnresolvedReferences
    avail_voices = len(voices)

    # noinspection PyUnresolvedReferences
    def voice_default(voice_id=(7, 0)) -> None:  # default values set as tuple
        """Sets default voice module number.

        Args:
            voice_id: Default voice ID.
        """
        speaker.setProperty("voice", voices[voice_id[0]].id)  # voice module #7 for MacOS

    if change:
        # pull any numbers out of the spoken request; otherwise cycle through every module
        if not (distribution := [int(s) for s in re.findall(r'\b\d+\b', change)]):  # walrus on if not distribution
            distribution = range(avail_voices)
        for module_id in distribution:
            if module_id < avail_voices:
                voice_default([module_id])  # passing a list as default is tuple and index values are used to reference
                sys.stdout.write(f'\rVoice module has been re-configured to {module_id}')
                if not alter_msg:
                    speaker.say('Voice module has been re-configured sir! Would you like me to retain this?')
                    alter_msg = 1
                elif alter_msg == 1:
                    speaker.say("Here's an example of one of my other voices sir!. Would you like me to use this one?")
                    alter_msg = 2
                else:
                    speaker.say('How about this one sir?')
            else:
                speaker.say(f'The voice module number {module_id} is not available for your device sir! '
                            f'You may want to try a module number between 0 and {avail_voices - 1}')
            speaker.runAndWait()
            keyword = listener(3, 3)
            if keyword == 'SR_ERROR':
                voice_default()
                speaker.say("Sorry sir! I had trouble understanding. I'm back to my default voice.")
                return
            elif 'exit' in keyword or 'quit' in keyword or 'Xzibit' in keyword:
                voice_default()
                speaker.say('Reverting the changes to default voice module sir!')
                return
            elif any(word in keyword.lower() for word in keywords.ok()):
                speaker.say(choice(ack))  # user accepted the current voice; keep it
                return
            elif custom_id := [int(id_) for id_ in re.findall(r'\b\d+\b', keyword)]:
                voice_changer(str(custom_id))  # user asked for a specific module; recurse with it
                break
    else:
        voice_default()  # no request given (e.g. during startup): apply the default module
def clear_logs() -> None:
    """Deletes log files that were updated before 48 hours."""
    now = int(datetime.now().timestamp())  # hoisted so the cutoff is identical for every file
    for file in os.listdir('logs'):
        # keep the placeholder and anything modified within the last 48 hours (172_800 seconds)
        if file != '.keep' and now - int(os.stat(f'logs/{file}').st_mtime) > 172_800:
            os.remove(f"logs/{file}")
def starter() -> None:
    """Initiates crucial functions which needs to be called during start up.

    - Loads the ``.env`` file so that all the necessary credentials and api keys can be accessed as ``ENV vars``

    Methods:
        volume_controller(): To default the master volume 50%.
        voice_changer(): To change the voice to default value.
        clear_logs(): To purge log files older than 48 hours.
    """
    volume_controller(level=50)
    voice_changer()
    clear_logs()
    if os.path.isfile('.env'):
        logger.info('Loading .env file.')
        load_dotenv(dotenv_path='.env', verbose=True, override=True)  # loads the .env file
if __name__ == '__main__':
    if system() != 'Darwin':
        exit('Unsupported Operating System.\nWindows support was recently deprecated. '
             'Refer https://github.com/thevickypedia/Jarvis/commit/cf54b69363440d20e21ba406e4972eb058af98fc')
    logger.info('JARVIS::Starting Now')

    sys.stdout.write('\rVoice ID::Female: 1/17 Male: 0/7')  # Voice ID::reference
    speaker = init()  # initiates speaker
    recognizer = Recognizer()  # initiates recognizer that uses google's translation
    keywords = Keywords()  # stores Keywords() class from helper_functions/keywords.py
    conversation = Conversation()  # stores Conversation() class from helper_functions/conversation.py
    database = Database()  # initiates Database() for TO-DO items
    temperature = Temperature()  # initiates Temperature() for temperature conversions
    personal_cloud = PersonalCloud()  # initiates PersonalCloud() to enable or disable HDD hosting
    limit = sys.getrecursionlimit()  # fetches current recursion limit
    sys.setrecursionlimit(limit * 10)  # increases the recursion limit by 10 times
    home = os.path.expanduser('~')  # gets the path to current user profile
    starter()  # initiates crucial functions which needs to be called during start up

    # credentials and API keys are sourced from the environment (starter() loads .env first)
    git_user = os.environ.get('git_user')
    git_pass = os.environ.get('git_pass')
    weather_api = os.environ.get('weather_api')
    news_api = os.environ.get('news_api')
    maps_api = os.environ.get('maps_api')
    gmail_user = os.environ.get('gmail_user')
    gmail_pass = os.environ.get('gmail_pass')
    robinhood_user = os.environ.get('robinhood_user')
    robinhood_pass = os.environ.get('robinhood_pass')
    robinhood_qr = os.environ.get('robinhood_qr')
    birthday = os.environ.get('birthday')
    offline_receive_user = os.environ.get('offline_receive_user')
    offline_receive_pass = os.environ.get('offline_receive_pass')
    offline_phrase = os.environ.get('offline_phrase')
    icloud_user = os.environ.get('icloud_user')
    icloud_pass = os.environ.get('icloud_pass')
    icloud_recovery = os.environ.get('icloud_recovery')
    phone_number = os.environ.get('phone_number')
    think_id = os.environ.get('think_id')
    router_pass = os.environ.get('router_pass')
    tv_client_key = os.environ.get('tv_client_key')
    root_password = os.environ.get('root_password')

    if st := internet_checker():
        sys.stdout.write(f'\rINTERNET::Connected to {get_ssid()}. Scanning localhost for connected devices.')
    else:
        # no internet: report, print runtime stats and bail out hard
        sys.stdout.write('\rBUMMER::Unable to connect to the Internet')
        speaker.say("I was unable to connect to the internet sir! Please check your connection settings and retry.")
        speaker.runAndWait()
        sys.stdout.write(f"\rMemory consumed: {size_converter(0)}"
                         f"\nTotal runtime: {time_converter(perf_counter())}")
        terminator()

    # Retrieves devices IP by doing a local IP range scan using Netgear API
    # Note: This can also be done my manually passing the IP addresses in a list (for lights) or string (for TV)
    # Using Netgear API will avoid the manual change required to rotate the IPs whenever the router is restarted
    # noinspection is used since, variables declared after 'and' in walrus operator are recognized as unbound variables
    if (hallway_ip := os.environ.get('hallway_ip')) and (kitchen_ip := os.environ.get('kitchen_ip')) and \
            (bedroom_ip := os.environ.get('bedroom_ip')) and (tv_ip := os.environ.get('tv_ip')):
        # env vars hold repr()'d lists written by restart(quick=True); eval() restores them
        hallway_ip = eval(hallway_ip)
        # noinspection PyUnboundLocalVariable
        kitchen_ip = eval(kitchen_ip)
        # noinspection PyUnboundLocalVariable
        bedroom_ip = eval(bedroom_ip)
        unset_key(dotenv_path='.env', key_to_unset='hallway_ip')
        unset_key(dotenv_path='.env', key_to_unset='kitchen_ip')
        unset_key(dotenv_path='.env', key_to_unset='bedroom_ip')
        unset_key(dotenv_path='.env', key_to_unset='tv_ip')
        unset_key(dotenv_path='.env', key_to_unset='tv_mac')
    else:
        local_devices = LocalIPScan(router_pass=router_pass)
        hallway_ip = [val for val in local_devices.hallway()]
        kitchen_ip = [val for val in local_devices.kitchen()]
        bedroom_ip = [val for val in local_devices.bedroom()]
        tv_ip, tv_mac = local_devices.tv()

    if not root_password:
        sys.stdout.write('\rROOT PASSWORD is not set!')

    # warm_light is initiated with an empty dict and the key status is set to True when requested to switch to yellow
    # greet_check is used in initialize() to greet only for the first run
    # tv is set to an empty dict instead of TV() at the start to avoid turning on the TV unnecessarily
    tv_state, warm_light, greet_check, STOPPER = {}, {}, {}, {}

    # stores necessary values for geo location to receive the latitude, longitude and address
    options.default_ssl_context = create_default_context(cafile=where())
    geo_locator = Nominatim(scheme='http', user_agent='test/1', timeout=3)

    # checks modified time of location.yaml (if exists) and uses the data only if it was modified less than 72 hours ago
    if os.path.isfile('location.yaml') and \
            int(datetime.now().timestamp()) - int(os.stat('location.yaml').st_mtime) < 259_200:
        location_details = yaml_load(open('location.yaml'), Loader=FullLoader)
        current_lat = location_details['latitude']
        current_lon = location_details['longitude']
        location_info = location_details['address']
    else:
        current_lat, current_lon, location_info = location_services(device_selector())
        location_dumper = [{'latitude': current_lat}, {'longitude': current_lon}, {'address': location_info}]
        with open('location.yaml', 'w') as location_writer:
            for dumper in location_dumper:
                yaml_dump(dumper, location_writer, default_flow_style=False)

    # different responses for different conditions in sentry mode
    wake_up1 = ['For you sir - Always!', 'At your service sir!']
    wake_up2 = ['Up and running sir!', "We are online and ready sir!", "I have indeed been uploaded sir!",
                'My listeners have been activated sir!']
    wake_up3 = ["I'm here sir!"]

    confirmation = ['Requesting confirmation sir! Did you mean', 'Sir, are you sure you want to']
    ack = ['Check', 'Will do sir!', 'You got it sir!', 'Roger that!', 'Done sir!', 'By all means sir!', 'Indeed sir!',
           'Gladly sir!', 'Without fail sir!', 'Sure sir!', 'Buttoned up sir!', 'Executed sir!']

    weekend = ['Friday', 'Saturday']

    # {function_name}.has_been_called is use to denote which function has triggered the other
    report.has_been_called, locate_places.has_been_called, directions.has_been_called, google_maps.has_been_called, \
        time_travel.has_been_called = False, False, False, False, False
    for functions in [delete_todo, todo, add_todo]:
        functions.has_been_called = False

    sys.stdout.write(f"\rCurrent Process ID: {Process(os.getpid()).pid}\tCurrent Volume: 50%")

    if os.path.exists(f"{home}/JarvisHelper"):
        Thread(target=offline_communicator_initiate).start()
        Thread(target=offline_communicator).start()
    else:
        logger.error(f'Unable to initiate OfflineCommunicator since, JarvisHelper is unavailable in {home}')
    Thread(target=meeting_gatherer).start()
    Thread(target=playsound, args=['indicators/initialize.mp3']).start()

    # keep the microphone stream open for the lifetime of the process; sentry_mode() reads from it
    with Microphone() as source:
        recognizer.adjust_for_ambient_noise(source)
        sentry_mode()
|
ftpserver.py | # -*- coding: utf-8 -*-
"""Simple FTP Server"""
from __future__ import print_function
import argparse
import os
import sys
import threading
import time
import logging
# StaSh runtime object, injected into the script's globals by the shell.
_stash = globals()["_stash"]

try:
    import pyftpdlib
except ImportError:
    # pyftpdlib is not bundled with StaSh; install it on first run.
    print("Installing pyftpdlib...")
    _stash("pip install pyftpdlib")
    # StaSh stores the exit status of the last command in the "?" environment
    # variable.  os.getenv() returns a *string* (or None), never an int, so
    # the status must be compared against "0" -- the old `es != 0` comparison
    # was always true and made the script report failure (and exit) even
    # after a successful install.
    es = os.getenv("?")
    if es != "0":
        print(_stash.text_color("Failed to install pyftpdlib!", "red"))
        sys.exit(1)

from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.servers import FTPServer
from pyftpdlib.handlers import FTPHandler
def run(ns):
    """Start the FTP server described by the parsed arguments `ns`.

    Blocks until KeyboardInterrupt, then shuts the server down.
    """
    # Build the user database: a named account if one was given, otherwise
    # anonymous access with the same permissions.
    authorizer = DummyAuthorizer()
    if ns.user is None:
        authorizer.add_anonymous(ns.path, perm=ns.perm)
    else:
        authorizer.add_user(ns.user, ns.pswd, ns.path, perm=ns.perm)

    handler = FTPHandler
    handler.authorizer = authorizer
    handler.banner = "StaSh v{v} FTP-Server".format(v=_stash.__version__)

    host, port = "0.0.0.0", ns.port
    server = FTPServer((host, port), handler)
    server.max_cons = 128
    server.max_cons_per_ip = 128

    # Silence pyftpdlib's own logger so the console stays clean.
    ftp_logger = logging.getLogger("pyftpdlib")
    ftp_logger.setLevel(logging.CRITICAL)
    ftp_logger.propagate = False

    # server needs to run in a thread to be killable
    thr = threading.Thread(name="FTP-Server Thread", target=server.serve_forever)
    thr.daemon = True
    thr.start()
    print("FTP-Server started on {h}:{p}".format(h=host, p=str(port)))

    # Idle in the main thread so Ctrl-C can reach us.
    try:
        while True:
            time.sleep(0.2)
    except KeyboardInterrupt:
        print("Stopping Server...")
        server.close_all()
if __name__ == "__main__":
    # CLI entry point: parse options, validate the user/password pairing,
    # then start the server.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("-p", "--port", action="store", type=int, default=21, dest="port", help="port to listen on")
    parser.add_argument("-u", "--user", action="store", default=None, dest="user", help="username (default: anonymous)")
    parser.add_argument("--pswd", action="store", default=None, dest="pswd", help="password")
    parser.add_argument("--perm", action="store", default="elradfmwM", dest="perm", help="permissions of the user")
    parser.add_argument("--path", action="store", default=os.getcwd(), dest="path", help="path to serve")
    ns = parser.parse_args()
    # --user and --pswd must be supplied together (or both omitted for
    # anonymous access); reject one without the other.
    if (ns.user is not None) and (ns.pswd is None):
        print(_stash.text_color("Error: If user is given, pswd must also be given!", "red"))
        sys.exit(1)
    if (ns.pswd is not None) and (ns.user is None):
        print(_stash.text_color("Error: If pswd is given, user must also be given!", "red"))
        sys.exit(1)
    run(ns)
|
tpu_estimator.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import os
import signal
import threading
import time
import traceback
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import session_support
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_context
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.contrib.training.python.training import hparam
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
# Placeholder loss values used before the first real loss is available.
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
# Name/scope prefixes and collection keys used throughout this module.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
# Param keys that users may not set themselves; `batch_size` is injected.
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY]

# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False

# Register serialization for the iterations-per-loop resource variable so it
# can round-trip through a MetaGraph.
ops.register_proto_function(
    '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
    proto_type=variable_pb2.VariableDef,
    to_proto=resource_variable_ops._to_proto_fn,  # pylint: disable=protected-access
    from_proto=resource_variable_ops._from_proto_fn)  # pylint: disable=protected-access
def _create_global_step(graph):
  """Creates the global step variable in `graph` (or in the default graph).

  Args:
    graph: the `Graph` to create the variable in, or None for the default.

  Returns:
    The newly created resource variable for the global step.

  Raises:
    ValueError: if a global step variable already exists in the graph.
  """
  target_graph = graph if graph is not None else ops.get_default_graph()
  if training.get_global_step(target_graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with target_graph.as_default() as g:
    with g.name_scope(None):
      return variable_scope.get_variable(
          ops.GraphKeys.GLOBAL_STEP,
          shape=[],
          dtype=dtypes.int64,
          initializer=init_ops.zeros_initializer(),
          trainable=False,
          use_resource=True,
          collections=[ops.GraphKeys.GLOBAL_VARIABLES,
                       ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
  """Returns the singleton iterations-per-loop variable, creating it once.

  The variable is tracked in a dedicated collection so later calls find the
  same instance instead of creating a duplicate.
  """
  graph = ops.get_default_graph()
  collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
  existing = graph.get_collection(collection_name)
  if len(existing) > 1:
    raise RuntimeError('Multiple iterations_per_loop_var in collection.')
  if existing:
    return existing[0]
  # Not created yet; colocate with the global step so both live on the same
  # device.
  with ops.colocate_with(training_util.get_global_step()):
    with variable_scope.variable_scope(
        _TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
      return variable_scope.get_variable(
          _ITERATIONS_PER_LOOP_VAR,
          initializer=init_ops.zeros_initializer(),
          shape=[],
          dtype=dtypes.int32,
          trainable=False,
          collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
          use_resource=True)
def _sync_variables_ops():
  """Returns ops that read every trainable variable back to host memory.

  Reading each variable's value forces the TPU-updated copy to be synced to
  the host; check_numerics additionally guards against NaNs.
  """
  sync_ops = []
  for v in variables.trainable_variables():
    checked = array_ops.check_numerics(v.read_value(),
                                       'Gradient for %s is NaN' % v.name)
    sync_ops.append(checked.op)
  return sync_ops
def _increase_eval_step_op(iterations_per_loop):
  """Returns an op to increase the eval step for TPU evaluation.

  Args:
    iterations_per_loop: Tensor. The number of eval steps running in TPU
      system before returning to CPU host for each `Session.run`.

  Returns:
    An operation
  """
  eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
  # Estimator's evaluate loop already adds 1 per Session.run, so only the
  # remaining (iterations_per_loop - 1) steps are added here.
  delta = math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype)
  return state_ops.assign_add(eval_step, delta, use_locking=True)
class _SIGNAL(object):
  """Signal used to control the thread of infeed/outfeed.

  All preserved signals must be negative numbers. Positive numbers are used to
  indicate the number of iterations for next training/evaluation loop.
  """
  # Ask the worker thread to process the next batch of iterations.
  NEXT_BATCH = -1
  # Ask the worker thread to exit its loop.
  STOP = -2
class TPUEstimatorSpec(
    collections.namedtuple('TPUEstimatorSpec', [
        'mode',
        'predictions',
        'loss',
        'train_op',
        'eval_metrics',
        'export_outputs',
        'scaffold_fn',
        'host_call'
    ])):
  """Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.

  See `EstimatorSpec` for `mode`, 'predictions, 'loss', 'train_op', and
  'export_outputs`.

  For evaluation, `eval_metrics `is a tuple of `metric_fn` and `tensors`, where
  `metric_fn` runs on CPU to generate metrics and `tensors` represents the
  `Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
  To be precise, TPU evaluation expects a slightly different signature from the
  @{tf.estimator.Estimator}. While `EstimatorSpec.eval_metric_ops` expects a
  dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
  The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
  `tensors` usually specify the model logits, which are transferred back from
  TPU system to CPU host. All tensors must have be batch-major, i.e., the batch
  size is the first dimension. Once all tensors are available at CPU host from
  all shards, they are concatenated (on CPU) and passed as positional arguments
  to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
  dict. `metric_fn` takes the `tensors` and returns a dict from metric string
  name to the result of calling a metric function, namely a `(metric_tensor,
  update_op)` tuple. See `TPUEstimator` for MNIST example how to specify the
  `eval_metrics`.

  `scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
  function should not capture any Tensors in `model_fn`.

  `host_call` is a tuple of a `function` and a list or dictionary of `tensors`
  to pass to that function and returns a list of Tensors. `host_call` currently
  works for train() and evaluate(). The Tensors returned by the function is
  executed on the CPU on every step, so there is communication overhead when
  sending tensors from TPU to CPU. To reduce the overhead, try reducing the
  size of the tensors. The `tensors` are concatenated along their major (batch)
  dimension, and so must be >= rank 1. The `host_call` is useful for writing
  summaries with @{tf.contrib.summary.create_file_writer}.
  """

  def __new__(cls,
              mode,
              predictions=None,
              loss=None,
              train_op=None,
              eval_metrics=None,
              export_outputs=None,
              scaffold_fn=None,
              host_call=None):
    """Creates a validated `TPUEstimatorSpec` instance."""
    # Both eval_metrics and host_call are (fn, tensors) pairs executed on the
    # CPU host; validate them through the shared host-call machinery before
    # constructing the namedtuple.
    host_calls = {}
    if eval_metrics is not None:
      host_calls['eval_metrics'] = eval_metrics
    if host_call is not None:
      host_calls['host_call'] = host_call
    _OutfeedHostCall.validate(host_calls)
    return super(TPUEstimatorSpec, cls).__new__(
        cls,
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metrics=eval_metrics,
        export_outputs=export_outputs,
        scaffold_fn=scaffold_fn,
        host_call=host_call)

  def as_estimator_spec(self):
    """Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
    host_calls = {}
    if self.eval_metrics is not None:
      host_calls['eval_metrics'] = self.eval_metrics
    if self.host_call is not None:
      host_calls['host_call'] = self.host_call
    # On CPU the host calls can be evaluated directly in the main graph.
    host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
    eval_metric_ops = None
    if self.eval_metrics is not None:
      eval_metric_ops = host_call_ret['eval_metrics']
    hooks = None
    if self.host_call is not None:
      # Run the host_call output ops every step via a hook.
      hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
    scaffold = self.scaffold_fn() if self.scaffold_fn else None
    return model_fn_lib.EstimatorSpec(
        mode=self.mode,
        predictions=self.predictions,
        loss=self.loss,
        train_op=self.train_op,
        eval_metric_ops=eval_metric_ops,
        export_outputs=self.export_outputs,
        scaffold=scaffold,
        training_hooks=hooks,
        evaluation_hooks=hooks,
        prediction_hooks=hooks)
class _OpQueueContext(object):
  """Manages work queue and thread for a infeed/outfeed thread."""

  def __init__(self, name, target, args):
    self._name = name
    self._queue = Queue.Queue()
    # The context is handed to the worker as its first argument so the worker
    # can pull iteration counts via read_iteration_counts().
    self._thread = threading.Thread(
        name=name, target=target, args=(self,) + args)
    self._thread.daemon = True
    self._thread.start()

  def stop(self):
    """Asks the worker thread to shut down."""
    self._queue.put(_SIGNAL.STOP)

  def send_next_batch_signal(self, iterations):
    """Tells the worker to run `iterations` more steps."""
    self._queue.put(iterations)

  def read_iteration_counts(self):
    """Yields iteration counts from the queue until a STOP signal arrives."""
    while True:
      signal = self._queue.get(block=True)
      logging.debug('%s read iterations %s', self._name, signal)
      if signal == _SIGNAL.STOP:
        logging.info('%s received shutdown signal, stopping.', self._name)
        return
      yield signal

  def join(self):
    """Signals shutdown and waits for the worker thread to exit."""
    logging.info('Shutting down %s thread.' % self._name)
    self.stop()
    self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
  """Manages work queue and thread for a infeed/outfeed thread.

  This subclass only signals once.
  """

  def __init__(self, name, target, args):
    super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
    self._has_signaled = False

  def send_next_batch_signal(self, iterations):
    # Forward only the first signal; all later ones are silently dropped.
    if self._has_signaled:
      return
    self._queue.put(iterations)
    self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
  """A Session hook setting up the TPU initialization, infeed, and outfeed.

  This hook does two major things:
  1. initialize and shutdown TPU system.
  2. launch and join the threads for infeed enqueue and (optional) outfeed
     dequeue.
  """

  def __init__(self,
               ctx,
               enqueue_ops,
               dequeue_ops,
               run_infeed_loop_on_coordinator=True):
    """Creates the hook.

    Args:
      ctx: TPU context object; only `master_job` and
        `config.tpu_config.initial_infeed_sleep_secs` are read here.
      enqueue_ops: ops run by the infeed thread to feed data to the TPU.
      dequeue_ops: ops run by the outfeed thread to pull results back.
      run_infeed_loop_on_coordinator: if True, the infeed thread runs one
        `session.run` per iteration; otherwise one per loop signal.
    """
    self._master_job = ctx.master_job
    self._enqueue_ops = enqueue_ops
    self._dequeue_ops = dequeue_ops
    self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
    self._initial_infeed_sleep_secs = (
        ctx.config.tpu_config.initial_infeed_sleep_secs)
    self._session_cancel_timer = None
    self._feed_error = None
    self._finished = False

  def begin(self):
    """Builds init/shutdown ops for the TPU system and summary writers."""
    logging.info('TPU job name %s', self._master_job)
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
    self._init_ops = [tpu.initialize_system(job=self._master_job)]
    self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
    summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
    self._init_ops.extend(summary_writer_init_ops)
    # Get all the writer resources from the initializer, so we know what to
    # flush.
    for op in summary_writer_init_ops:
      self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))

  def _log_error(self, session, error):
    """Log an infeed or outfeed error.

    This logs a short error message immediately, and schedules a timer to
    emit the full stack trace and error message after a short period of time.
    If the main session has terminated by the time the timer triggers, we
    assume the real source of the error was from the main session and avoid
    emitting a stack trace for the infeed.

    Args:
      session: `tf.Session`, session to be terminated.
      error: the Exception to log.
    """
    logging.warning(
        '\n\n'
        'Error occurred during infeed/outfeed. This may be due to a compile '
        'error in the main session. Waiting for a short time for the main '
        'session to come back.\n\n%s', error)
    self._feed_error = traceback.format_exc()
    # If we've already encountered a feed error, don't schedule another
    # cancellation op.
    if self._session_cancel_timer:
      return

    def _cancel_session():
      # Close the session to avoid the main thread from hanging. If input
      # pipeline triggers any error, the infeed thread dies but the main thread
      # for TPU computation waits for the infeed enqueue forever. Close the
      # Session to cancel the main thread Session.run execution.
      #
      # We sleep for a few seconds before closing to give some time
      # for the TPU compilation error, if any, propagating, from TPU to CPU
      # host. Compilation errors should be reported by the main thread so that
      # the program can be interrupted and users can take action. Due to a race
      # condition, the infeed thread might see an error first. Closing the
      # session here immediately would result in a session cancellation
      # exception in the main thread, instead of the expected compile error.
      # User code that depends on having the proper exception type will
      # therefore be confused.
      time.sleep(5)
      # If the main session is still running, the infeed/outfeed errors are
      # legitimate, and should be logged.
      if not self._finished and self._feed_error:
        logging.error('Feed error: %s', self._feed_error)
        logging.error('Closing session. A RuntimeError should follow.')
        session.close()

    self._session_cancel_timer = threading.Thread(target=_cancel_session)
    self._session_cancel_timer.daemon = True
    self._session_cancel_timer.start()

  def _run_infeed(self, queue_ctx, session):
    """Worker loop for the infeed thread; runs enqueue ops per iteration."""
    logging.info('Starting infeed thread controller.')
    if self._initial_infeed_sleep_secs:
      # BUGFIX: this used to log `self._name`, but the hook has no `_name`
      # attribute (only _OpQueueContext does), so any configuration with
      # initial_infeed_sleep_secs set raised AttributeError here. Log a
      # literal thread name instead.
      logging.info('Infeed thread sleeping for %d seconds.',
                   self._initial_infeed_sleep_secs)
      time.sleep(self._initial_infeed_sleep_secs)
      logging.info('Infeed thread starting after sleep')
    try:
      if self._run_infeed_loop_on_coordinator:
        for count, steps in enumerate(queue_ctx.read_iteration_counts()):
          for i in xrange(steps):
            logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
            session.run(self._enqueue_ops)
      else:
        for _ in queue_ctx.read_iteration_counts():
          session.run(self._enqueue_ops)
      logging.info('Infeed thread finished, shutting down.')
    except Exception as e:  # pylint: disable=broad-except
      self._log_error(session, e)

  def _run_outfeed(self, queue_ctx, session):
    """Worker loop for the outfeed thread; runs dequeue ops per iteration."""
    logging.info('Starting outfeed thread controller.')
    try:
      for count, steps in enumerate(queue_ctx.read_iteration_counts()):
        for i in xrange(steps):
          logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
          session.run(self._dequeue_ops)
      logging.info('Outfeed thread finished, shutting down.')
    except Exception as e:  # pylint: disable=broad-except
      self._log_error(session, e)

  def _create_infeed_controller(self, name, target, args):
    # Overridden by the prediction subclass to use a signal-once queue.
    return _OpQueueContext(name=name, target=target, args=args)

  def after_create_session(self, session, coord):
    """Initializes the TPU system and launches the worker threads."""
    logging.info('Init TPU system')
    session.run(self._init_ops,
                options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))

    self._infeed_controller = self._create_infeed_controller(
        name='InfeedController', target=self._run_infeed, args=(session,))

    self._outfeed_controller = _OpQueueContext(
        name='OutfeedController', target=self._run_outfeed, args=(session,))

  def before_run(self, run_context):
    """Signals both worker threads to process the next loop's iterations."""
    self._feed_error = None

    # Wait for the cancellation timer to complete before continuing.
    if self._session_cancel_timer:
      self._session_cancel_timer.join()
      self._session_cancel_timer = None

    iterations = run_context.session.run(self._iterations_per_loop_var)

    logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
    self._infeed_controller.send_next_batch_signal(iterations)

    logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
                 iterations)
    self._outfeed_controller.send_next_batch_signal(iterations)

  def end(self, session):
    """Joins worker threads and shuts down the TPU system."""
    if self._session_cancel_timer:
      logging.warning('Feed error occurred; waiting for message.')
      self._session_cancel_timer.join()

    self._finished = True
    logging.info('Stop infeed thread controller')
    self._infeed_controller.join()

    logging.info('Stop output thread controller')
    self._outfeed_controller.join()

    logging.info('Shutdown TPU system.')
    session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
  """Variant of `TPUInfeedOutfeedSessionHook` used for prediction.

  Disables the per-iteration infeed loop on the coordinator and signals the
  infeed thread only once, since prediction is terminated by stopping
  signals rather than a fixed iteration count.
  """

  def __init__(self, ctx, enqueue_ops, dequeue_ops):
    super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
        ctx, enqueue_ops, dequeue_ops, run_infeed_loop_on_coordinator=False)

  def _create_infeed_controller(self, name, target, args):
    # Use the signal-once queue so the infeed thread is kicked exactly once.
    return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
  """Hook that requests stop at a specified step.

  This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
  following differences for TPU training:

  1. This hook sets the variable for iterations_per_loop, which is used by
     `TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
     As the hook execution order is not guaranteed, the variable update is
     handled in `after_create_session` and `after_run` as
     `TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.

  2. For each training loop (session.run), the global step could be increased
     multiple times on TPU. The global step tensor value will be explicitly read
     again in `after_run` to ensure the latest value is retrieved to avoid race
     condition.
  """

  def __init__(self, iterations, num_steps=None, last_step=None):
    """Initializes a `_TPUStopAtStepHook`.

    Args:
      iterations: The number of iterations to run optimizer per training loop.
      num_steps: Number of steps to execute (relative to the current step).
      last_step: Absolute step after which to stop.

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    if num_steps is None and last_step is None:
      raise ValueError('One of num_steps or last_step must be specified.')
    if num_steps is not None and last_step is not None:
      raise ValueError('Only one of num_steps or last_step can be specified.')
    self._num_steps = num_steps
    self._last_step = last_step
    self._iterations = iterations

  def _next_iterations(self, global_step, last_step):
    # Never schedule more iterations than remain until the target step.
    remaining = last_step - global_step
    return min(remaining, self._iterations)

  def begin(self):
    self._global_step_tensor = training_util.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError('Global step should be created.')
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

  def after_create_session(self, session, coord):
    global_step = session.run(self._global_step_tensor)
    if self._last_step is None:
      # Resolve the absolute target step from the relative step count.
      self._last_step = global_step + self._num_steps
    self._iterations_per_loop_var.load(
        self._next_iterations(global_step, self._last_step), session=session)

  def after_run(self, run_context, run_values):
    # Global step cannot be retrieved via SessionRunArgs and before_run due to
    # race condition.
    global_step = run_context.session.run(self._global_step_tensor)
    if global_step < self._last_step:
      self._iterations_per_loop_var.load(
          self._next_iterations(global_step, self._last_step),
          session=run_context.session)
    else:
      run_context.request_stop()
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
  """Hook that loads the eval step count into the iterations-per-loop var."""

  def __init__(self, num_steps):
    """Initializes a `_SetEvalIterationsHook`.

    Args:
      num_steps: Number of steps to execute.
    """
    self._num_steps = num_steps

  def begin(self):
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

  def after_create_session(self, session, coord):
    # Push the requested step count into the loop-control variable so the
    # infeed/outfeed hook knows how many iterations to schedule.
    self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
  """Hook that requests stop according to the stopping signal in prediction."""

  def __init__(self, scalar_stopping_signal):
    self._scalar_stopping_signal = scalar_stopping_signal

  def begin(self):
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

  def after_create_session(self, session, coord):
    # This is not necessary as we do not run infeed enqueue and outfeed dequeue
    # in side threads for prediction model. But it makes the
    # TPUInfeedOutfeedSessionHook prints nice message.
    self._iterations_per_loop_var.load(1, session=session)

  def before_run(self, run_context):
    # Ask the session to also evaluate the stopping signal on every run.
    return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)

  def after_run(self, run_context, run_values):
    del run_context  # unused
    scalar_stopping_signal = run_values.results
    if not _StopSignals.should_stop(scalar_stopping_signal):
      return
    # NOTE(xiejw): In prediction, stopping signals are inserted for each
    # batch, and one extra batch with stop = 1 is appended by TPUEstimator
    # after the user-provided data to tell the system to stop, e.g.:
    #
    #   batch 0..99:  images, labels, stop = 0  (user provided)
    #   batch 100:    images, labels, stop = 1  (TPUEstimator appended)
    #
    # That final sentinel batch must not be returned to the user. Raising
    # OutOfRangeError here makes MonitoredSession discard the "current"
    # prediction (the sentinel batch) immediately.
    raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(ctx, input_fn,
                                              inputs_structure_recorder):
  """Generates infeed enqueue ops for per-core input_fn on a single host.

  Returns:
    A `(enqueue_ops_fn, captured_infeed_queue)` pair; `enqueue_ops_fn` builds
    the ops at graph-construction time and `captured_infeed_queue` exposes
    the `InfeedQueue` created inside it.
  """
  # Captures the InfeedQueue built inside enqueue_ops_fn so the caller can
  # retrieve it after the fn has been invoked.
  captured_infeed_queue = _CapturedObject()

  def enqueue_ops_fn():
    """A fn returns enqueue_ops."""
    num_cores_per_host = ctx.num_of_cores_per_host
    per_host_sharded_inputs = []
    # Invoke input_fn once per core; each invocation yields one core's shard.
    for core_ordinal in range(num_cores_per_host):
      with ops.name_scope('ordinal_%d' % (core_ordinal)):
        inputs = _Inputs.from_input_fn(input_fn())
        if inputs.is_dataset:
          raise TypeError(
              '`input_fn` returning `Dataset` is not yet supported in '
              'per-Core input pipeline deployment yet. Please set '
              'TPUConfig.per_host_input_for_training to True or return '
              '`features` and `labels` from `input_fn`')
        features, labels = inputs.features_and_labels()
        # Record the (features, labels) nesting so flat dequeued tensors can
        # be restored to the original structure on the TPU side.
        inputs_structure_recorder.validate_and_record_structure(
            features, labels)
        flattened_inputs = (
            inputs_structure_recorder.flatten_features_and_labels(
                features, labels))
        per_host_sharded_inputs.append(flattened_inputs)

    infeed_queue = tpu_feed.InfeedQueue(
        number_of_tuple_elements=len(per_host_sharded_inputs[0]))
    captured_infeed_queue.capture(infeed_queue)
    infeed_queue.set_configuration_from_sharded_input_tensors(
        per_host_sharded_inputs)

    per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
        per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)
    return per_host_enqueue_ops

  return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
    ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
  """Generates infeed enqueue ops for per-host input_fn on a single host.

  Returns:
    A `(enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset)` tuple.
  """
  # Captures the InfeedQueue built inside enqueue_ops_fn for later retrieval.
  captured_infeed_queue = _CapturedObject()

  hooks = []

  with ops.device(device):
    inputs = _Inputs.from_input_fn(input_fn())

    is_dataset = inputs.is_dataset
    if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
      if not is_dataset:
        raise TypeError(
            'For mode PREDICT, `input_fn` must return `Dataset` instead of '
            '`features` and `labels`.')
      if batch_axis is not None:
        raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
      # Wrap the dataset so every batch also carries stop signals (and is
      # padded to a full batch when needed).
      inputs = _InputsWithStoppingSignals(
          dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn,
          add_padding=True)

    if is_dataset:
      hooks.append(inputs.dataset_initializer_hook())

  # TODO(ylc): Refactoring the code to merge the tpu ordinal logic here and the
  # _TPUContext.tpu_ordinal_function. We should either introduce another
  # abstraction or a different helper method.
  def _tpu_ordinal_function_impl(shard_index_in_host):
    # We put both enqueue/dequeue op at tpu.core(0) in each replica.
    replica = ctx.device_assignment.lookup_replicas(
        host_id, (0, 0, 0))[shard_index_in_host]
    return ctx.device_assignment.tpu_ordinal(replica=replica)

  if ctx.model_parallelism_enabled:
    tpu_ordinal_function = _tpu_ordinal_function_impl
  else:
    tpu_ordinal_function = None

  def enqueue_ops_fn():
    """Builds and returns the per-host enqueue ops (plus signals, if any)."""
    with ops.device(device):
      num_of_replicas_per_host = ctx.num_of_replicas_per_host
      # Convert user input to features and labels. If the user returns a
      # dataset, it is initialized and the features and labels extracted via
      # `dataset.iterator.get_next()`
      features, labels = inputs.features_and_labels()
      signals = inputs.signals()

      # Record the nesting structure, then flatten for the infeed queue.
      inputs_structure_recorder.validate_and_record_structure(
          features, labels, signals)
      unsharded_tensor_list = (
          inputs_structure_recorder.flatten_features_and_labels(
              features, labels, signals))

      infeed_queue = tpu_feed.InfeedQueue(
          tuple_types=[t.dtype for t in unsharded_tensor_list],
          tuple_shapes=[t.shape for t in unsharded_tensor_list],
          shard_dimensions=batch_axis)
      captured_infeed_queue.capture(infeed_queue)
      infeed_queue.set_number_of_shards(num_of_replicas_per_host)

      # Split the host-level batch into per-replica shards and enqueue.
      per_host_enqueue_ops = (
          infeed_queue.split_inputs_and_generate_enqueue_ops(
              unsharded_tensor_list,
              placement_function=lambda x: device,
              tpu_ordinal_function=tpu_ordinal_function))
      if signals is None:
        return per_host_enqueue_ops
      else:
        return {
            'ops': per_host_enqueue_ops,
            'signals': signals,
        }

  return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
def generate_per_host_v2_enqueue_ops_fn_for_host(
    ctx, input_fn, inputs_structure_recorder, device, host_id):
  """Generates infeed enqueue ops for per-host input_fn on a single host.

  PER_HOST_V2 variant: `input_fn` must return a `Dataset`, which is drawn
  from once per replica on this host.

  Returns:
    A `(enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset)` tuple.
  """
  del host_id  # unused
  captured_infeed_queue = _CapturedObject()
  hooks = []

  with ops.device(device):
    inputs = _Inputs.from_input_fn(input_fn())

    is_dataset = inputs.is_dataset
    if not is_dataset:
      raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
                      'input pipeline configuration.')

    if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
      # TODO(b/XXX): Add predict support for PER_HOST_V2
      # NOTE(review): message presumably means "Mode PREDICT not yet
      # supported" -- verify before relying on its wording.
      raise TypeError('Most PREDICT not yet supported in PER_HOST_V2 mode.')

    hooks.append(inputs.dataset_initializer_hook())

  def enqueue_ops_fn():
    """Generates the per_host enqueue ops."""
    control_deps = []
    per_host_sharded_inputs = []
    num_replicas_per_host = ctx.num_of_replicas_per_host
    with ops.device(device):
      if not inputs.is_dataset:
        raise TypeError('`input_fn` must return a `Dataset` for this mode.')
      for _ in range(num_replicas_per_host):
        # Use control dependencies to ensure a deterministic ordering.
        with ops.control_dependencies(control_deps):
          features, labels = inputs.features_and_labels()  # Calls get_next()

        inputs_structure_recorder.validate_and_record_structure(
            features, labels)
        flattened_inputs = (
            inputs_structure_recorder.flatten_features_and_labels(
                features, labels))

        # Chain each shard's get_next() after the previous one.
        control_deps.extend(flattened_inputs)
        per_host_sharded_inputs.append(flattened_inputs)

      infeed_queue = tpu_feed.InfeedQueue(
          number_of_tuple_elements=len(per_host_sharded_inputs[0]))
      captured_infeed_queue.capture(infeed_queue)
      infeed_queue.set_configuration_from_sharded_input_tensors(
          per_host_sharded_inputs)

      per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
          per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)
    return per_host_enqueue_ops

  return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
class _InputPipeline(object):
  """`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.

  `_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
  call site. To be precise, based on the configuration in `_TPUContext`, it
  invokes `input_fn` for all cores (usually multi-host TPU training) or for one
  host (usually for single-host TPU evaluation), and sends all `features` and
  `labels` returned by `input_fn` to TPU infeed. For per-core invocation,
  `features` and `labels` are piped to infeed directly, one tuple for each
  core. For per-host invocation, `features` and `labels` are split at host
  (with respect to `batch_axis`) and piped to all cores accordingly.

  In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
  inputs returned by the `input_fn` can have one of the following forms:
  1. features
  2. (features, labels)

  Internally, form 1 is reformed to `(features, None)` as features and labels
  are passed separately to underlying methods. For TPU training, TPUEstimator
  may expect multiple `features` and `labels` tuples one for each core.

  TPUEstimator allows various different structures for inputs (namely `features`
  and `labels`). `features` can be `Tensor` or dict of string name to `Tensor`,
  and `labels` could be `None`, `Tensor`, or dict of string name to `Tensor`.
  TPU infeed/outfeed library expects flattened tensor list. So, `features` and
  `labels` need to be flattened, before infeed enqueue, and the structure of
  them needs to be recorded, in order to restore them after infeed dequeue.
  """

  class InputsStructureRecorder(object):
    """The recorder to record inputs structure.

    The structure (dict key order and label presence) is captured on the first
    `validate_and_record_structure` call and asserted to be identical on every
    subsequent call, so that `flatten_features_and_labels` and
    `unflatten_features_and_labels` always agree on tensor ordering.
    """

    def __init__(self):
      # Holds the structure of inputs: sorted dict keys for `features` and
      # `labels` (empty lists when they are single tensors), and whether
      # labels were present at all.
      self._feature_names = []
      self._label_names = []
      self._has_labels = False
      # Helper describing the stopping-signal tensors, recorded lazily on the
      # first call that passes `signals`.
      self._signals_helper = None

      # Internal state.
      self._initialized = False

    def has_labels(self):
      """Returns True if the recorded inputs include `labels`."""
      return self._has_labels

    def validate_and_record_structure(self, features, labels, signals=None):
      """Validates and records the structure of `features` and `labels`."""

      def _extract_key_names(tensor_or_dict):
        # Sorted keys for a dict input; empty list for a single tensor (or
        # None), which doubles as the "not a dict" marker below.
        if tensor_or_dict is None:
          return []
        return sorted(tensor_or_dict.keys()) if isinstance(
            tensor_or_dict, dict) else []

      # Extract structure.
      has_labels = labels is not None
      feature_names = _extract_key_names(features)
      label_names = _extract_key_names(labels)

      if signals is not None and self._signals_helper is None:
        # Record signals helper.
        self._signals_helper = _SignalsHelper(signals)

      if self._initialized:
        # Verify the structure is same. The following should never happen.
        assert feature_names == self._feature_names, 'feature keys mismatched'
        assert label_names == self._label_names, 'label keys mismatched'
        assert has_labels == self._has_labels, 'label presence mismatched'
      else:
        # Record structure.
        self._initialized = True
        self._feature_names = feature_names
        self._label_names = label_names
        self._has_labels = has_labels

    def flatten_features_and_labels(self, features, labels, signals=None):
      """Flattens the `features` and `labels` to a single tensor list.

      Order is: features (by recorded key order), then labels (by recorded
      key order), then signal tensors. `unflatten_features_and_labels`
      inverts exactly this layout.
      """
      flattened_inputs = []
      if self._feature_names:
        # We need a fixed ordering for enqueueing and dequeueing.
        flattened_inputs.extend(
            [features[name] for name in self._feature_names])
      else:
        flattened_inputs.append(features)

      if labels is not None:
        if self._label_names:
          # We need a fixed ordering for enqueueing and dequeueing.
          flattened_inputs.extend([labels[name] for name in self._label_names])
        else:
          flattened_inputs.append(labels)

      if signals is not None:
        flattened_inputs.extend(_SignalsHelper.as_tensor_list(signals))
      return flattened_inputs

    def unflatten_features_and_labels(self, flattened_inputs):
      """Restores the flattened inputs to original features and labels form.

      Args:
        flattened_inputs: Flattened inputs for each shard.

      Returns:
        A tuple of (`features`, `labels`), where `labels` could be None.
        Each one, if present, should have identical structure (single tensor vs
        dict) as the one returned by input_fn.

      Raises:
        ValueError: If the number of expected tensors from `flattened_inputs`
          mismatches the recorded structure.
      """
      # A recorded empty name list means "single tensor", which still
      # occupies one slot in the flattened list.
      expected_num_features = (
          len(self._feature_names) if self._feature_names else 1)
      if self._has_labels:
        expected_num_labels = (
            len(self._label_names) if self._label_names else 1)
      else:
        expected_num_labels = 0

      expected_num_signals = (
          self._signals_helper.num_signals if self._signals_helper else 0)

      expected_num_tensors = (
          expected_num_features + expected_num_labels + expected_num_signals)

      if expected_num_tensors != len(flattened_inputs):
        raise ValueError(
            'The number of flattened tensors mismatches expected num. '
            'Expected {}, got {}'.format(expected_num_tensors,
                                         len(flattened_inputs)))
      if self._feature_names:
        unflattened_features = dict(
            zip(self._feature_names, flattened_inputs[:expected_num_features]))
      else:
        # Single tensor case
        unflattened_features = flattened_inputs[0]

      if expected_num_labels == 0:
        unflattened_label = None
      elif self._label_names:
        # Labels occupy the slice immediately after the features.
        label_list = flattened_inputs[
            expected_num_features:expected_num_features + expected_num_labels]
        unflattened_label = dict(zip(self._label_names, label_list))
      else:
        # Single tensor case.
        unflattened_label = flattened_inputs[expected_num_features]

      signals = None
      if expected_num_signals != 0:
        # Signals are always the trailing tensors of the flattened list.
        tensor_list_for_signals = flattened_inputs[
            expected_num_features + expected_num_labels:]
        signals = self._signals_helper.unflatten(tensor_list_for_signals)

      return _Inputs(unflattened_features, unflattened_label, signals=signals)

  def __init__(self, input_fn, batch_axis, ctx):
    """Constructor.

    Args:
      input_fn: input fn for train or eval.
      batch_axis: A python tuple of int values describing how each tensor
        produced by the Estimator `input_fn` should be split across the TPU
        compute shards.
      ctx: A `_TPUContext` instance with mode.

    Raises:
      ValueError: If both `sharded_features` and `num_cores` are `None`.
    """
    self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder()

    self._sharded_per_core = ctx.is_input_sharded_per_core()
    self._input_fn = input_fn
    # Populated by _invoke_input_fn_and_record_structure; used by dequeue_fn.
    self._infeed_queue = None
    self._ctx = ctx
    self._batch_axis = batch_axis

  def generate_infeed_enqueue_ops_and_dequeue_fn(self):
    """Generates infeed enqueue ops and dequeue_fn.

    Returns:
      A tuple (enqueue_ops, dequeue_fn, all_hooks,
      run_infeed_loop_on_coordinator), where dequeue_fn restores the
      recorded (features, labels) structure from the infeed queue.
    """
    # While tf.while_loop is called, the body function, which invokes
    # `enqueue_fn` passed in, is called to construct the graph. So, input_fn
    # structure is recorded.
    enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
        self._invoke_input_fn_and_record_structure())

    self._validate_input_pipeline()

    def dequeue_fn():
      """dequeue_fn is used by TPU to retrieve the tensors."""
      # In the model-parallel case, both the host-side and device-side
      # computations must agree on the core on which infeed takes place. We
      # choose to perform infeed on logical core 0 of each replica.
      values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
      # The unflatten process uses the structure information recorded above.
      return self._inputs_structure_recorder.unflatten_features_and_labels(
          values)

    return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)

  def _invoke_input_fn_and_record_structure(self):
    """Deploys the input pipeline and record input structure."""
    enqueue_ops = []
    infeed_queues = []
    all_hooks = []
    num_hosts = self._ctx.num_hosts
    tpu_host_placement_fn = self._ctx.tpu_host_placement_function

    # Flipped to False whenever the enqueue computation is wrapped in an
    # in-graph while loop (the coordinator then need not drive the loop).
    run_infeed_loop_on_coordinator = True

    if self._sharded_per_core:
      # Per-Core input pipeline deployment.
      # Invoke input pipeline for each core and placed on the corresponding
      # host.
      for host_id in range(num_hosts):
        host_device = tpu_host_placement_fn(host_id=host_id)
        with ops.device(host_device):
          with ops.name_scope('input_pipeline_task%d' % (host_id)):
            enqueue_ops_fn, captured_infeed_queue = (
                generate_per_core_enqueue_ops_fn_for_host(
                    self._ctx, self._input_fn, self._inputs_structure_recorder))

            if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
              run_infeed_loop_on_coordinator = False
              enqueue_ops.append(
                  _wrap_computation_in_while_loop(
                      device=host_device, op_fn=enqueue_ops_fn))
            else:
              enqueue_ops.append(enqueue_ops_fn())
            # Infeed_queue_getter must be called after enqueue_ops_fn is called.
            infeed_queues.append(captured_infeed_queue.get())

    else:
      for host_id in range(num_hosts):
        host_device = tpu_host_placement_fn(host_id=host_id)
        with ops.device(host_device):
          with ops.name_scope('input_pipeline_task%d' % (host_id)):
            if self._ctx.is_input_per_host_with_iterators():
              enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
                  generate_per_host_v2_enqueue_ops_fn_for_host(
                      self._ctx, self._input_fn,
                      self._inputs_structure_recorder, host_device, host_id))
            else:
              enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
                  generate_per_host_enqueue_ops_fn_for_host(
                      self._ctx, self._input_fn,
                      self._inputs_structure_recorder, self._batch_axis,
                      host_device, host_id))
            all_hooks.extend(hooks)

            # NOTE(xiejw): We dispatch here based on the return type of the
            # users `input_fn`.
            #
            # 1. If input_fn returns a Dataset instance, we initialize the
            # iterator outside of tf.while_loop, and call the iterator.get_next
            # inside tf.while_loop. This should be always safe.
            #
            # 2. If input_fn returns (features, labels), it is too late to wrap
            # them inside tf.while_loop, as resource initialization cannot be
            # handled in TF control flow properly. In this case, we will use
            # python loop to enqueue the data into TPU system. This may be
            # slow compared to the previous case.
            if is_dataset:
              run_infeed_loop_on_coordinator = False
              wrap_fn = (
                  _wrap_computation_in_while_loop
                  if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
                  _wrap_computation_in_while_loop_with_stopping_signals)
              enqueue_ops.append(
                  wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
            else:
              enqueue_ops.append(enqueue_ops_fn())
            infeed_queues.append(captured_infeed_queue.get())
    # infeed_queue is used to generate dequeue ops. The only thing it uses for
    # dequeue is dtypes and types. So, any one can be used. Here, grab the
    # first one.
    self._infeed_queue = infeed_queues[0]
    return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator

  def _validate_input_pipeline(self):
    """Raises (or warns) if the input pipeline relies on QueueRunners."""
    # Perform some sanity checks to log user friendly information. We should
    # error out to give users better error message. But, if
    # _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break
    # user code, so, log a warning.
    if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
      err_msg = ('Input pipeline contains one or more QueueRunners. '
                 'It could be slow and not scalable. Please consider '
                 'converting your input pipeline to use `tf.data` instead (see '
                 'https://www.tensorflow.org/programmers_guide/datasets for '
                 'instructions.')
      if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
        raise RuntimeError(err_msg)
      else:
        logging.warn(err_msg)
class _ModelFnWrapper(object):
  """A `model_fn` wrapper.

  This makes calling model_fn on CPU and TPU easier and more consistent and
  performs necessary check and mutation required by TPU training and evaluation.

  In addition, this wrapper manages converting the `model_fn` to a single TPU
  train and eval step.
  """

  def __init__(self, model_fn, config, params, ctx):
    """Stores the user `model_fn` plus the config/params/ctx passed to it."""
    self._model_fn = model_fn
    self._config = config
    self._params = params
    self._ctx = ctx

  def call_without_tpu(self, features, labels, is_export_mode):
    """Invokes `model_fn` directly on CPU (no TPU loop or replication)."""
    return self._call_model_fn(features, labels, is_export_mode=is_export_mode)

  def convert_to_single_tpu_train_step(self, dequeue_fn):
    """Converts user provided `model_fn` as a single train step on TPU.

    The user provided `model_fn` takes input tuple
    (features, labels) and produces the EstimatorSpec with train_op and loss for
    train `mode`. This usually represents a single train computation on CPU.

    For TPU training, a train (computation) step is first wrapped in a
    tf.while_loop control flow to repeat for many times and then replicated to
    all TPU shards. Besides the input should be taken from TPU infeed rather
    than input pipeline (input_fn) directly. To fit TPU loop and replicate
    pattern, the original train computation should be reformed, which is the
    returned `train_step`.

    Args:
      dequeue_fn: The function to retrieve inputs, features and labels, from TPU
        infeed dequeue channel.

    Returns:
      A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn
      representing the train step for TPU.
    """
    host_call = _OutfeedHostCall(self._ctx)
    captured_scaffold_fn = _CapturedObject()

    def train_step(loss):
      """Training step function for use inside a while loop."""
      del loss  # unused; required in function signature.
      inputs = dequeue_fn()
      features, labels = inputs.features_and_labels()

      estimator_spec = self._verify_estimator_spec(
          self._call_model_fn(features, labels))
      loss, train_op = estimator_spec.loss, estimator_spec.train_op

      # The scaffold_fn is captured (not returned) because train_step runs
      # inside a while loop; only TPUEstimatorSpec carries a scaffold_fn.
      if isinstance(estimator_spec, TPUEstimatorSpec):
        captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
      else:
        captured_scaffold_fn.capture(None)

      # We must run train_op to update the variables prior to running the
      # outfeed.
      with ops.control_dependencies([train_op]):
        host_call_outfeed_ops = []
        if (isinstance(estimator_spec, TPUEstimatorSpec) and
            estimator_spec.host_call is not None):
          host_call.record({'host_call': estimator_spec.host_call})
          host_call_outfeed_ops = host_call.create_enqueue_op()
        with ops.control_dependencies(host_call_outfeed_ops):
          # Return the loss as the while-loop carried value, ordered after
          # both train_op and the outfeed enqueue.
          return array_ops.identity(loss)

    return train_step, host_call, captured_scaffold_fn

  def convert_to_single_tpu_eval_step(self, dequeue_fn):
    """Converts user provided `model_fn` as a single eval step on TPU.

    Similar to training, the user provided `model_fn` takes input tuple
    (features, labels) and produces the TPUEstimatorSpec with eval_metrics for
    eval `mode`. This usually represents a single evaluation computation on CPU.

    For TPU evaluation, a eval (computation) step is first wrapped in a
    tf.while_loop control flow to repeat for many times and then replicated to
    all TPU shards. Besides the input and output are slightly different. Input,
    features and labels, should be taken from TPU infeed rather than input
    pipeline (input_fn) directly. Output is managed in two stages. First, the
    model outputs as the result of evaluation computation, usually model logits,
    should be transferred from TPU system to CPU. Then, all model outputs are
    concatenated first on CPU and sent to the metric_fn for metrics computation.
    To fit TPU evaluation pattern, the original eval computation should be
    reformed, which is the returned `eval_step`.

    Args:
      dequeue_fn: The function to retrieve inputs, features and labels, from TPU
        infeed dequeue channel.

    Returns:
      A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn
      representing the eval step for TPU.
    """
    host_calls = _OutfeedHostCall(self._ctx)
    captured_scaffold_fn = _CapturedObject()

    def eval_step(total_loss):
      """Evaluation step function for use inside a while loop."""
      inputs = dequeue_fn()
      features, labels = inputs.features_and_labels()

      tpu_estimator_spec = self._call_model_fn(features, labels)
      if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):
        raise RuntimeError(
            'estimator_spec used by TPU evaluation must have type'
            '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))

      loss = tpu_estimator_spec.loss
      captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
      to_record = {}
      to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
      if tpu_estimator_spec.host_call is not None:
        # We assume that evaluate won't update global step, so we don't wrap
        # this host_call.
        to_record['host_call'] = tpu_estimator_spec.host_call
      host_calls.record(to_record)

      # Accumulate loss across loop iterations, ordered after the outfeed
      # enqueue so the host always receives this step's tensors.
      with ops.control_dependencies(host_calls.create_enqueue_op()):
        return math_ops.add(total_loss, loss)

    return eval_step, host_calls, captured_scaffold_fn

  def convert_to_single_tpu_predict_step(self, dequeue_fn):
    """Converts user provided `model_fn` as a single predict step on TPU.

    Args:
      dequeue_fn: The function to retrieve inputs, features and labels, from TPU
        infeed dequeue channel.

    Returns:
      A tuple of predict_fn, host_calls, and captured scaffold_fn. The
      predict_fn representing the predict step for TPU.
    """
    host_calls = _OutfeedHostCall(self._ctx)
    captured_scaffold_fn = _CapturedObject()

    def predict_step(unused_scalar_stopping_signal):
      """Prediction step function for use inside a while loop."""
      inputs = dequeue_fn()
      features, labels = inputs.features_and_labels()
      stopping_signals = inputs.signals()

      assert stopping_signals is not None, (
          'Internal Error: `signals` is missing.')

      tpu_estimator_spec = self._call_model_fn(
          features, labels, is_export_mode=False)
      if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):
        raise RuntimeError(
            'estimator_spec used by TPU prediction must have type'
            '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))

      captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
      to_record = {}
      # Predictions are passed through unchanged; identity_fn lets the
      # host-call machinery reassemble them as a dict on the host side.
      identity_fn = lambda **kwargs: kwargs
      # TODO(xiejw): Adds validation for prediction dictionary.
      # TODO(xiejw): Adds support for single tensor as predictions.
      if not isinstance(tpu_estimator_spec.predictions, dict):
        raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
      to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
      to_record['signals'] = [identity_fn, stopping_signals]
      if tpu_estimator_spec.host_call is not None:
        to_record['host_call'] = tpu_estimator_spec.host_call
      host_calls.record(to_record)

      with ops.control_dependencies(host_calls.create_enqueue_op()):
        return _StopSignals.as_scalar_stopping_signal(stopping_signals)

    return predict_step, host_calls, captured_scaffold_fn

  def _call_model_fn(self, features, labels, is_export_mode=False):
    """Calls the model_fn with required parameters.

    Args:
      features: model input features, forwarded to `model_fn`.
      labels: model labels; only forwarded if `model_fn` accepts `labels`.
      is_export_mode: if True, no per-shard batch size is injected into
        `params` (export runs outside the TPU loop).

    Returns:
      The spec returned by `model_fn`; a `TPUEstimatorSpec` is converted to a
      plain `EstimatorSpec` when running on CPU.

    Raises:
      ValueError: if `model_fn` lacks a `params` argument, or labels were
        provided but `model_fn` does not accept them.
    """
    model_fn_args = util.fn_args(self._model_fn)
    kwargs = {}

    # Makes deep copy with `config` and `params` in case user mutates them.
    config = copy.deepcopy(self._config)
    params = copy.deepcopy(self._params)

    if 'labels' in model_fn_args:
      kwargs['labels'] = labels
    elif labels is not None:
      raise ValueError(
          'model_fn does not take labels, but input_fn returns labels.')
    if 'mode' in model_fn_args:
      kwargs['mode'] = self._ctx.mode
    if 'config' in model_fn_args:
      kwargs['config'] = config
    if 'params' in model_fn_args:
      kwargs['params'] = params

    if 'params' not in model_fn_args:
      raise ValueError('model_fn ({}) does not include params argument, '
                       'required by TPUEstimator to pass batch size as '
                       'params[\'batch_size\']'.format(self._model_fn))

    if is_export_mode:
      batch_size_for_model_fn = None
    else:
      batch_size_for_model_fn = self._ctx.batch_size_for_model_fn

    if batch_size_for_model_fn is not None:
      # `params` may be an HParams object rather than a plain dict; HParams
      # requires add_hparam for new keys.
      if isinstance(params, hparam.HParams):
        params.add_hparam(_BATCH_SIZE_KEY, batch_size_for_model_fn)
      else:
        params[_BATCH_SIZE_KEY] = batch_size_for_model_fn

    estimator_spec = self._model_fn(features=features, **kwargs)
    if (self._ctx.is_running_on_cpu(is_export_mode) and
        isinstance(estimator_spec, TPUEstimatorSpec)):
      # The estimator_spec will be passed to `Estimator` directly, which expects
      # type `EstimatorSpec`.
      return estimator_spec.as_estimator_spec()
    else:
      return estimator_spec

  def _verify_estimator_spec(self, estimator_spec):
    """Validates the estimator_spec.

    A `TPUEstimatorSpec` passes through unchanged. A plain `EstimatorSpec` is
    rejected if it carries hooks (unsupported on TPU); a scaffold only earns
    a warning since ignoring it is non-fatal.
    """
    if isinstance(estimator_spec, TPUEstimatorSpec):
      return estimator_spec

    err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
    if estimator_spec.training_chief_hooks:
      raise ValueError(err_msg.format('training_chief_hooks'))
    if estimator_spec.training_hooks:
      raise ValueError(err_msg.format('training_hooks'))
    if estimator_spec.evaluation_hooks:
      raise ValueError(err_msg.format('evaluation_hooks'))

    if estimator_spec.scaffold:
      logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
                      'Please use TPUEstimatorSpec.')
    return estimator_spec
class _OutfeedHostCall(object):
  """Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""

  def __init__(self, ctx):
    self._ctx = ctx
    # Ordered names of the recorded host calls; ordering fixes the tensor
    # layout in the outfeed tuple.
    self._names = []
    # All of these are dictionaries of lists keyed on the name.
    self._host_fns = {}
    # Per-name dict keys when the user passed a dict of tensors; None when a
    # list/tuple was passed.
    self._tensor_keys = collections.defaultdict(list)
    self._tensors = collections.defaultdict(list)
    self._tensor_dtypes = collections.defaultdict(list)
    self._tensor_shapes = collections.defaultdict(list)

  @staticmethod
  def validate(host_calls):
    """Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`.

    Each entry must be a (callable, tensors) pair where tensors is a
    list/tuple/dict; for a list/tuple the arity must match the callable
    (unless it takes varargs).
    """

    for name, host_call in host_calls.items():
      if not isinstance(host_call, (tuple, list)):
        raise ValueError('{} should be tuple or list'.format(name))
      if len(host_call) != 2:
        raise ValueError('{} should have two elements.'.format(name))
      if not callable(host_call[0]):
        raise TypeError('{}[0] should be callable.'.format(name))
      if not isinstance(host_call[1], (tuple, list, dict)):
        raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))

      if isinstance(host_call[1], (tuple, list)):
        fullargspec = tf_inspect.getfullargspec(host_call[0])
        fn_args = util.fn_args(host_call[0])
        # wrapped_hostcall_with_global_step uses varargs, so we allow that.
        if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
          raise RuntimeError(
              'In TPUEstimatorSpec.{}, length of tensors {} does not match '
              'method args of the function, which takes {}.'.format(
                  name, len(host_call[1]), len(fn_args)))

  @staticmethod
  def create_cpu_hostcall(host_calls):
    """Runs on the host_call on CPU instead of TPU when use_tpu=False."""

    _OutfeedHostCall.validate(host_calls)
    ret = {}
    for name, host_call in host_calls.items():
      host_fn, tensors = host_call
      if isinstance(tensors, (tuple, list)):
        ret[name] = host_fn(*tensors)
      else:
        # Must be dict.
        try:
          ret[name] = host_fn(**tensors)
        except TypeError as e:
          logging.warning(
              'Exception while calling %s: %s. It is likely the tensors '
              '(%s[1]) do not match the '
              'function\'s arguments', name, e, name)
          raise e
    return ret

  def record(self, host_calls):
    """Records the host_call structure.

    Captures each call's function plus the tensors' keys (for dict inputs),
    dtypes and shapes, so the same layout can be reproduced when dequeuing
    from outfeed.
    """

    for name, host_call in host_calls.items():
      host_fn, tensor_list_or_dict = host_call
      self._names.append(name)
      self._host_fns[name] = host_fn

      if isinstance(tensor_list_or_dict, dict):
        for (key, tensor) in six.iteritems(tensor_list_or_dict):
          self._tensor_keys[name].append(key)
          self._tensors[name].append(tensor)
          self._tensor_dtypes[name].append(tensor.dtype)
          self._tensor_shapes[name].append(tensor.shape)
      else:
        # List or tuple.
        self._tensor_keys[name] = None
        for tensor in tensor_list_or_dict:
          self._tensors[name].append(tensor)
          self._tensor_dtypes[name].append(tensor.dtype)
          self._tensor_shapes[name].append(tensor.shape)

  def create_enqueue_op(self):
    """Create the op to enqueue the recorded host_calls.

    Returns:
      A list of enqueue ops, which is empty if there are no host calls.
    """
    if not self._names:
      return []

    tensors = []
    # TODO(jhseu): Consider deduping tensors.
    for name in self._names:
      tensors.extend(self._tensors[name])

    # Enqueue on logical core 0; dequeue in create_tpu_hostcall is placed
    # per replica to match.
    with ops.device(tpu.core(0)):
      return [tpu_ops.outfeed_enqueue_tuple(tensors)]

  def create_tpu_hostcall(self):
    """Sends the tensors through outfeed and runs the host_fn on CPU.

    The tensors are concatenated along dimension 0 to form a global tensor
    across all shards. The concatenated function is passed to the host_fn and
    executed on the first host.

    Returns:
      A dictionary mapping name to the return type of the host_call by that
      name.

    Raises:
      RuntimeError: If outfeed tensor is scalar.
    """
    if not self._names:
      # NOTE(review): returns a list here but a dict below; callers appear to
      # only rely on falsiness for the empty case — confirm before changing.
      return []

    ret = {}
    # For each i, dequeue_ops[i] is a list containing the tensors from all
    # shards. This list is concatenated later.
    dequeue_ops = []
    tensor_dtypes = []
    tensor_shapes = []
    for name in self._names:
      for _ in self._tensors[name]:
        dequeue_ops.append([])
      for dtype in self._tensor_dtypes[name]:
        tensor_dtypes.append(dtype)
      for shape in self._tensor_shapes[name]:
        tensor_shapes.append(shape)

    # Outfeed ops execute on each replica's first logical core. Note: we must
    # constraint it such that we have at most one outfeed dequeue and enqueue
    # per replica.
    tpu_device_placement_fn = self._ctx.tpu_device_placement_function
    for i in xrange(self._ctx.num_replicas):
      with ops.device(tpu_device_placement_fn(i)):
        outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
            dtypes=tensor_dtypes, shapes=tensor_shapes)
        for j, item in enumerate(outfeed_tensors):
          dequeue_ops[j].append(item)

    # Deconstruct dequeue ops.
    dequeue_ops_by_name = {}
    pos = 0
    for name in self._names:
      dequeue_ops_by_name[name] = dequeue_ops[pos:pos+len(self._tensors[name])]
      pos += len(self._tensors[name])

    # It is assumed evaluation always happens on single host TPU system. So,
    # place all ops on tpu host if possible.
    #
    # TODO(jhseu): Evaluate whether this is right for summaries.
    with ops.device(self._ctx.tpu_host_placement_function(core_id=0)):
      for name in self._names:
        # Intentionally rebinds `dequeue_ops` to this name's slice; the outer
        # flat list is no longer needed past this point.
        dequeue_ops = dequeue_ops_by_name[name]
        for i, item in enumerate(dequeue_ops):
          if dequeue_ops[i][0].shape.ndims == 0:
            raise RuntimeError(
                'All tensors outfed from TPU should preserve batch size '
                'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
          # TODO(xiejw): Allow users to specify the axis for batch size
          # dimension.
          dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)

        if self._tensor_keys[name] is not None:
          # The user-provided eval_metrics[1] is a dict.
          dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
          try:
            ret[name] = self._host_fns[name](**dequeue_ops)
          except TypeError as e:
            logging.warning(
                'Exception while calling %s: %s. It is likely the tensors '
                '(%s[1]) do not match the '
                'function\'s arguments', name, e, name)
            raise e
        else:
          ret[name] = self._host_fns[name](*dequeue_ops)

    return ret
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
  """Hook to run host calls when use_tpu=False."""

  def __init__(self, tensors):
    self._tensors = tensors

  def begin(self):
    # Summary writers must be initialized before the outfeed thread starts,
    # so this duplicates the initialization from TPUInfeedOutfeedSessionHook
    # rather than relying on a separate hook (which could not guarantee
    # execution order).
    # TODO(jhseu): Make a wrapper hook instead?
    self._init_ops = contrib_summary.summary_writer_initializer_op()
    # Each initializer op's first input is the writer resource; collect a
    # flush op for every writer so they can all be flushed at session end.
    self._finalize_ops = [
        contrib_summary.flush(writer=op.inputs[0]) for op in self._init_ops
    ]

  def after_create_session(self, session, coord):
    session.run(self._init_ops)

  def before_run(self, run_context):
    return basic_session_run_hooks.SessionRunArgs(self._tensors)

  def end(self, session):
    session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
  """Calculate and report global_step/sec and examples/sec during runtime."""

  def __init__(self,
               batch_size,
               every_n_steps=100,
               every_n_secs=None,
               output_dir=None,
               summary_writer=None):
    self._batch_size = batch_size
    super(ExamplesPerSecondHook, self).__init__(
        every_n_steps=every_n_steps,
        every_n_secs=every_n_secs,
        output_dir=output_dir,
        summary_writer=summary_writer)

  def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
    # Examples/sec scales the step rate by the (global) batch size.
    steps_per_sec = elapsed_steps / elapsed_time
    examples_per_sec = self._batch_size * steps_per_sec
    if self._summary_writer is not None:
      for tag, value in (('global_step/sec', steps_per_sec),
                         ('examples/sec', examples_per_sec)):
        summary = Summary(
            value=[Summary.Value(tag=tag, simple_value=value)])
        self._summary_writer.add_summary(summary, global_step)
    logging.info('global_step/sec: %g', steps_per_sec)
    logging.info('examples/sec: %g', examples_per_sec)
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
  """Swap the SIGINT (CTRL^C) handler for the OS default to force-quit.

  The default Python handler often leaves hanging processes behind; the
  previously-installed handler is reinstated once training/evaluation ends.
  """

  def __init__(self):
    # Remember whatever handler was installed so end() can restore it.
    self._signal_fn = signal.getsignal(signal.SIGINT)

  def before_run(self, run_context):
    # SIG_DFL terminates the process immediately on CTRL^C.
    signal.signal(signal.SIGINT, signal.SIG_DFL)

  def end(self, session):
    signal.signal(signal.SIGINT, self._signal_fn)
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify
global batch size in constructor, and then get the batch size for each shard
in `input_fn` and `model_fn` by `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` get per-host batch size.
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
predict will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker).
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.metrics.precision(
labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
if mode = tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
if mode = tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
It is not designed for latency-critical system. In addition, due to some
usability issues, for prediction with small dataset, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
data from the generator so that TPUEstimator can shutdown the TPU system
properly for user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random_uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
for item in tpu_est.predict(input_fn=input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
Exporting `SavedModel` support on TPU is not yet implemented. So,
`export_savedmodel` is executed on CPU, even if `use_tpu` is true.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator`. For training, the
returned `EstimatorSpec` cannot have hooks as it is not supported in
`TPUEstimator`.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be same. If
both are `None`, a temporary directory will be used.
config: An `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently,
- TPU training and evaluation respect this bit.
- Predict still happens on CPU.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`.
Must be divisible by total number of replicas.
eval_batch_size: An int representing evaluation batch size.
Must be divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size.
Must be divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but existed in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _TPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.computation_shape):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPURunconfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
# Overwrite log_step_count_steps to disable TensorLoggingHook and
# StepCounterHook from being created in Estimator. TPUEstimator already
# added equivalent hooks in _augment_model_fn above.
self._log_every_n_steps = config.log_step_count_steps
config = config.replace(log_step_count_steps=None)
# Passing non-None params as wrapped model_fn has it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params)
self._iterations_per_training_loop = (
self._config.tpu_config.iterations_per_loop)
# All properties passed to _TPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config, train_batch_size,
eval_batch_size, predict_batch_size,
use_tpu)
self._is_input_fn_invoked = None
def _create_global_step(self, graph):
  """Creates a global step suitable for TPUs.

  Args:
    graph: The graph in which to create the global step.

  Returns:
    A global step `Tensor`.

  Raises:
    ValueError: if the global step tensor is already defined.
  """
  # Delegates to the module-level `_create_global_step` helper (defined
  # elsewhere in this file), overriding the parent Estimator's behavior.
  return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
  """Builds the training-duration hooks for either CPU or TPU execution."""
  with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as train_ctx:
    if train_ctx.is_running_on_cpu():
      # CPU path: defer entirely to the parent Estimator implementation.
      return super(TPUEstimator, self)._convert_train_steps_to_hooks(
          steps, max_steps)

  # TPU path: at least one stopping criterion is mandatory.
  if steps is None and max_steps is None:
    raise ValueError(
        'For TPU training, one of `steps` or `max_steps` must be set. '
        'Cannot be both `None`.')

  # Estimator.train has explicit positiveness check.
  for label, value in (('Train steps', steps), ('Train max_steps', max_steps)):
    if value is not None:
      util_lib.check_positive_integer(value, label)

  # A single hook enforces the step limit, aware that each session.run
  # advances the global step by iterations_per_loop.
  return [
      _TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
  ]
def _convert_eval_steps_to_hooks(self, steps):
  """Builds the evaluation-duration hooks for either CPU or TPU execution."""
  with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as eval_ctx:
    if eval_ctx.is_running_on_cpu():
      # CPU path: the parent Estimator behavior applies unchanged.
      return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)

  # TPU evaluation cannot run open-ended.
  if steps is None:
    raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')

  util_lib.check_positive_integer(steps, 'Eval steps')

  stop_hook = evaluation._StopAfterNEvalsHook(  # pylint: disable=protected-access
      num_evals=steps)
  return [stop_hook, _SetEvalIterationsHook(steps)]
def _call_input_fn(self, input_fn, mode):
  """Calls the input function.

  Args:
    input_fn: The input function.
    mode: ModeKeys

  Returns:
    Either features or (features, labels) where features and labels are:
      features - `Tensor` or dictionary of string feature name to `Tensor`.
      labels - `Tensor` or dictionary of `Tensor` with labels.
    On the TPU path this instead returns a zero-argument callable wrapping
    `input_fn` (see the comment near the bottom for why).

  Raises:
    ValueError: if input_fn takes invalid arguments or does not have `params`.
  """
  input_fn_args = util.fn_args(input_fn)
  config = self.config  # a deep copy.
  kwargs = {}
  # `params` is mandatory for TPUEstimator's input_fn: batch size is
  # delivered through params['batch_size'].
  if 'params' in input_fn_args:
    kwargs['params'] = self.params  # a deep copy.
  else:
    raise ValueError('input_fn ({}) does not include params argument, '
                     'required by TPUEstimator to pass batch size as '
                     'params["batch_size"]'.format(input_fn))
  # `config` and `mode` are optional input_fn arguments.
  if 'config' in input_fn_args:
    kwargs['config'] = config

  if 'mode' in input_fn_args:
    kwargs['mode'] = mode

  # Records the fact input_fn has been invoked. Read later by the augmented
  # model_fn to tell .predict() apart from export_savedmodel.
  self._is_input_fn_invoked = True

  with self._ctx.with_mode(mode) as ctx:
    # Setting the batch size in params first. This helps user to have same
    # input_fn for use_tpu=True/False.
    batch_size_for_input_fn = ctx.batch_size_for_input_fn
    if batch_size_for_input_fn is not None:
      if isinstance(kwargs['params'], hparam.HParams):
        kwargs['params'].add_hparam(_BATCH_SIZE_KEY, batch_size_for_input_fn)
      else:
        kwargs['params'][_BATCH_SIZE_KEY] = batch_size_for_input_fn

    # For export_savedmodel, input_fn is never passed to Estimator. So,
    # `is_export_mode` must be False.
    if ctx.is_running_on_cpu(is_export_mode=False):
      with ops.device('/device:CPU:0'):
        return input_fn(**kwargs)

    # For TPU computation, input_fn should be invoked in a tf.while_loop for
    # performance. While constructing the tf.while_loop, the structure of
    # inputs returned by the `input_fn` needs to be recorded. The structure
    # includes whether features or labels is dict or single Tensor, dict keys,
    # tensor shapes, and dtypes. The recorded structure is used to create the
    # infeed dequeue ops, which must be wrapped and passed as a Fn, called
    # inside the TPU computation, as the TPU computation is wrapped inside a
    # tf.while_loop also. So, we either pass input_fn to model_fn or pass
    # dequeue_fn to model_fn. Here, `input_fn` is passed directly as
    # `features` in `model_fn` signature.
    def _input_fn():
      return input_fn(**kwargs)

    return _input_fn
def _validate_features_in_predict_input(self, result):
  """Skip the validation.

  For TPUEstimator, we do not need to check the result type. `_InputPipeline`
  has stronger check. Parent class's check generates confusing warning msg.

  Args:
    result: `features` returned by input_fn.
  """
  # Intentionally a no-op override of the parent Estimator hook.
  pass
def _augment_model_fn(self, model_fn, batch_axis):
  """Returns a new model_fn, which wraps the TPU support."""

  def _model_fn(features, labels, mode, config, params):
    """A Estimator `model_fn` for TPUEstimator."""
    with self._ctx.with_mode(mode) as ctx:
      model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)

      if mode != model_fn_lib.ModeKeys.PREDICT:
        is_export_mode = False
      else:
        # For export_savedmodel, input_fn is never passed to Estimator. So, by
        # checking the self._is_input_fn_invoked bit, we can know, given the
        # mode == PREDICT, it is the .predict API, not export_savedmodel API.
        if self._is_input_fn_invoked:
          is_export_mode = False
        else:
          is_export_mode = True

      # Clear the bit.
      self._is_input_fn_invoked = None

      # CPU path (use_tpu=False, or export): run the user's model_fn as-is.
      if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
        logging.info('Running %s on CPU', mode)
        return model_fn_wrapper.call_without_tpu(
            features, labels, is_export_mode=is_export_mode)

      assert labels is None, '`labels` passed to `model_fn` must be `None`.'
      # TPUEstimator._call_input_fn passes `input_fn` as features to here.
      assert callable(features), '`input_fn` is not callable.'
      input_fn = features

      # Build the infeed/outfeed pipeline around the user's input_fn.
      input_holders = _InputPipeline(input_fn, batch_axis, ctx)
      enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
          input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())

      # Register enqueue ops in a graph collection so other components can
      # find them; enqueue_ops items may themselves be lists of ops.
      graph = ops.get_default_graph()
      for enqueue_op in enqueue_ops:
        if isinstance(enqueue_op, list):
          graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
        else:
          graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)

      if mode == model_fn_lib.ModeKeys.TRAIN:
        loss, host_call, scaffold = (
            _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
        host_ops = host_call.create_tpu_hostcall()
        if host_ops is None:
          host_ops = []

        shutdown_hooks = []
        shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
                                       'shutdown_worker')
        if shutdown_mode:
          if shutdown_mode == 'shutdown_worker':
            finalizer_hooks = [
                session_support.ShutdownLameWorkers(timeout_ms=1000),
            ]
          elif shutdown_mode == 'shutdown_computation':
            finalizer_hooks = [
                session_support.RestartComputation(timeout_ms=1000),
            ]
          else:
            raise ValueError('Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' %
                             shutdown_mode)

          shutdown_hooks.append(session_support.GracefulShutdownHook(
              checkpoint_prefix=self.model_dir + '/model.ckpt',
              on_shutdown_hooks=finalizer_hooks
          ))

        # Read the global step only after the loop's loss is available, so
        # the logged step reflects the finished loop.
        with ops.control_dependencies([loss]):
          global_step = array_ops.identity(training.get_global_step())
        hooks = input_hooks + shutdown_hooks
        logging_hook_frequency = (  # Divide and round up
            (self._log_every_n_steps +
             self._config.tpu_config.iterations_per_loop - 1) //
            self._config.tpu_config.iterations_per_loop)
        hooks.extend([
            TPUInfeedOutfeedSessionHook(
                ctx,
                enqueue_ops,
                host_ops,
                run_infeed_loop_on_coordinator=(
                    run_infeed_loop_on_coordinator)),
            InstallSignalHandlerHook(),
            training.LoggingTensorHook(
                {
                    'loss': array_ops.identity(loss),
                    'step': global_step,
                },
                every_n_iter=logging_hook_frequency)
        ])
        examples_hook = ExamplesPerSecondHook(
            ctx.global_batch_size,
            output_dir=self.model_dir,
            every_n_steps=self._log_every_n_steps)
        examples_hook._set_steps_per_run(  # pylint: disable=protected-access
            self._config.tpu_config.iterations_per_loop)
        hooks.append(examples_hook)

        # Checkpointing is handled here (as a chief hook) because
        # log_step_count_steps was cleared on the parent Estimator.
        chief_hooks = []
        if (self._config.save_checkpoints_secs or
            self._config.save_checkpoints_steps):
          checkpoint_hook = training.CheckpointSaverHook(
              self.model_dir,
              save_secs=self._config.save_checkpoints_secs,
              save_steps=self._config.save_checkpoints_steps,
              scaffold=scaffold)
          checkpoint_hook._set_steps_per_run(  # pylint: disable=protected-access
              self._config.tpu_config.iterations_per_loop)
          chief_hooks.append(checkpoint_hook)
        summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
        with ops.control_dependencies([loss]):
          update_ops = _sync_variables_ops()

        # Validate the TPU training graph to catch basic errors
        _validate_tpu_training_graph()

        train_op = control_flow_ops.group(*update_ops)
        graph.add_to_collection(_TPU_TRAIN_OP, train_op)

        return model_fn_lib.EstimatorSpec(
            mode,
            loss=loss,
            training_chief_hooks=chief_hooks,
            training_hooks=hooks,
            train_op=train_op,
            scaffold=scaffold)

      if mode == model_fn_lib.ModeKeys.EVAL:
        total_loss, host_calls, scaffold = _eval_on_tpu_system(
            ctx, model_fn_wrapper, dequeue_fn)
        iterations_per_loop_var = _create_or_get_iterations_per_loop()
        # The TPU loop accumulates loss; report the per-iteration mean.
        mean_loss = math_ops.div(total_loss,
                                 math_ops.cast(
                                     iterations_per_loop_var,
                                     dtype=total_loss.dtype))

        # Creates a dummy metric update_op for all metrics. Estimator expects
        # all metrics in eval_metric_ops have update_op and calls them one by
        # one. The real metric update_ops are invoked in a separated thread.
        # So, here give Estimator the dummy op for all metrics.
        with ops.control_dependencies([mean_loss]):
          # After TPU evaluation computation is done (the mean_loss tensor),
          # reads all variables back from TPU and updates the eval step
          # counter properly
          internal_ops_to_run = _sync_variables_ops()
          internal_ops_to_run.append(
              _increase_eval_step_op(iterations_per_loop_var))
          with ops.control_dependencies(internal_ops_to_run):
            dummy_update_op = control_flow_ops.no_op()

        host_call_ret = host_calls.create_tpu_hostcall()
        eval_metric_ops = {}
        eval_update_ops = []
        for k, v in host_call_ret['eval_metrics'].items():
          eval_metric_ops[k] = (v[0], dummy_update_op)
          eval_update_ops.append(v[1])

        if 'host_call' not in host_call_ret:
          host_ops = []
        else:
          host_ops = host_call_ret['host_call']
        hooks = [
            TPUInfeedOutfeedSessionHook(
                ctx,
                enqueue_ops,
                eval_update_ops + host_ops,
                run_infeed_loop_on_coordinator=(
                    run_infeed_loop_on_coordinator)),
        ] + input_hooks

        return model_fn_lib.EstimatorSpec(
            mode,
            loss=mean_loss,
            evaluation_hooks=hooks,
            eval_metric_ops=eval_metric_ops,
            scaffold=scaffold)

      # Predict
      assert mode == model_fn_lib.ModeKeys.PREDICT

      dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(
          ctx, model_fn_wrapper, dequeue_fn)
      # Sequence variable sync after the TPU program via control deps, then
      # replace dummy_predict_op with a no-op carrying those dependencies.
      with ops.control_dependencies([dummy_predict_op]):
        internal_ops_to_run = _sync_variables_ops()
        with ops.control_dependencies(internal_ops_to_run):
          dummy_predict_op = control_flow_ops.no_op()

      # In train and evaluation, the main TPU program is passed to monitored
      # training session to run. Infeed enqueue and outfeed dequeue are
      # executed in side threads. This is not the configuration for
      # prediction mode.
      #
      # For prediction, the Estimator executes the EstimatorSpec.predictions
      # directly and yield the element (via generator) to call site. So, the
      # outfeed based prediction must be passed to MonitoredSession directly.
      # Other parts of the TPU execution are organized as follows.
      #
      # 1. All outfeed based Tensors must be grouped with predictions Tensors
      #    to form a single invocation. This avoid the issue we might trigger
      #    multiple outfeeds incorrectly. To achieve this, `host_call` is
      #    placed in control_dependencies of `stopping_signals`, and
      #    `stopping_signals` is passed into _StoppingPredictHook, which sets
      #    the `stopping_signals` as SessionRunArgs. MonitoredSession merges
      #    all SessionRunArgs with the fetch in session.run together.
      #
      # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
      #    are grouped together. They will be launched once and only once in
      #    side threads and they quit naturally according to the SAME stopping
      #    condition.
      enqueue_ops.append(dummy_predict_op)

      host_call_ret = host_calls.create_tpu_hostcall()
      if 'host_call' not in host_call_ret:
        host_ops = []
      else:
        host_ops = host_call_ret['host_call']
      predictions = host_call_ret['predictions']
      _verify_cross_hosts_transfer_size(
          predictions, message=(
              'The estimated size for TPUEstimatorSpec.predictions is too '
              'large.'))
      signals = host_call_ret['signals']

      with ops.control_dependencies(host_ops):
        host_ops = []  # Empty, we do not need it anymore.
        scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
            signals)
        predictions = _PaddingSignals.slice_tensor_or_dict(
            predictions, signals)

      hooks = [
          _StoppingPredictHook(scalar_stopping_signal),
          TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,
                                                   host_ops),
      ] + input_hooks

      return model_fn_lib.EstimatorSpec(
          mode,
          prediction_hooks=hooks,
          predictions=predictions,
          scaffold=scaffold)

  return _model_fn
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
  """Executes `model_fn_wrapper` multiple times on all TPU shards."""
  loop_iterations = _create_or_get_iterations_per_loop()
  step_fn, host_calls, captured_scaffold_fn = (
      model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))

  def _repeated_eval_steps():
    # Repeat the single eval step, threading the accumulated loss through
    # as loop state (starting from _ZERO_LOSS).
    return training_loop.repeat(loop_iterations, step_fn, [_ZERO_LOSS])

  (loss,) = tpu.shard(
      _repeated_eval_steps,
      inputs=[],
      num_shards=ctx.num_replicas,
      outputs_from_all_shards=False,
      device_assignment=ctx.device_assignment)

  return loss, host_calls, _get_scaffold(captured_scaffold_fn)
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
  """Executes `model_fn_wrapper` multiple times on all TPU shards."""
  loop_iterations = _create_or_get_iterations_per_loop()
  step_fn, host_call, captured_scaffold_fn = (
      model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))

  def _repeated_train_steps():
    # Repeat the single train step, threading the loss through as loop
    # state (starting from _INITIAL_LOSS).
    return training_loop.repeat(loop_iterations, step_fn, [_INITIAL_LOSS])

  (loss,) = tpu.shard(
      _repeated_train_steps,
      inputs=[],
      num_shards=ctx.num_replicas,
      outputs_from_all_shards=False,
      device_assignment=ctx.device_assignment)

  return loss, host_call, _get_scaffold(captured_scaffold_fn)
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
  """Executes `model_fn_wrapper` multiple times on all TPU shards."""
  num_cores = ctx.num_cores
  step_fn, host_calls, captured_scaffold_fn = (
      model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn))

  def _predict_until_stopped():
    # Unlike train/eval, prediction loops until the stopping signal flips,
    # not for a fixed iteration count.

    def _keep_running(scalar_stopping_signal):
      return math_ops.logical_not(
          _StopSignals.should_stop(scalar_stopping_signal))

    return training_loop.while_loop(
        _keep_running, step_fn,
        inputs=[_StopSignals.NON_STOPPING_SIGNAL], name=b'loop')

  (dummy_predict_op,) = tpu.shard(
      _predict_until_stopped,
      inputs=[],
      num_shards=num_cores,
      outputs_from_all_shards=False)

  return dummy_predict_op, host_calls, _get_scaffold(captured_scaffold_fn)
def _wrap_computation_in_while_loop(device, op_fn):
  """Wraps the ops generated by `op_fn` in tf.while_loop."""

  def _body(step):
    # The counter is the only loop state; the real work is attached as a
    # control dependency so it re-runs each iteration.
    with ops.control_dependencies(op_fn()):
      return step + 1

  iterations_per_loop_var = _create_or_get_iterations_per_loop()
  with ops.device(device):
    iterations = array_ops.identity(iterations_per_loop_var)
    # By setting parallel_iterations=1, the parallel execution in while_loop
    # is basically turned off.
    return control_flow_ops.while_loop(
        lambda step: step < iterations,
        _body, [constant_op.constant(0)],
        parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
  """Wraps the ops generated by `op_fn` in tf.while_loop."""

  def _keep_running(scalar_stopping_signal):
    return math_ops.logical_not(
        _StopSignals.should_stop(scalar_stopping_signal))

  def _run_once(unused_scalar_stopping_signal):
    step_result = op_fn()
    # Execute the step's ops, then surface its stopping signal as the new
    # loop state.
    with ops.control_dependencies(step_result['ops']):
      return _StopSignals.as_scalar_stopping_signal(step_result['signals'])

  # By setting parallel_iterations=1, the parallel execution in while_loop is
  # basically turned off.
  with ops.device(device):
    return control_flow_ops.while_loop(
        _keep_running,
        _run_once, [_StopSignals.NON_STOPPING_SIGNAL],
        parallel_iterations=1)
def _validate_tpu_training_graph():
  """Validate graph before running distributed training.

  Raises:
    ValueError: If the graph seems invalid for running on device
  """
  graph_ops = ops.get_default_graph().get_operations()
  # A CrossReplicaSum op is introduced by the CrossShardOptimizer wrapper;
  # its absence means gradients would not be aggregated across replicas.
  if not any(op.type == _CROSS_REPLICA_SUM_OP for op in graph_ops):
    raise ValueError(
        'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
  """A placeholder to capture an object.

  This is useful when we need to capture a Python object in the Tensorflow
  control flow body function and use it outside the control flow.
  """

  def __init__(self):
    # The captured payload and a flag recording whether capture() ran.
    self._object = None
    self._captured = False

  def capture(self, o):
    """Stores `o`; a second call is an internal error."""
    if self._captured:
      raise RuntimeError(
          'InternalError: Object can be captured only. Please file bug .')

    self._object = o
    self._captured = True

  def get(self):
    """Returns the captured object; calling before capture() is an error."""
    if not self._captured:
      raise RuntimeError(
          'InternalError: Object is not captured properly before `get`. '
          'Please file bug .')

    return self._object
def _get_scaffold(captured_scaffold_fn):
  """Retrieves the Scaffold from `captured_scaffold_fn`."""
  # Run the user's scaffold_fn inside a capturing context so any illegal
  # reference to TPU-computation tensors is reported.
  with _CapturingContext(message='Inside scaffold_fn'):
    scaffold_fn = captured_scaffold_fn.get()
    if not scaffold_fn:
      scaffold = None
    else:
      scaffold = scaffold_fn()
      if scaffold is None:
        raise ValueError(
            'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')

  if scaffold:
    original_finalize = scaffold.finalize

    def _finalize():
      # Apply the same capture check when the scaffold is finalized.
      with _CapturingContext('Inside Scaffold.finalize'):
        original_finalize()

    scaffold.finalize = _finalize
  return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
  """Tracks references to Tensors defined in TPU replication."""

  def __init__(self, message):
    control_flow_ops.ControlFlowContext.__init__(self)
    # Prefix used in the error raised when a bad reference is found.
    self._message = message

  def AddOp(self, op):  # pylint: disable=invalid-name
    # Reject any op whose input was produced inside a TPU-replicated
    # computation; such tensors may not be consumed from this context.
    for tensor in op.inputs:
      if tpu._TPU_REPLICATE_ATTR in tensor.op.node_def.attr:  # pylint: disable=protected-access
        raise ValueError('{}: Op {} depends on TPU computation {}, '
                         'which is not allowed.'.format(self._message, op,
                                                        tensor))

  def __enter__(self):
    # pylint: disable=protected-access
    self._g = ops.get_default_graph()
    self._old = self._g._get_control_flow_context()
    self._g._set_control_flow_context(self)
    # pylint: enable=protected-access

  def __exit__(self, _, __, ___):  # pylint: disable=invalid-name
    # Restore whatever control flow context was active on entry.
    self._g._set_control_flow_context(self._old)  # pylint: disable=protected-access
class _Inputs(object):
  """A data structure representing the input_fn returned values.

  This also supports the returned value from input_fn as `Dataset`.
  """

  def __init__(self, features=None, labels=None, dataset=None, signals=None):
    # A dataset already carries features/labels/signals, so passing both
    # forms at once is an internal contract violation.
    has_direct_values = (features is not None or labels is not None or
                         signals is not None)
    if dataset is not None and has_direct_values:
      raise RuntimeError('Internal Error: Either (features and labels) or '
                         'dataset should be provided, not both. Please file '
                         'bug')

    self._features = features
    self._labels = labels
    self._signals = signals
    self._dataset = dataset
    self._iterator = None

  @staticmethod
  def from_input_fn(return_values):
    """Returns an `_Inputs` instance according to `input_fn` return value."""
    if isinstance(return_values, dataset_ops.Dataset):
      return _Inputs(dataset=return_values)
    features, labels = _Inputs._parse_inputs(return_values)
    return _Inputs(features, labels)

  @staticmethod
  def _parse_inputs(return_values):
    """Splits `return_values` into a (features, labels) pair."""
    if isinstance(return_values, tuple):
      features, labels = return_values
    else:
      features, labels = return_values, None
    return features, labels

  @property
  def is_dataset(self):
    """Returns True if the return value from input_fn is Dataset."""
    return self._dataset is not None

  def dataset_initializer_hook(self):
    """Returns a `SessionRunHook` to initialize this dataset.

    This must be called before `features_and_labels`.
    """
    iterator = self._dataset.make_initializable_iterator()
    # pylint: disable=protected-access
    hook = estimator_lib._DatasetInitializerHook(iterator)
    self._iterator = iterator
    return hook

  def features_and_labels(self):
    """Gets `features` and `labels`."""
    if not self.is_dataset:
      return (self._features, self._labels)
    if self._iterator is None:
      raise RuntimeError('Internal error: Must call dataset_initializer_hook '
                         'before calling features_and_labels(). Please file '
                         'a bug!')
    return _Inputs._parse_inputs(self._iterator.get_next())

  def signals(self):
    """Returns the signals given at construction time, if any."""
    return self._signals

  @property
  def dataset(self):
    """The wrapped `Dataset`, or None when direct tensors were given."""
    return self._dataset
class _InputsWithStoppingSignals(_Inputs):
  """Inputs with `_StopSignals` inserted into the dataset."""

  def __init__(self, dataset, batch_size, add_padding=False):
    assert dataset is not None

    # Every user batch carries stop=False; one extra batch (built from
    # dataset.take(1)) carries stop=True and acts as the end sentinel.
    user_provided_dataset = dataset.map(
        _InputsWithStoppingSignals.insert_stopping_signal(
            stop=False, batch_size=batch_size, add_padding=add_padding))
    final_batch_dataset = dataset.take(1).map(
        _InputsWithStoppingSignals.insert_stopping_signal(
            stop=True, batch_size=batch_size, add_padding=add_padding))
    dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)

    super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
    # Holds the batch between features_and_labels() and signals().
    self._current_inputs = None

  def features_and_labels(self):
    """Pops the next batch, holding its signals for the `signals()` call."""
    if self._current_inputs is not None:
      raise RuntimeError(
          'Internal Error: The previous inputs have not been properly '
          'consumed. First call features_and_labels, then call signals.')

    inputs_with_signals = self._iterator.get_next()
    self._current_inputs = inputs_with_signals
    return inputs_with_signals['features'], inputs_with_signals.get('labels')

  def signals(self):
    """Returns the `Signals` from `_Inputs`."""
    if self._current_inputs is None:
      raise RuntimeError(
          'Internal Error: The current inputs have not been properly '
          'generated. First call features_and_labels, then call signals.')
    signals = self._current_inputs['signals']
    self._current_inputs = None
    return signals

  @staticmethod
  def insert_stopping_signal(stop, batch_size, add_padding=False):
    """Inserts stopping_signal into dataset via _map_fn.

    Here we change the data structure in the dataset, such that the return
    value is a dictionary now and `features`, `labels`, and `signals` are
    three distinguished keys in that dict. This provides a better structure,
    which eases the process to decompose the inputs (see
    `features_and_labels`).

    Args:
      stop: bool, state of current stopping signals.
      batch_size: int, batch size.
      add_padding: bool, whether to pad the tensor to full batch size.

    Returns:
      A map_fn passed to dataset.map API.
    """

    def _map_fn(*args):
      """The map fn to insert signals."""
      if len(args) == 1:
        # Unpack the single Tensor/dict argument as features. This is
        # required for the input_fn returns no labels.
        args = args[0]
      features, labels = _Inputs._parse_inputs(args)

      if add_padding:
        padding_mask, features, labels = (
            _PaddingSignals.pad_features_and_labels(
                features, labels, batch_size))
      else:
        padding_mask = None

      new_input_dict = {'features': features}
      if labels is not None:
        new_input_dict['labels'] = labels
      new_input_dict['signals'] = _StopSignals(
          stop=stop, batch_size=batch_size,
          padding_mask=padding_mask).as_dict()

      return new_input_dict

    return _map_fn
class _StopSignals(object):
  """Signals class holding all logic to handle TPU stopping condition."""

  NON_STOPPING_SIGNAL = False
  STOPPING_SIGNAL = True

  def __init__(self, stop, batch_size, padding_mask=None):
    # `stop` marks the sentinel batch; `padding_mask` is optional.
    self._stop = stop
    self._batch_size = batch_size
    self._padding_mask = padding_mask

  def as_dict(self):
    """Returns the signals as Python dict."""
    signal_shape = [self._batch_size, 1]
    # All-ones for the stop batch, all-zeros otherwise.
    fill = array_ops.ones if self._stop else array_ops.zeros
    signals = {'stopping': fill(shape=signal_shape, dtype=dtypes.bool)}
    if self._padding_mask is not None:
      signals['padding_mask'] = self._padding_mask
    return signals

  @staticmethod
  def as_scalar_stopping_signal(signals):
    """Reduces the per-example stopping signal to a single scalar."""
    return array_ops.identity(signals['stopping'][0][0])

  @staticmethod
  def should_stop(scalar_stopping_signal):
    """Evaluates the stopping signal, in-graph or in pure Python."""
    if isinstance(scalar_stopping_signal, ops.Tensor):
      # STOPPING_SIGNAL is a constant True. Here, the logical_and is just the
      # TF way to express the bool check whether scalar_stopping_signal is
      # True.
      return math_ops.logical_and(
          scalar_stopping_signal, _StopSignals.STOPPING_SIGNAL)
    # For non Tensor case, it is used in SessionRunHook. So, we cannot modify
    # the graph anymore. Here, we use pure Python.
    return bool(scalar_stopping_signal)
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = array_ops.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)
check_greater = check_ops.assert_greater_equal(
batch_size_tensor, real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with ops.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = array_ops.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(
real_batch_size, missing_count, batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = array_ops.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
with ops.control_dependencies([check_batch_size]):
return array_ops.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
# As we split the Tensors to all TPU cores and concat them back, it is
# important to ensure the real data is placed before padded ones, i.e.,
# order is preserved. By that, the sliced padding mask should have all 0's.
# If this assertion failed, # the slice logic here would not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = math_ops.equal(
math_ops.reduce_sum(sliced_padding_mask), 0)
with ops.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is full batch or part of stopping signals, we do
# not need to slice to save performance.
return control_flow_ops.cond(
math_ops.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [x for x in nest.flatten(batch_features)
if isinstance(x, ops.Tensor)]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
    """Build an int32 mask over the batch: 0 for real rows, 1 for padded rows."""
    real_part = array_ops.zeros((real_batch_size,), dtype=dtypes.int32)
    padded_part = array_ops.ones((missing_count,), dtype=dtypes.int32)
    mask = array_ops.concat([real_part, padded_part], axis=0)
    # Pin the static shape: real + missing always add up to the full batch.
    mask.set_shape((batch_size,))
    return mask
class _SignalsHelper(object):
"""A general helper class to handle common signals manipulation."""
def __init__(self, signals):
self._signal_keys = []
for key in sorted(signals.iterkeys()):
self._signal_keys.append(key)
@property
def num_signals(self):
return len(self._signal_keys)
def unflatten(self, tensor_list):
return dict(zip(self._signal_keys, tensor_list))
@staticmethod
def as_tensor_list(signals):
return [signals[key] for key in sorted(signals.iterkeys())]
def _verify_cross_hosts_transfer_size(tensor_dict, message):
    """Raise ValueError if `tensor_dict` is too large to transfer across hosts.

    Sums `prod(shape) * dtype.size` over all entries and compares against the
    protobuf-motivated `_ONE_GIGABYTE` limit, reporting every key/shape in the
    error message to help the user find the offender.
    """
    total_size = 0
    tensor_structure = {}
    for key, tensor in tensor_dict.items():
        shape = tensor.shape
        # np.product was deprecated and removed in NumPy 2.0; np.prod is the
        # supported, numerically identical spelling.
        size = np.prod(shape) * tensor.dtype.size
        tensor_structure[key] = shape
        total_size += size
    if total_size >= _ONE_GIGABYTE:
        raise ValueError(
            '{} The transfer size is larger than the protobuf limit. Please '
            'consider to use Tensors with smaller shapes or reduce batch '
            'size. Given:\n'
            '{}'.format(message, '\n'.join([
                ' -- Key: {}, Shape: {}'.format(k, v)
                for k, v in tensor_structure.items()])))
|
executor.py | from concurrent.futures import Future
import typeguard
import logging
import threading
import queue
import pickle
from multiprocessing import Process, Queue
from typing import Dict, List, Optional, Tuple, Union
import math
from ipyparallel.serialize import pack_apply_message
from ipyparallel.serialize import deserialize_object
from parsl.app.errors import RemoteExceptionWrapper
from parsl.executors.high_throughput import zmq_pipes
from parsl.executors.high_throughput import interchange
from parsl.executors.errors import BadMessage, ScalingFailed, DeserializationError, SerializationError
from parsl.executors.status_handling import StatusHandlingExecutor
from parsl.providers.provider_base import ExecutionProvider
from parsl.data_provider.staging import Staging
from parsl.addresses import get_all_addresses
from parsl.utils import RepresentationMixin
from parsl.providers import LocalProvider
logger = logging.getLogger(__name__)
BUFFER_THRESHOLD = 1024 * 1024
ITEM_THRESHOLD = 1024
class HighThroughputExecutor(StatusHandlingExecutor, RepresentationMixin):
"""Executor designed for cluster-scale
The HighThroughputExecutor system has the following components:
1. The HighThroughputExecutor instance which is run as part of the Parsl script.
2. The Interchange, which acts as a load-balancing proxy between workers and Parsl
3. The multiprocessing based worker pool which coordinates task execution over several
cores on a node.
4. ZeroMQ pipes connect the HighThroughputExecutor, Interchange and the process_worker_pool
Here is a diagram
.. code:: python
| Data | Executor | Interchange | External Process(es)
| Flow | | |
Task | Kernel | | |
+----->|-------->|------------>|->outgoing_q---|-> process_worker_pool
| | | | batching | | |
Parsl<---Fut-| | | load-balancing| result exception
^ | | | watchdogs | | |
| | | Q_mngmnt | | V V
| | | Thread<--|-incoming_q<---|--- +---------+
| | | | | |
| | | | | |
+----update_fut-----+
Each of the workers in each process_worker_pool has access to its local rank through
an environmental variable, ``PARSL_WORKER_RANK``. The local rank is unique for each process
and is an integer in the range from 0 to the number of workers in the pool minus 1.
The workers also have access to the ID of the worker pool as ``PARSL_WORKER_POOL_ID``
and the size of the worker pool as ``PARSL_WORKER_COUNT``.
Parameters
----------
provider : :class:`~parsl.providers.provider_base.ExecutionProvider`
Provider to access computation resources. Can be one of :class:`~parsl.providers.aws.aws.EC2Provider`,
:class:`~parsl.providers.cobalt.cobalt.Cobalt`,
:class:`~parsl.providers.condor.condor.Condor`,
:class:`~parsl.providers.googlecloud.googlecloud.GoogleCloud`,
:class:`~parsl.providers.gridEngine.gridEngine.GridEngine`,
:class:`~parsl.providers.jetstream.jetstream.Jetstream`,
:class:`~parsl.providers.local.local.Local`,
:class:`~parsl.providers.sge.sge.GridEngine`,
:class:`~parsl.providers.slurm.slurm.Slurm`, or
:class:`~parsl.providers.torque.torque.Torque`.
label : str
Label for this executor instance.
launch_cmd : str
Command line string to launch the process_worker_pool from the provider. The command line string
will be formatted with appropriate values for the following values (debug, task_url, result_url,
cores_per_worker, nodes_per_block, heartbeat_period, heartbeat_threshold, logdir). For example:
launch_cmd="process_worker_pool.py {debug} -c {cores_per_worker} --task_url={task_url} --result_url={result_url}"
address : string
An address to connect to the main Parsl process which is reachable from the network in which
workers will be running. This can be either a hostname as returned by `hostname` or an
IP address. Most login nodes on clusters have several network interfaces available, only
some of which can be reached from the compute nodes.
By default, the executor will attempt to enumerate and connect through all possible addresses.
Setting an address here overrides the default behavior.
default=None
worker_ports : (int, int)
Specify the ports to be used by workers to connect to Parsl. If this option is specified,
worker_port_range will not be honored.
worker_port_range : (int, int)
Worker ports will be chosen between the two integers provided.
interchange_port_range : (int, int)
Port range used by Parsl to communicate with the Interchange.
working_dir : str
Working dir to be used by the executor.
worker_debug : Bool
Enables worker debug logging.
managed : Bool
If this executor is managed by the DFK or externally handled.
cores_per_worker : float
cores to be assigned to each worker. Oversubscription is possible
by setting cores_per_worker < 1.0. Default=1
mem_per_worker : float
GB of memory required per worker. If this option is specified, the node manager
will check the available memory at startup and limit the number of workers such that
there is sufficient memory for each worker. Default: None
max_workers : int
Caps the number of workers launched by the manager. Default: infinity
prefetch_capacity : int
Number of tasks that could be prefetched over available worker capacity.
When there are a few tasks (<100) or when tasks are long running, this option should
be set to 0 for better load balancing. Default is 0.
suppress_failure : Bool
If set, the interchange will suppress failures rather than terminate early. Default: True
heartbeat_threshold : int
Seconds since the last message from the counterpart in the communication pair:
(interchange, manager) after which the counterpart is assumed to be un-available. Default: 120s
heartbeat_period : int
Number of seconds after which a heartbeat message indicating liveness is sent to the
counterpart (interchange, manager). Default: 30s
poll_period : int
Timeout period to be used by the executor components in milliseconds. Increasing poll_periods
trades performance for cpu efficiency. Default: 10ms
worker_logdir_root : string
In case of a remote file system, specify the path to where logs will be kept.
"""
@typeguard.typechecked
def __init__(self,
             label: str = 'HighThroughputExecutor',
             # NOTE(review): this default is evaluated once at import time, so
             # executors built without an explicit provider share one
             # LocalProvider instance — confirm that is intended.
             provider: ExecutionProvider = LocalProvider(),
             launch_cmd: Optional[str] = None,
             address: Optional[str] = None,
             worker_ports: Optional[Tuple[int, int]] = None,
             worker_port_range: Optional[Tuple[int, int]] = (54000, 55000),
             interchange_port_range: Optional[Tuple[int, int]] = (55000, 56000),
             storage_access: Optional[List[Staging]] = None,
             working_dir: Optional[str] = None,
             worker_debug: bool = False,
             cores_per_worker: float = 1.0,
             mem_per_worker: Optional[float] = None,
             max_workers: Union[int, float] = float('inf'),
             prefetch_capacity: int = 0,
             heartbeat_threshold: int = 120,
             heartbeat_period: int = 30,
             poll_period: int = 10,
             suppress_failure: bool = True,
             managed: bool = True,
             worker_logdir_root: Optional[str] = None):
    """Record configuration; see the class docstring for parameter meanings.

    No processes or threads are started here — that happens in `start()`.
    """
    logger.debug("Initializing HighThroughputExecutor")

    StatusHandlingExecutor.__init__(self, provider)
    self.label = label
    self.launch_cmd = launch_cmd
    self.worker_debug = worker_debug
    self.storage_access = storage_access
    self.working_dir = working_dir
    self.managed = managed
    self.blocks = {}  # type: Dict[str, str]
    self.cores_per_worker = cores_per_worker
    self.mem_per_worker = mem_per_worker
    self.max_workers = max_workers
    self.prefetch_capacity = prefetch_capacity
    self.address = address
    if self.address:
        self.all_addresses = address
    else:
        # No explicit address given: advertise every reachable interface.
        self.all_addresses = ','.join(get_all_addresses())

    # Derive workers-per-node from provider memory/CPU hints when available;
    # fall back to max_workers when the provider offers no hints.
    mem_slots = max_workers
    cpu_slots = max_workers
    if hasattr(self.provider, 'mem_per_node') and \
            self.provider.mem_per_node is not None and \
            mem_per_worker is not None and \
            mem_per_worker > 0:
        mem_slots = math.floor(self.provider.mem_per_node / mem_per_worker)
    if hasattr(self.provider, 'cores_per_node') and \
            self.provider.cores_per_node is not None:
        cpu_slots = math.floor(self.provider.cores_per_node / cores_per_worker)

    self.workers_per_node = min(max_workers, mem_slots, cpu_slots)
    if self.workers_per_node == float('inf'):
        self.workers_per_node = 1  # our best guess-- we do not have any provider hints

    self._task_counter = 0
    self.hub_address = None  # set to the correct hub address in dfk
    self.hub_port = None  # set to the correct hub port in dfk
    self.worker_ports = worker_ports
    self.worker_port_range = worker_port_range
    self.interchange_port_range = interchange_port_range
    self.heartbeat_threshold = heartbeat_threshold
    self.heartbeat_period = heartbeat_period
    self.poll_period = poll_period
    self.suppress_failure = suppress_failure
    self.run_dir = '.'
    self.worker_logdir_root = worker_logdir_root

    if not launch_cmd:
        # {block_id} is double-braced so it survives the .format() call in
        # initialize_scaling and is substituted later, per block, in scale_out.
        self.launch_cmd = ("process_worker_pool.py {debug} {max_workers} "
                           "-a {addresses} "
                           "-p {prefetch_capacity} "
                           "-c {cores_per_worker} "
                           "-m {mem_per_worker} "
                           "--poll {poll_period} "
                           "--task_port={task_port} "
                           "--result_port={result_port} "
                           "--logdir={logdir} "
                           "--block_id={{block_id}} "
                           "--hb_period={heartbeat_period} "
                           "--hb_threshold={heartbeat_threshold} ")
def initialize_scaling(self):
    """Compose the worker launch command and provision the initial blocks.

    Fills in every static field of ``self.launch_cmd`` (only ``{block_id}``
    is left for ``scale_out`` to substitute per block), then scales out by
    ``provider.init_blocks`` if the provider declares that attribute.
    """
    debug_opts = "--debug" if self.worker_debug else ""
    if self.max_workers == float('inf'):
        max_workers = ""
    else:
        max_workers = "--max_workers={}".format(self.max_workers)

    if self.worker_logdir_root is not None:
        worker_logdir = "{}/{}".format(self.worker_logdir_root, self.label)
    else:
        worker_logdir = "{}/{}".format(self.run_dir, self.label)

    self.launch_cmd = self.launch_cmd.format(debug=debug_opts,
                                             prefetch_capacity=self.prefetch_capacity,
                                             addresses=self.all_addresses,
                                             task_port=self.worker_task_port,
                                             result_port=self.worker_result_port,
                                             cores_per_worker=self.cores_per_worker,
                                             mem_per_worker=self.mem_per_worker,
                                             max_workers=max_workers,
                                             nodes_per_block=self.provider.nodes_per_block,
                                             heartbeat_period=self.heartbeat_period,
                                             heartbeat_threshold=self.heartbeat_threshold,
                                             poll_period=self.poll_period,
                                             logdir=worker_logdir)
    logger.debug("Launch command: {}".format(self.launch_cmd))

    self._scaling_enabled = True
    logger.debug("Starting HighThroughputExecutor with provider:\n%s", self.provider)
    if hasattr(self.provider, 'init_blocks'):
        try:
            self.scale_out(blocks=self.provider.init_blocks)
        except Exception as e:
            logger.error("Scaling out failed: {}".format(e))
            raise e
def start(self):
    """Create the Interchange process and connect to it.

    Opens the three ZMQ pipes (tasks out, results in, command/control),
    starts the queue-management thread and the local interchange process,
    then composes the launch command and scales out initial blocks.
    """
    self.outgoing_q = zmq_pipes.TasksOutgoing("127.0.0.1", self.interchange_port_range)
    self.incoming_q = zmq_pipes.ResultsIncoming("127.0.0.1", self.interchange_port_range)
    self.command_client = zmq_pipes.CommandClient("127.0.0.1", self.interchange_port_range)

    self.is_alive = True

    self._queue_management_thread = None
    self._start_queue_management_thread()
    self._start_local_queue_process()

    logger.debug("Created management thread: {}".format(self._queue_management_thread))

    self.initialize_scaling()
def _queue_management_worker(self):
    """Listen to the queue for task status messages and handle them.

    Depending on the message, tasks will be updated with results, exceptions,
    or updates. It expects the following messages:

    .. code:: python

        {
           "task_id" : <task_id>
           "result"  : serialized result object, if task succeeded
           ... more tags could be added later
        }

        {
           "task_id" : <task_id>
           "exception" : serialized exception object, on failure
        }

    We do not support these yet, but they could be added easily.

    .. code:: python

        {
           "task_id" : <task_id>
           "cpu_stat" : <>
           "mem_stat" : <>
           "io_stat"  : <>
           "started"  : tstamp
        }

    The `None` message is a die request.
    """
    logger.debug("[MTHREAD] queue management worker starting")

    while not self.bad_state_is_set:
        try:
            msgs = self.incoming_q.get(timeout=1)
        except queue.Empty:
            logger.debug("[MTHREAD] queue empty")
            # Timed out.
            pass
        except IOError as e:
            logger.exception("[MTHREAD] Caught broken queue with exception code {}: {}".format(e.errno, e))
            return
        except Exception as e:
            logger.exception("[MTHREAD] Caught unknown exception: {}".format(e))
            return
        else:
            if msgs is None:
                # Die request.
                logger.debug("[MTHREAD] Got None, exiting")
                return
            else:
                for serialized_msg in msgs:
                    try:
                        msg = pickle.loads(serialized_msg)
                        tid = msg['task_id']
                    except pickle.UnpicklingError:
                        raise BadMessage("Message received could not be unpickled")
                    except Exception:
                        raise BadMessage("Message received does not contain 'task_id' field")

                    # tid == -1 is the interchange's out-of-band failure signal.
                    if tid == -1 and 'exception' in msg:
                        logger.warning("Executor shutting down due to exception from interchange")
                        exception, _ = deserialize_object(msg['exception'])
                        self.set_bad_state_and_fail_all(exception)
                        break

                    task_fut = self.tasks[tid]

                    if 'result' in msg:
                        result, _ = deserialize_object(msg['result'])
                        task_fut.set_result(result)
                    elif 'exception' in msg:
                        try:
                            s, _ = deserialize_object(msg['exception'])
                            # s should be a RemoteExceptionWrapper... so we can reraise it
                            if isinstance(s, RemoteExceptionWrapper):
                                try:
                                    s.reraise()
                                except Exception as e:
                                    task_fut.set_exception(e)
                            elif isinstance(s, Exception):
                                task_fut.set_exception(s)
                            else:
                                raise ValueError("Unknown exception-like type received: {}".format(type(s)))
                        except Exception as e:
                            # TODO could be a proper wrapped exception?
                            task_fut.set_exception(
                                DeserializationError("Received exception, but handling also threw an exception: {}".format(e)))
                    else:
                        raise BadMessage("Message received is neither result or exception")

        if not self.is_alive:
            break
    logger.info("[MTHREAD] queue management worker finished")
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(self, q=None):
    """We do not use this yet.

    Intended as a weakref finalizer: pushes the ``None`` die-request onto
    ``q`` so the queue management worker exits.
    """
    # NOTE(review): calling this without ``q`` would raise AttributeError on
    # None — the default seems unusable as-is; confirm before wiring it up.
    q.put(None)
def _start_local_queue_process(self):
    """ Starts the interchange process locally

    Starts the interchange process locally and uses an internal command queue to
    get the worker task and result ports that the interchange has bound to.

    Raises a generic Exception if the interchange does not report its ports
    within 120 seconds.
    """
    comm_q = Queue(maxsize=10)
    self.queue_proc = Process(target=interchange.starter,
                              args=(comm_q,),
                              kwargs={"client_ports": (self.outgoing_q.port,
                                                       self.incoming_q.port,
                                                       self.command_client.port),
                                      "worker_ports": self.worker_ports,
                                      "worker_port_range": self.worker_port_range,
                                      "hub_address": self.hub_address,
                                      "hub_port": self.hub_port,
                                      "logdir": "{}/{}".format(self.run_dir, self.label),
                                      "suppress_failure": self.suppress_failure,
                                      "heartbeat_threshold": self.heartbeat_threshold,
                                      "poll_period": self.poll_period,
                                      "logging_level": logging.DEBUG if self.worker_debug else logging.INFO
                              },
                              daemon=True,
                              name="HTEX-Interchange"
                              )
    self.queue_proc.start()
    try:
        # The interchange reports back the worker-facing ports it bound to.
        (self.worker_task_port, self.worker_result_port) = comm_q.get(block=True, timeout=120)
    except queue.Empty:
        logger.error("Interchange has not completed initialization in 120s. Aborting")
        raise Exception("Interchange failed to start")
def _start_queue_management_thread(self):
    """Start the queue management thread as a daemon, if not already running.

    Idempotent: a second call while the thread exists is a no-op, which also
    makes it usable later as a restart hook should the thread die.
    """
    if self._queue_management_thread is not None:
        logger.debug("Management thread already exists, returning")
        return

    logger.debug("Starting queue management thread")
    mgmt_thread = threading.Thread(target=self._queue_management_worker, name="HTEX-Queue-Management-Thread")
    mgmt_thread.daemon = True
    self._queue_management_thread = mgmt_thread
    mgmt_thread.start()
    logger.debug("Started queue management thread")
def hold_worker(self, worker_id):
    """Puts a worker on hold, preventing scheduling of additional tasks to it.

    This is called "hold" mostly because this only stops scheduling of tasks,
    and does not actually kill the worker.

    Parameters
    ----------
    worker_id : str
        Worker id to be put on hold

    Returns
    -------
    The interchange's reply to the HOLD_WORKER command.
    """
    c = self.command_client.run("HOLD_WORKER;{}".format(worker_id))
    logger.debug("Sent hold request to worker: {}".format(worker_id))
    return c
@property
def outstanding(self):
    """Count of tasks not yet completed, as reported by the interchange."""
    outstanding_c = self.command_client.run("OUTSTANDING_C")
    return outstanding_c
@property
def connected_workers(self):
    """Workers currently connected, as reported by the interchange."""
    workers = self.command_client.run("WORKERS")
    return workers
@property
def connected_managers(self):
    """Managers currently connected, as reported by the interchange."""
    workers = self.command_client.run("MANAGERS")
    return workers
def _hold_block(self, block_id):
    """ Sends hold command to all managers which are in a specific block

    Parameters
    ----------
    block_id : str
         Block identifier of the block to be put on hold
    """
    for manager in self.connected_managers:
        if manager['block_id'] != block_id:
            continue
        logger.debug("[HOLD_BLOCK]: Sending hold to manager: {}".format(manager['manager']))
        self.hold_worker(manager['manager'])
def submit(self, func, *args, **kwargs):
    """Submits work to the outgoing_q.

    The outgoing_q is watched by an external process (the interchange), which
    listens on this queue for new work. This method behaves like a
    submit call as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_

    Args:
        - func (callable) : Callable function
        - *args (list) : List of arbitrary positional arguments.

    Kwargs:
        - **kwargs (dict) : A dictionary of arbitrary keyword args for func.

    Returns:
          Future

    Raises:
        - SerializationError if func/args cannot be serialized.
        - The stored executor exception if the executor is in a bad state.
    """
    if self.bad_state_is_set:
        raise self.executor_exception

    self._task_counter += 1
    task_id = self._task_counter

    # Truncate long argument reprs only when debug logging is actually
    # enabled. The previous check, getEffectiveLevel() >= logging.DEBUG, is
    # true for every standard level (DEBUG is the lowest), so the repr() cost
    # was paid even when the debug message below would never be emitted.
    args_to_print = args
    if logger.isEnabledFor(logging.DEBUG):
        args_to_print = tuple([arg if len(repr(arg)) < 100 else (repr(arg)[:100] + '...') for arg in args])
    logger.debug("Pushing function {} to queue with args {}".format(func, args_to_print))

    self.tasks[task_id] = Future()

    try:
        # Use the module-level constants instead of repeating their literals.
        fn_buf = pack_apply_message(func, args, kwargs,
                                    buffer_threshold=BUFFER_THRESHOLD,
                                    item_threshold=ITEM_THRESHOLD)
    except TypeError:
        raise SerializationError(func.__name__)

    msg = {"task_id": task_id,
           "buffer": fn_buf}

    # Post task to the outgoing queue
    self.outgoing_q.put(msg)

    # Return the future
    return self.tasks[task_id]
@property
def scaling_enabled(self):
    """Whether scaling has been enabled (set by initialize_scaling)."""
    return self._scaling_enabled
def scale_out(self, blocks=1):
    """Scales out the number of blocks by "blocks"

    Parameters
    ----------
    blocks : int
        Number of blocks to provision via the provider. Default: 1

    Returns
    -------
    List of the external block ids that were launched.

    Raises
    ------
    ScalingFailed
        If the provider fails to provision a block.
    """
    r = []
    for i in range(blocks):
        # External id: index assigned by this executor; internal id: whatever
        # handle the provider returns (e.g. a scheduler job id).
        external_block_id = str(len(self.blocks))
        launch_cmd = self.launch_cmd.format(block_id=external_block_id)
        internal_block = self.provider.submit(launch_cmd, 1)
        logger.debug("Launched block {}->{}".format(external_block_id, internal_block))
        if not internal_block:
            raise(ScalingFailed(self.provider.label,
                                "Attempts to provision nodes via provider has failed"))
        r.extend([external_block_id])
        self.blocks[external_block_id] = internal_block
    return r
def scale_in(self, blocks=None, block_ids=None):
    """Scale in the number of active blocks by specified amount.

    The scale in method here is very rude. It doesn't give the workers
    the opportunity to finish current tasks or cleanup. This is tracked
    in issue #530

    Parameters
    ----------
    blocks : int
         Number of blocks to terminate and scale_in by
    block_ids : list
         List of specific block ids to terminate. Optional

    Returns
    -------
    List of cancelled block ids, filtered by the status handler.
    """
    # block_ids previously defaulted to a shared mutable list ([]); use the
    # None sentinel instead. Behavior is unchanged: both None and [] are
    # falsy, so the `blocks`-based selection below still applies.
    if block_ids:
        block_ids_to_kill = block_ids
    else:
        block_ids_to_kill = list(self.blocks.keys())[:blocks]

    # Hold the block so no new tasks are scheduled onto its managers.
    for block_id in block_ids_to_kill:
        self._hold_block(block_id)

    # Now kill via provider
    to_kill = [self.blocks.pop(bid) for bid in block_ids_to_kill]
    r = self.provider.cancel(to_kill)
    return self._filter_scale_in_ids(to_kill, r)
def _get_job_ids(self) -> List[object]:
    """Return the provider-side job handles of all currently-known blocks."""
    return list(self.blocks.values())
def shutdown(self, hub=True, targets='all', block=False):
    """Shutdown the executor by terminating the interchange process.

    The keyword arguments are currently unused placeholders (only the
    interchange process is terminated; workers are left to the provider):

    Kwargs:
        - hub (Bool): Whether the hub should be shutdown, Default: True,
        - targets (list of ints| 'all'): List of block id's to kill, Default: 'all'
        - block (Bool): To block for confirmations or not

    Returns:
        True
    """
    logger.info("Attempting HighThroughputExecutor shutdown")
    self.queue_proc.terminate()
    logger.info("Finished HighThroughputExecutor shutdown attempt")
    return True
|
client.py | #!/usr/bin/python3
import json
import socket
import threading
import pyaudio
from loguru import logger
from protocol import Message, MessageType
CHUNK_SIZE = 512
AUDIO_FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
class Client:
def __init__(self):
    """Interactively set up audio devices, connect to the voice server and
    run the command loop (start / stop / disconnect).

    Blocks forever: device selection, server connection and room join are
    all prompted on stdin; the final loop reads commands until 'disconnect'.
    """
    self.lock = threading.Lock()
    self.talking = False

    # initialise microphone recording
    self.p = pyaudio.PyAudio()

    # select input device; re-prompt until p.open succeeds
    while True:
        try:
            for i in range(self.p.get_device_count()):
                device = self.p.get_device_info_by_index(i)
                if device['maxInputChannels'] == 0:
                    continue
                print(f'{device["index"]}: {device["name"]}, {device["defaultSampleRate"]}HZ')
            input_device_index = int(input('Select input device index: '))
            self.recording_stream = self.p.open(
                format=AUDIO_FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                input_device_index=input_device_index,
                frames_per_buffer=CHUNK_SIZE * 3)
            break
        except KeyboardInterrupt:
            raise
        except Exception as e:
            # Bad index / unsupported parameters: report and re-prompt.
            print(e)

    # select output device; same re-prompt loop as above
    while True:
        try:
            for i in range(self.p.get_device_count()):
                device = self.p.get_device_info_by_index(i)
                if device['maxOutputChannels'] == 0:
                    continue
                print(f'{device["index"]}: {device["name"]}, {device["defaultSampleRate"]}HZ')
            output_device_index = int(input('Select output device index: '))
            self.playing_stream = self.p.open(
                format=AUDIO_FORMAT,
                channels=CHANNELS,
                rate=RATE,
                output=True,
                output_device_index=output_device_index,
                frames_per_buffer=CHUNK_SIZE * 3)
            break
        except KeyboardInterrupt:
            raise
        except Exception as e:
            print(e)

    # connect to the server; re-prompt on failure
    self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    while True:
        try:
            self.target_ip = input('Enter IP address of server --> ')
            self.target_port = int(input('Enter target port of server --> '))
            self.s.connect((self.target_ip, self.target_port))
            break
        except KeyboardInterrupt:
            raise
        except BaseException as e:
            logger.error(e)
    print('Connected to Server!')

    # join a room; re-prompt until the server accepts
    while True:
        username = input('Enter username: ')
        room_id = input('Enter room id: ')
        Message.build_message(MessageType.CONNECTION_REQUEST, json.dumps({
            'username': username,
            'room_id': room_id
        })).send(self.s)
        m = Message.read_message(self.s)
        assert m.message_type == MessageType.CONNECTION_RESPONSE
        d = json.loads(m.content)
        if d['result'] == 'ok':
            break
        print(f'Could not connect to room: {d["reason"]}')

    # start threads
    # Fix: Thread(...).start() returns None, so the previous code bound None
    # to these names instead of the Thread objects. Create, keep, then start.
    receive_thread = threading.Thread(target=self.receive_server_data)
    receive_thread.start()
    send_thread = threading.Thread(target=self.send_data_to_server)
    send_thread.start()

    # command loop: toggle talking state / disconnect
    while True:
        command = input()
        if command == 'start':
            self.talking = True
            with self.lock:
                Message.build_message(MessageType.START_TALKING, 'empty_content').send(self.s)
        elif command == 'stop':
            self.talking = False
            with self.lock:
                Message.build_message(MessageType.STOP_TALKING, 'empty_content').send(self.s)
        elif command == 'disconnect':
            with self.lock:
                Message.build_message(MessageType.DISCONNECT, 'empty_content').send(self.s)
            self.s.close()
            exit(0)
        else:
            print(f'No such command: {command}')
def receive_server_data(self):
    """Background thread: play incoming voice data and print room state.

    Loops forever reading framed messages from the server socket; errors
    other than KeyboardInterrupt are logged and the loop continues.
    """
    while True:
        try:
            m = Message.read_message(self.s)
            if m.message_type is MessageType.VOICE_DATA:
                # Raw audio frames: feed straight to the output stream.
                self.playing_stream.write(m.content)
            elif m.message_type is MessageType.ROOM_STATE:
                # Room state is a JSON dict of client -> talking flag;
                # talking clients are rendered in parentheses.
                room = json.loads(m.content)
                s = []
                for client, talking in room.items():
                    if talking:
                        s.append(f'({client})')
                    else:
                        s.append(client)
                print(f'{" ".join(s)}')
        except KeyboardInterrupt:
            raise
        except BaseException as e:
            # NOTE(review): BaseException also swallows SystemExit from other
            # code paths — consider narrowing to Exception.
            logger.error(e)
def send_data_to_server(self):
    """Background thread: stream microphone chunks while `self.talking`.

    Exits the process with status 1 if the server resets the connection.
    """
    while True:
        try:
            if self.talking:
                # exception_on_overflow=False: drop overflowed input rather
                # than raising mid-stream.
                data = self.recording_stream.read(CHUNK_SIZE, False)
                with self.lock:
                    Message.build_message(MessageType.VOICE_DATA, data).send(self.s)
        except KeyboardInterrupt:
            raise
        except ConnectionResetError:
            print(f'Server closed the connection')
            self.s.close()
            exit(1)
        except BaseException as e:
            logger.error(e)
# Only launch the interactive client when run as a script; importing this
# module should not open audio devices or prompt on stdin.
if __name__ == '__main__':
    client = Client()
|
test_distributed_sampling.py | import dgl
import unittest
import os
from dgl.data import CitationGraphDataset
from dgl.data import WN18Dataset
from dgl.distributed import sample_neighbors, sample_etype_neighbors
from dgl.distributed import partition_graph, load_partition, load_partition_book
import sys
import multiprocessing as mp
import numpy as np
import backend as F
import time
from utils import generate_ip_config, reset_envs
from pathlib import Path
import pytest
from scipy import sparse as spsp
import random
from dgl.distributed import DistGraphServer, DistGraph
def start_server(rank, tmpdir, disable_shared_mem, graph_name, graph_format=None,
                 keep_alive=False):
    """Start a DistGraphServer for the partition owned by `rank` (blocks).

    graph_format defaults to ['csc', 'coo']; a None sentinel is used instead
    of a mutable default list to avoid the shared-default-argument pitfall.
    """
    if graph_format is None:
        graph_format = ['csc', 'coo']
    g = DistGraphServer(rank, "rpc_ip_config.txt", 1, 1,
                        tmpdir / (graph_name + '.json'), disable_shared_mem=disable_shared_mem,
                        graph_format=graph_format, keep_alive=keep_alive)
    g.start()
def start_sample_client(rank, tmpdir, disable_shared_mem):
    """Client process: connect to the distributed graph and sample once.

    Returns the sampled graph, or None if sampling raised.
    """
    gpb = None
    if disable_shared_mem:
        # Without shared memory the partition book must be loaded explicitly.
        _, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_sampling.json', rank)
    dgl.distributed.initialize("rpc_ip_config.txt")
    dist_graph = DistGraph("test_sampling", gpb=gpb)
    try:
        sampled_graph = sample_neighbors(dist_graph, [0, 10, 99, 66, 1024, 2008], 3)
    except Exception as e:
        print(e)
        sampled_graph = None
    dgl.distributed.exit_client()
    return sampled_graph
def start_sample_client_shuffle(rank, tmpdir, disable_shared_mem, g, num_servers, group_id=0):
    """Client process for sampling on a reshuffled partitioning.

    Samples neighbors, maps the sampled node/edge IDs back to the original
    graph IDs via the per-partition 'orig_id' data, and asserts the sampled
    edges exist in `g` with matching edge IDs.
    """
    os.environ['DGL_GROUP_ID'] = str(group_id)
    gpb = None
    if disable_shared_mem:
        _, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_sampling.json', rank)
    dgl.distributed.initialize("rpc_ip_config.txt")
    dist_graph = DistGraph("test_sampling", gpb=gpb)
    sampled_graph = sample_neighbors(dist_graph, [0, 10, 99, 66, 1024, 2008], 3)

    # Build reverse maps: shuffled (partition) ID -> original graph ID.
    orig_nid = F.zeros((g.number_of_nodes(),), dtype=F.int64, ctx=F.cpu())
    orig_eid = F.zeros((g.number_of_edges(),), dtype=F.int64, ctx=F.cpu())
    for i in range(num_servers):
        part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_sampling.json', i)
        orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
        orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']

    src, dst = sampled_graph.edges()
    src = orig_nid[src]
    dst = orig_nid[dst]
    assert sampled_graph.number_of_nodes() == g.number_of_nodes()
    assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
    eids = g.edge_ids(src, dst)
    eids1 = orig_eid[sampled_graph.edata[dgl.EID]]
    assert np.array_equal(F.asnumpy(eids1), F.asnumpy(eids))
def start_find_edges_client(rank, tmpdir, disable_shared_mem, eids, etype=None):
    """Client process: look up endpoints of `eids` via DistGraph.find_edges.

    Returns (u, v) endpoint tensors, or (None, None) if the lookup raised.
    """
    gpb = None
    if disable_shared_mem:
        _, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_find_edges.json', rank)
    dgl.distributed.initialize("rpc_ip_config.txt")
    dist_graph = DistGraph("test_find_edges", gpb=gpb)
    try:
        u, v = dist_graph.find_edges(eids, etype=etype)
    except Exception as e:
        print(e)
        u, v = None, None
    dgl.distributed.exit_client()
    return u, v
def start_get_degrees_client(rank, tmpdir, disable_shared_mem, nids=None):
    """Client process: query in/out degrees for `nids` and for all nodes.

    Returns (in_deg, out_deg, all_in_deg, all_out_deg); all None on error.
    """
    gpb = None
    if disable_shared_mem:
        _, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_get_degrees.json', rank)
    dgl.distributed.initialize("rpc_ip_config.txt", 1)
    dist_graph = DistGraph("test_get_degrees", gpb=gpb)
    try:
        in_deg = dist_graph.in_degrees(nids)
        all_in_deg = dist_graph.in_degrees()
        out_deg = dist_graph.out_degrees(nids)
        all_out_deg = dist_graph.out_degrees()
    except Exception as e:
        print(e)
        in_deg, out_deg, all_in_deg, all_out_deg = None, None, None, None
    dgl.distributed.exit_client()
    return in_deg, out_deg, all_in_deg, all_out_deg
def check_rpc_sampling(tmpdir, num_server):
    """End-to-end check of distributed neighbor sampling without reshuffle.

    Partitions cora, starts `num_server` server processes, samples from a
    client, then verifies the sampled edges and edge IDs against `g`.
    """
    generate_ip_config("rpc_ip_config.txt", num_server, num_server)

    g = CitationGraphDataset("cora")[0]
    g.readonly()
    print(g.idtype)
    num_parts = num_server
    num_hops = 1

    partition_graph(g, 'test_sampling', num_parts, tmpdir,
                    num_hops=num_hops, part_method='metis', reshuffle=False)

    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
        p.start()
        time.sleep(1)  # stagger server start-up
        pserver_list.append(p)

    sampled_graph = start_sample_client(0, tmpdir, num_server > 1)
    print("Done sampling")
    for p in pserver_list:
        p.join()

    # reshuffle=False, so sampled IDs map directly onto the original graph.
    src, dst = sampled_graph.edges()
    assert sampled_graph.number_of_nodes() == g.number_of_nodes()
    assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
    eids = g.edge_ids(src, dst)
    assert np.array_equal(
        F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
def check_rpc_find_edges_shuffle(tmpdir, num_server):
    """Check DistGraph.find_edges on a reshuffled homogeneous partitioning.

    Compares distributed lookups (mapped back through orig_nid/orig_eid)
    against local `g.find_edges` for 100 random edge IDs.
    """
    generate_ip_config("rpc_ip_config.txt", num_server, num_server)

    g = CitationGraphDataset("cora")[0]
    g.readonly()
    num_parts = num_server

    orig_nid, orig_eid = partition_graph(g, 'test_find_edges', num_parts, tmpdir,
                                         num_hops=1, part_method='metis',
                                         reshuffle=True, return_mapping=True)

    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1,
                                                   'test_find_edges', ['csr', 'coo']))
        p.start()
        time.sleep(1)  # stagger server start-up
        pserver_list.append(p)

    eids = F.tensor(np.random.randint(g.number_of_edges(), size=100))
    u, v = g.find_edges(orig_eid[eids])
    du, dv = start_find_edges_client(0, tmpdir, num_server > 1, eids)
    du = orig_nid[du]
    dv = orig_nid[dv]
    assert F.array_equal(u, du)
    assert F.array_equal(v, dv)
def create_random_hetero(dense=False, empty=False):
    """Create a random heterograph with 3 node types and 3 relations.

    dense: use smaller node counts with higher edge density.
    empty: generate edges only among all-but-the-last-10 src/dst nodes of
           each relation, leaving some nodes isolated.
    """
    num_nodes = {'n1': 210, 'n2': 200, 'n3': 220} if dense else \
        {'n1': 1010, 'n2': 1000, 'n3': 1020}
    etypes = [('n1', 'r1', 'n2'),
              ('n1', 'r2', 'n3'),
              ('n2', 'r3', 'n3')]
    edges = {}
    random.seed(42)
    for etype in etypes:
        src_ntype, _, dst_ntype = etype
        # NOTE(review): edge structure is fixed by random_state=100 below;
        # random.seed(42) only seeds Python's `random` module.
        arr = spsp.random(num_nodes[src_ntype] - 10 if empty else num_nodes[src_ntype],
                          num_nodes[dst_ntype] - 10 if empty else num_nodes[dst_ntype],
                          density=0.1 if dense else 0.001,
                          format='coo', random_state=100)
        edges[etype] = (arr.row, arr.col)
    g = dgl.heterograph(edges, num_nodes)
    g.nodes['n1'].data['feat'] = F.ones((g.number_of_nodes('n1'), 10), F.float32, F.cpu())
    return g
def check_rpc_hetero_find_edges_shuffle(tmpdir, num_server):
    """Check DistGraph.find_edges on a reshuffled heterogeneous partitioning.

    Same structure as the homogeneous variant, but restricted to relation
    'r1' (n1 -> n2) and using per-type orig_nid/orig_eid mappings.
    """
    generate_ip_config("rpc_ip_config.txt", num_server, num_server)

    g = create_random_hetero()
    num_parts = num_server

    orig_nid, orig_eid = partition_graph(g, 'test_find_edges', num_parts, tmpdir,
                                         num_hops=1, part_method='metis',
                                         reshuffle=True, return_mapping=True)

    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1,
                                                   'test_find_edges', ['csr', 'coo']))
        p.start()
        time.sleep(1)  # stagger server start-up
        pserver_list.append(p)

    eids = F.tensor(np.random.randint(g.number_of_edges('r1'), size=100))
    u, v = g.find_edges(orig_eid['r1'][eids], etype='r1')
    du, dv = start_find_edges_client(0, tmpdir, num_server > 1, eids, etype='r1')
    du = orig_nid['n1'][du]
    dv = orig_nid['n2'][dv]
    assert F.array_equal(u, du)
    assert F.array_equal(v, dv)
# TODO: wait for a non-shared-memory graph store (presumably the reason for
# the Windows skip below) — confirm original intent of this note.
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Turn off Mxnet support")
@pytest.mark.parametrize("num_server", [1, 2])
def test_rpc_find_edges_shuffle(num_server):
    """Run both find_edges checks (hetero and homogeneous) in a temp dir."""
    reset_envs()
    import tempfile
    os.environ['DGL_DIST_MODE'] = 'distributed'
    with tempfile.TemporaryDirectory() as tmpdirname:
        check_rpc_hetero_find_edges_shuffle(Path(tmpdirname), num_server)
        check_rpc_find_edges_shuffle(Path(tmpdirname), num_server)
def check_rpc_get_degree_shuffle(tmpdir, num_server):
    """Partition cora, serve it from `num_server` processes, and verify that
    distributed in/out degree queries match the local graph's degrees."""
    generate_ip_config("rpc_ip_config.txt", num_server, num_server)
    g = CitationGraphDataset("cora")[0]
    g.readonly()
    num_parts = num_server
    partition_graph(g, 'test_get_degrees', num_parts, tmpdir,
                    num_hops=1, part_method='metis', reshuffle=True)
    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_get_degrees'))
        p.start()
        time.sleep(1)  # stagger server startup
        pserver_list.append(p)
    # Rebuild the shuffled-id -> original-id mapping from the partition files.
    orig_nid = F.zeros((g.number_of_nodes(),), dtype=F.int64, ctx=F.cpu())
    for i in range(num_server):
        part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_get_degrees.json', i)
        orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
    nids = F.tensor(np.random.randint(g.number_of_nodes(), size=100))
    in_degs, out_degs, all_in_degs, all_out_degs = start_get_degrees_client(0, tmpdir, num_server > 1, nids)
    print("Done get_degree")
    for p in pserver_list:
        p.join()
    print('check results')
    # Degrees for the queried batch and for every node must both match.
    assert F.array_equal(g.in_degrees(orig_nid[nids]), in_degs)
    assert F.array_equal(g.in_degrees(orig_nid), all_in_degs)
    assert F.array_equal(g.out_degrees(orig_nid[nids]), out_degs)
    assert F.array_equal(g.out_degrees(orig_nid), all_out_degs)
# Waiting on non-shared-memory graph store support.
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Turn off Mxnet support")
@pytest.mark.parametrize("num_server", [1, 2])
def test_rpc_get_degree_shuffle(num_server):
    """Distributed degree queries with one and two servers."""
    import tempfile
    reset_envs()
    os.environ['DGL_DIST_MODE'] = 'distributed'
    with tempfile.TemporaryDirectory() as tmpdirname:
        check_rpc_get_degree_shuffle(Path(tmpdirname), num_server)
@unittest.skip('Only support partition with shuffle')
def test_rpc_sampling():
    """Distributed sampling without id reshuffling (currently unsupported)."""
    import tempfile
    reset_envs()
    os.environ['DGL_DIST_MODE'] = 'distributed'
    with tempfile.TemporaryDirectory() as tmpdirname:
        check_rpc_sampling(Path(tmpdirname), 2)
def check_rpc_sampling_shuffle(tmpdir, num_server, num_groups=1):
    """Partition cora, launch `num_server` servers (kept alive when more than
    one client group connects) and run sampling client processes against them."""
    generate_ip_config("rpc_ip_config.txt", num_server, num_server)
    g = CitationGraphDataset("cora")[0]
    g.readonly()
    num_parts = num_server
    num_hops = 1
    partition_graph(g, 'test_sampling', num_parts, tmpdir,
                    num_hops=num_hops, part_method='metis', reshuffle=True)
    pserver_list = []
    ctx = mp.get_context('spawn')
    # Servers must outlive a single client group when several groups connect.
    keep_alive = num_groups > 1
    for i in range(num_server):
        p = ctx.Process(target=start_server, args=(
            i, tmpdir, num_server > 1, 'test_sampling', ['csc', 'coo'], keep_alive))
        p.start()
        time.sleep(1)  # stagger server startup
        pserver_list.append(p)
    pclient_list = []
    num_clients = 1
    for client_id in range(num_clients):
        for group_id in range(num_groups):
            p = ctx.Process(target=start_sample_client_shuffle, args=(client_id, tmpdir, num_server > 1, g, num_server, group_id))
            p.start()
            time.sleep(1) # avoid race condition when instantiating DistGraph
            pclient_list.append(p)
    for p in pclient_list:
        p.join()
    if keep_alive:
        # Kept-alive servers must survive all client exits, then be told to stop.
        for p in pserver_list:
            assert p.is_alive()
        # force shutdown server
        dgl.distributed.shutdown_servers("rpc_ip_config.txt", 1)
    for p in pserver_list:
        p.join()
def start_hetero_sample_client(rank, tmpdir, disable_shared_mem, nodes):
    """Client process body: connect to the servers, sample 3 neighbors of
    `nodes` from the heterogeneous DistGraph, and return (block, gpb).

    On a sampling failure the exception is printed and block is None, so the
    parent's asserts fail instead of the child process crashing.
    """
    gpb = None
    if disable_shared_mem:
        # Without shared memory the partition book must be loaded explicitly.
        _, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_sampling.json', rank)
    dgl.distributed.initialize("rpc_ip_config.txt")
    dist_graph = DistGraph("test_sampling", gpb=gpb)
    # Only 'n1' carries a feature in the generated graph (see create_random_hetero).
    assert 'feat' in dist_graph.nodes['n1'].data
    assert 'feat' not in dist_graph.nodes['n2'].data
    assert 'feat' not in dist_graph.nodes['n3'].data
    if gpb is None:
        gpb = dist_graph.get_partition_book()
    try:
        sampled_graph = sample_neighbors(dist_graph, nodes, 3)
        block = dgl.to_block(sampled_graph, nodes)
        block.edata[dgl.EID] = sampled_graph.edata[dgl.EID]
    except Exception as e:
        print(e)
        block = None
    dgl.distributed.exit_client()
    return block, gpb
def start_hetero_etype_sample_client(rank, tmpdir, disable_shared_mem, fanout=3,
                                     nodes={'n3': [0, 10, 99, 66, 124, 208]}):
    """Client process body for per-edge-type sampling; returns (block, gpb).

    NOTE(review): the mutable default for `nodes` is only read here, never
    mutated, so it is safe — but callers always pass their own dict anyway.
    """
    gpb = None
    if disable_shared_mem:
        _, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_sampling.json', rank)
    dgl.distributed.initialize("rpc_ip_config.txt")
    dist_graph = DistGraph("test_sampling", gpb=gpb)
    # Only 'n1' carries a feature in the generated graph.
    assert 'feat' in dist_graph.nodes['n1'].data
    assert 'feat' not in dist_graph.nodes['n2'].data
    assert 'feat' not in dist_graph.nodes['n3'].data
    if dist_graph.local_partition is not None:
        # Check whether etypes are sorted in dist_graph
        local_g = dist_graph.local_partition
        local_nids = np.arange(local_g.num_nodes())
        for lnid in local_nids:
            leids = local_g.in_edges(lnid, form='eid')
            letids = F.asnumpy(local_g.edata[dgl.ETYPE][leids])
            # First-occurrence indices of each etype must be non-decreasing,
            # i.e. every node's in-edges are grouped by edge type.
            _, idices = np.unique(letids, return_index=True)
            assert np.all(idices[:-1] <= idices[1:])
    if gpb is None:
        gpb = dist_graph.get_partition_book()
    try:
        sampled_graph = sample_etype_neighbors(dist_graph, nodes, dgl.ETYPE, fanout)
        block = dgl.to_block(sampled_graph, nodes)
        block.edata[dgl.EID] = sampled_graph.edata[dgl.EID]
    except Exception as e:
        print(e)
        block = None
    dgl.distributed.exit_client()
    return block, gpb
def check_rpc_hetero_sampling_shuffle(tmpdir, num_server):
    """Distributed heterogeneous sampling: every sampled edge, once mapped
    back through the partition files, must exist in the original graph with
    matching endpoint and edge ids."""
    generate_ip_config("rpc_ip_config.txt", num_server, num_server)
    g = create_random_hetero()
    num_parts = num_server
    num_hops = 1
    partition_graph(g, 'test_sampling', num_parts, tmpdir,
                    num_hops=num_hops, part_method='metis', reshuffle=True)
    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
        p.start()
        time.sleep(1)  # stagger server startup
        pserver_list.append(p)
    block, gpb = start_hetero_sample_client(0, tmpdir, num_server > 1,
                                            nodes = {'n3': [0, 10, 99, 66, 124, 208]})
    print("Done sampling")
    for p in pserver_list:
        p.join()
    # Rebuild per-type shuffled-id -> original-id maps from the partitions.
    orig_nid_map = {ntype: F.zeros((g.number_of_nodes(ntype),), dtype=F.int64) for ntype in g.ntypes}
    orig_eid_map = {etype: F.zeros((g.number_of_edges(etype),), dtype=F.int64) for etype in g.etypes}
    for i in range(num_server):
        part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_sampling.json', i)
        ntype_ids, type_nids = gpb.map_to_per_ntype(part.ndata[dgl.NID])
        for ntype_id, ntype in enumerate(g.ntypes):
            idx = ntype_ids == ntype_id
            F.scatter_row_inplace(orig_nid_map[ntype], F.boolean_mask(type_nids, idx),
                                  F.boolean_mask(part.ndata['orig_id'], idx))
        etype_ids, type_eids = gpb.map_to_per_etype(part.edata[dgl.EID])
        for etype_id, etype in enumerate(g.etypes):
            idx = etype_ids == etype_id
            F.scatter_row_inplace(orig_eid_map[etype], F.boolean_mask(type_eids, idx),
                                  F.boolean_mask(part.edata['orig_id'], idx))
    for src_type, etype, dst_type in block.canonical_etypes:
        src, dst = block.edges(etype=etype)
        # These are global Ids after shuffling.
        shuffled_src = F.gather_row(block.srcnodes[src_type].data[dgl.NID], src)
        shuffled_dst = F.gather_row(block.dstnodes[dst_type].data[dgl.NID], dst)
        shuffled_eid = block.edges[etype].data[dgl.EID]
        orig_src = F.asnumpy(F.gather_row(orig_nid_map[src_type], shuffled_src))
        orig_dst = F.asnumpy(F.gather_row(orig_nid_map[dst_type], shuffled_dst))
        orig_eid = F.asnumpy(F.gather_row(orig_eid_map[etype], shuffled_eid))
        # Check the node Ids and edge Ids.
        orig_src1, orig_dst1 = g.find_edges(orig_eid, etype=etype)
        assert np.all(F.asnumpy(orig_src1) == orig_src)
        assert np.all(F.asnumpy(orig_dst1) == orig_dst)
def get_degrees(g, nids, ntype):
    """Total degree of `nids` of node type `ntype`, summed over every edge
    type it participates in: out-degree where it is the source, otherwise
    in-degree where it is the destination."""
    total = F.zeros((len(nids),), dtype=F.int64)
    for src_t, rel, dst_t in g.canonical_etypes:
        if src_t == ntype:
            total = total + g.out_degrees(u=nids, etype=rel)
        elif dst_t == ntype:
            total = total + g.in_degrees(v=nids, etype=rel)
    return total
def check_rpc_hetero_sampling_empty_shuffle(tmpdir, num_server):
    """Sampling seeded only with zero-degree 'n3' nodes: the returned block
    must contain no edges while still exposing every edge type."""
    generate_ip_config("rpc_ip_config.txt", num_server, num_server)
    g = create_random_hetero(empty=True)
    num_parts = num_server
    num_hops = 1
    orig_nids, _ = partition_graph(g, 'test_sampling', num_parts, tmpdir,
                                   num_hops=num_hops, part_method='metis',
                                   reshuffle=True, return_mapping=True)
    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
        p.start()
        time.sleep(1)  # stagger server startup
        pserver_list.append(p)
    # Seeds are exactly the 'n3' nodes with zero total degree.
    deg = get_degrees(g, orig_nids['n3'], 'n3')
    empty_nids = F.nonzero_1d(deg == 0)
    block, gpb = start_hetero_sample_client(0, tmpdir, num_server > 1,
                                            nodes = {'n3': empty_nids})
    print("Done sampling")
    for p in pserver_list:
        p.join()
    assert block.number_of_edges() == 0
    assert len(block.etypes) == len(g.etypes)
def check_rpc_hetero_etype_sampling_shuffle(tmpdir, num_server):
    """Distributed per-edge-type sampling on a dense heterograph: check the
    per-type fanout is honored and sampled ids map back to real edges."""
    generate_ip_config("rpc_ip_config.txt", num_server, num_server)
    g = create_random_hetero(dense=True)
    num_parts = num_server
    num_hops = 1
    partition_graph(g, 'test_sampling', num_parts, tmpdir,
                    num_hops=num_hops, part_method='metis', reshuffle=True)
    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
        p.start()
        time.sleep(1)  # stagger server startup
        pserver_list.append(p)
    fanout = 3
    block, gpb = start_hetero_etype_sample_client(0, tmpdir, num_server > 1, fanout,
                                                  nodes={'n3': [0, 10, 99, 66, 124, 208]})
    print("Done sampling")
    for p in pserver_list:
        p.join()
    # 6 seed nodes x fanout 3 = 18 edges expected for each incoming etype.
    src, dst = block.edges(etype=('n1', 'r2', 'n3'))
    assert len(src) == 18
    src, dst = block.edges(etype=('n2', 'r3', 'n3'))
    assert len(src) == 18
    # Rebuild per-type shuffled-id -> original-id maps from the partitions.
    orig_nid_map = {ntype: F.zeros((g.number_of_nodes(ntype),), dtype=F.int64) for ntype in g.ntypes}
    orig_eid_map = {etype: F.zeros((g.number_of_edges(etype),), dtype=F.int64) for etype in g.etypes}
    for i in range(num_server):
        part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_sampling.json', i)
        ntype_ids, type_nids = gpb.map_to_per_ntype(part.ndata[dgl.NID])
        for ntype_id, ntype in enumerate(g.ntypes):
            idx = ntype_ids == ntype_id
            F.scatter_row_inplace(orig_nid_map[ntype], F.boolean_mask(type_nids, idx),
                                  F.boolean_mask(part.ndata['orig_id'], idx))
        etype_ids, type_eids = gpb.map_to_per_etype(part.edata[dgl.EID])
        for etype_id, etype in enumerate(g.etypes):
            idx = etype_ids == etype_id
            F.scatter_row_inplace(orig_eid_map[etype], F.boolean_mask(type_eids, idx),
                                  F.boolean_mask(part.edata['orig_id'], idx))
    for src_type, etype, dst_type in block.canonical_etypes:
        src, dst = block.edges(etype=etype)
        # These are global Ids after shuffling.
        shuffled_src = F.gather_row(block.srcnodes[src_type].data[dgl.NID], src)
        shuffled_dst = F.gather_row(block.dstnodes[dst_type].data[dgl.NID], dst)
        shuffled_eid = block.edges[etype].data[dgl.EID]
        orig_src = F.asnumpy(F.gather_row(orig_nid_map[src_type], shuffled_src))
        orig_dst = F.asnumpy(F.gather_row(orig_nid_map[dst_type], shuffled_dst))
        orig_eid = F.asnumpy(F.gather_row(orig_eid_map[etype], shuffled_eid))
        # Check the node Ids and edge Ids.
        orig_src1, orig_dst1 = g.find_edges(orig_eid, etype=etype)
        assert np.all(F.asnumpy(orig_src1) == orig_src)
        assert np.all(F.asnumpy(orig_dst1) == orig_dst)
def check_rpc_hetero_etype_sampling_empty_shuffle(tmpdir, num_server):
    """Per-edge-type sampling where every seed has degree 0: the block must
    contain no edges but still expose every edge type."""
    generate_ip_config("rpc_ip_config.txt", num_server, num_server)
    g = create_random_hetero(dense=True, empty=True)
    num_parts = num_server
    num_hops = 1
    orig_nids, _ = partition_graph(g, 'test_sampling', num_parts, tmpdir,
                                   num_hops=num_hops, part_method='metis',
                                   reshuffle=True, return_mapping=True)
    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_sampling'))
        p.start()
        time.sleep(1)  # stagger server startup
        pserver_list.append(p)
    fanout = 3
    # Seeds are exactly the 'n3' nodes with zero total degree.
    deg = get_degrees(g, orig_nids['n3'], 'n3')
    empty_nids = F.nonzero_1d(deg == 0)
    block, gpb = start_hetero_etype_sample_client(0, tmpdir, num_server > 1, fanout,
                                                  nodes={'n3': empty_nids})
    print("Done sampling")
    for p in pserver_list:
        p.join()
    assert block.number_of_edges() == 0
    assert len(block.etypes) == len(g.etypes)
# Waiting on non-shared-memory graph store support.
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Turn off Mxnet support")
@pytest.mark.parametrize("num_server", [1, 2])
def test_rpc_sampling_shuffle(num_server):
    """Run every distributed-sampling check against `num_server` servers."""
    import tempfile
    reset_envs()
    os.environ['DGL_DIST_MODE'] = 'distributed'
    with tempfile.TemporaryDirectory() as tmpdirname:
        workdir = Path(tmpdirname)
        check_rpc_sampling_shuffle(workdir, num_server)
        check_rpc_sampling_shuffle(workdir, num_server, num_groups=2)
        check_rpc_hetero_sampling_shuffle(workdir, num_server)
        check_rpc_hetero_sampling_empty_shuffle(workdir, num_server)
        check_rpc_hetero_etype_sampling_shuffle(workdir, num_server)
        check_rpc_hetero_etype_sampling_empty_shuffle(workdir, num_server)
def check_standalone_sampling(tmpdir, reshuffle):
    """Single-process ("standalone") sampling: partition cora into one part
    and check sampled edges exist in the original graph with matching ids."""
    g = CitationGraphDataset("cora")[0]
    num_parts = 1
    num_hops = 1
    partition_graph(g, 'test_sampling', num_parts, tmpdir,
                    num_hops=num_hops, part_method='metis', reshuffle=reshuffle)
    os.environ['DGL_DIST_MODE'] = 'standalone'
    dgl.distributed.initialize("rpc_ip_config.txt")
    dist_graph = DistGraph("test_sampling", part_config=tmpdir / 'test_sampling.json')
    sampled_graph = sample_neighbors(dist_graph, [0, 10, 99, 66, 1024, 2008], 3)
    src, dst = sampled_graph.edges()
    assert sampled_graph.number_of_nodes() == g.number_of_nodes()
    assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
    # Sampled edge ids must agree with the ids in the original graph.
    eids = g.edge_ids(src, dst)
    assert np.array_equal(
        F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
    dgl.distributed.exit_client()
def check_standalone_etype_sampling(tmpdir, reshuffle):
    """Standalone per-edge-type sampling on cora; sampled edges and edge ids
    must match the original graph."""
    hg = CitationGraphDataset('cora')[0]
    num_parts = 1
    num_hops = 1
    partition_graph(hg, 'test_sampling', num_parts, tmpdir,
                    num_hops=num_hops, part_method='metis', reshuffle=reshuffle)
    os.environ['DGL_DIST_MODE'] = 'standalone'
    dgl.distributed.initialize("rpc_ip_config.txt")
    dist_graph = DistGraph("test_sampling", part_config=tmpdir / 'test_sampling.json')
    sampled_graph = sample_etype_neighbors(dist_graph, [0, 10, 99, 66, 1023], dgl.ETYPE, 3)
    src, dst = sampled_graph.edges()
    assert sampled_graph.number_of_nodes() == hg.number_of_nodes()
    assert np.all(F.asnumpy(hg.has_edges_between(src, dst)))
    # Sampled edge ids must agree with the ids in the original graph.
    eids = hg.edge_ids(src, dst)
    assert np.array_equal(
        F.asnumpy(sampled_graph.edata[dgl.EID]), F.asnumpy(eids))
    dgl.distributed.exit_client()
def check_standalone_etype_sampling_heterograph(tmpdir, reshuffle):
    """Standalone per-edge-type sampling on a two-etype heterograph built
    from cora ('cite' plus its reverse 'cite-by'): fanout 1 per etype must
    yield exactly one edge of each type per seed node."""
    hg = CitationGraphDataset('cora')[0]
    num_parts = 1
    num_hops = 1
    src, dst = hg.edges()
    new_hg = dgl.heterograph({('paper', 'cite', 'paper'): (src, dst),
                              ('paper', 'cite-by', 'paper'): (dst, src)},
                             {'paper': hg.number_of_nodes()})
    partition_graph(new_hg, 'test_hetero_sampling', num_parts, tmpdir,
                    num_hops=num_hops, part_method='metis', reshuffle=reshuffle)
    os.environ['DGL_DIST_MODE'] = 'standalone'
    dgl.distributed.initialize("rpc_ip_config.txt")
    dist_graph = DistGraph("test_hetero_sampling", part_config=tmpdir / 'test_hetero_sampling.json')
    sampled_graph = sample_etype_neighbors(dist_graph, [0, 1, 2, 10, 99, 66, 1023, 1024, 2700, 2701], dgl.ETYPE, 1)
    # 10 seed nodes x fanout 1 per edge type.
    src, dst = sampled_graph.edges(etype=('paper', 'cite', 'paper'))
    assert len(src) == 10
    src, dst = sampled_graph.edges(etype=('paper', 'cite-by', 'paper'))
    assert len(src) == 10
    assert sampled_graph.number_of_nodes() == new_hg.number_of_nodes()
    dgl.distributed.exit_client()
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_standalone_sampling():
    """Standalone sampling with and without id reshuffling."""
    import tempfile
    reset_envs()
    os.environ['DGL_DIST_MODE'] = 'standalone'
    with tempfile.TemporaryDirectory() as tmpdirname:
        for reshuffle in (False, True):
            check_standalone_sampling(Path(tmpdirname), reshuffle)
def start_in_subgraph_client(rank, tmpdir, disable_shared_mem, nodes):
    """Client process body: compute the in-subgraph of `nodes` on the
    DistGraph; returns the subgraph, or None if the call raised (the
    exception is printed so the parent's asserts surface the failure)."""
    gpb = None
    dgl.distributed.initialize("rpc_ip_config.txt")
    if disable_shared_mem:
        # Without shared memory the partition book must be loaded explicitly.
        _, _, _, gpb, _, _, _ = load_partition(tmpdir / 'test_in_subgraph.json', rank)
    dist_graph = DistGraph("test_in_subgraph", gpb=gpb)
    try:
        sampled_graph = dgl.distributed.in_subgraph(dist_graph, nodes)
    except Exception as e:
        print(e)
        sampled_graph = None
    dgl.distributed.exit_client()
    return sampled_graph
def check_rpc_in_subgraph_shuffle(tmpdir, num_server):
    """Distributed in_subgraph: the edge set returned by the client must
    equal dgl.in_subgraph on the original graph after mapping ids back."""
    generate_ip_config("rpc_ip_config.txt", num_server, num_server)
    g = CitationGraphDataset("cora")[0]
    g.readonly()
    num_parts = num_server
    partition_graph(g, 'test_in_subgraph', num_parts, tmpdir,
                    num_hops=1, part_method='metis', reshuffle=True)
    pserver_list = []
    ctx = mp.get_context('spawn')
    for i in range(num_server):
        p = ctx.Process(target=start_server, args=(i, tmpdir, num_server > 1, 'test_in_subgraph'))
        p.start()
        time.sleep(1)  # stagger server startup
        pserver_list.append(p)
    nodes = [0, 10, 99, 66, 1024, 2008]
    sampled_graph = start_in_subgraph_client(0, tmpdir, num_server > 1, nodes)
    for p in pserver_list:
        p.join()
    # Rebuild shuffled-id -> original-id maps from the partition files.
    orig_nid = F.zeros((g.number_of_nodes(),), dtype=F.int64, ctx=F.cpu())
    orig_eid = F.zeros((g.number_of_edges(),), dtype=F.int64, ctx=F.cpu())
    for i in range(num_server):
        part, _, _, _, _, _, _ = load_partition(tmpdir / 'test_in_subgraph.json', i)
        orig_nid[part.ndata[dgl.NID]] = part.ndata['orig_id']
        orig_eid[part.edata[dgl.EID]] = part.edata['orig_id']
    src, dst = sampled_graph.edges()
    src = orig_nid[src]
    dst = orig_nid[dst]
    assert sampled_graph.number_of_nodes() == g.number_of_nodes()
    assert np.all(F.asnumpy(g.has_edges_between(src, dst)))
    # Compare against the reference computed directly on the local graph.
    subg1 = dgl.in_subgraph(g, orig_nid[nodes])
    src1, dst1 = subg1.edges()
    assert np.all(np.sort(F.asnumpy(src)) == np.sort(F.asnumpy(src1)))
    assert np.all(np.sort(F.asnumpy(dst)) == np.sort(F.asnumpy(dst1)))
    eids = g.edge_ids(src, dst)
    eids1 = orig_eid[sampled_graph.edata[dgl.EID]]
    assert np.array_equal(F.asnumpy(eids1), F.asnumpy(eids))
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
def test_rpc_in_subgraph():
    """Distributed in_subgraph with two servers."""
    import tempfile
    reset_envs()
    os.environ['DGL_DIST_MODE'] = 'distributed'
    with tempfile.TemporaryDirectory() as tmpdirname:
        check_rpc_in_subgraph_shuffle(Path(tmpdirname), 2)
@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@unittest.skipIf(dgl.backend.backend_name == 'tensorflow', reason='Not support tensorflow for now')
@unittest.skipIf(dgl.backend.backend_name == "mxnet", reason="Turn off Mxnet support")
def test_standalone_etype_sampling():
    """Standalone etype sampling on a heterograph, then on cora."""
    import tempfile
    reset_envs()
    with tempfile.TemporaryDirectory() as hetero_dir:
        os.environ['DGL_DIST_MODE'] = 'standalone'
        check_standalone_etype_sampling_heterograph(Path(hetero_dir), True)
    with tempfile.TemporaryDirectory() as homo_dir:
        os.environ['DGL_DIST_MODE'] = 'standalone'
        for reshuffle in (True, False):
            check_standalone_etype_sampling(Path(homo_dir), reshuffle)
if __name__ == "__main__":
    # Manual driver: run every check in sequence, standalone checks first,
    # then the distributed ones, sharing one scratch directory per group.
    import tempfile
    with tempfile.TemporaryDirectory() as tmpdirname:
        os.environ['DGL_DIST_MODE'] = 'standalone'
        check_standalone_etype_sampling_heterograph(Path(tmpdirname), True)
    with tempfile.TemporaryDirectory() as tmpdirname:
        os.environ['DGL_DIST_MODE'] = 'standalone'
        check_standalone_etype_sampling(Path(tmpdirname), True)
        check_standalone_etype_sampling(Path(tmpdirname), False)
        check_standalone_sampling(Path(tmpdirname), True)
        check_standalone_sampling(Path(tmpdirname), False)
        os.environ['DGL_DIST_MODE'] = 'distributed'
        check_rpc_sampling(Path(tmpdirname), 2)
        check_rpc_sampling(Path(tmpdirname), 1)
        check_rpc_get_degree_shuffle(Path(tmpdirname), 1)
        check_rpc_get_degree_shuffle(Path(tmpdirname), 2)
        check_rpc_find_edges_shuffle(Path(tmpdirname), 2)
        check_rpc_find_edges_shuffle(Path(tmpdirname), 1)
        check_rpc_hetero_find_edges_shuffle(Path(tmpdirname), 1)
        check_rpc_hetero_find_edges_shuffle(Path(tmpdirname), 2)
        check_rpc_in_subgraph_shuffle(Path(tmpdirname), 2)
        check_rpc_sampling_shuffle(Path(tmpdirname), 1)
        check_rpc_sampling_shuffle(Path(tmpdirname), 2)
        check_rpc_hetero_sampling_shuffle(Path(tmpdirname), 1)
        check_rpc_hetero_sampling_shuffle(Path(tmpdirname), 2)
        check_rpc_hetero_sampling_empty_shuffle(Path(tmpdirname), 1)
        check_rpc_hetero_etype_sampling_shuffle(Path(tmpdirname), 1)
        check_rpc_hetero_etype_sampling_shuffle(Path(tmpdirname), 2)
        check_rpc_hetero_etype_sampling_empty_shuffle(Path(tmpdirname), 1)
|
sqlite_web.py | #!/usr/bin/env python
import datetime
import hashlib
import math
import operator
import optparse
import os
import re
import sys
import threading
import time
import webbrowser
from collections import namedtuple, OrderedDict
from functools import wraps
from getpass import getpass
from io import TextIOWrapper
# Py2k compat.
# Normalize the handful of names that differ between Python 2 and 3 so the
# rest of the module can use a single spelling.
if sys.version_info[0] == 2:
    PY2 = True
    binary_types = (buffer, bytes, bytearray)
    decode_handler = 'replace'
    numeric = (int, long, float)
    unicode_type = unicode
    from StringIO import StringIO
else:
    PY2 = False
    binary_types = (bytes, bytearray)
    decode_handler = 'backslashreplace'
    numeric = (int, float)
    unicode_type = str
    from io import StringIO
# Required dependency: flask. Fail fast with an install hint.
try:
    from flask import (
        Flask, abort, escape, flash, jsonify, make_response, Markup, redirect,
        render_template, request, session, url_for)
except ImportError:
    raise RuntimeError('Unable to import flask module. Install by running '
                       'pip install flask')
# Optional dependency: pygments. Without it SQL is shown in a bare <pre>.
try:
    from pygments import formatters, highlight, lexers
except ImportError:
    import warnings
    warnings.warn('pygments library not found.', ImportWarning)
    syntax_highlight = lambda data: '<pre>%s</pre>' % data
else:
    def syntax_highlight(data):
        """Return `data` rendered as HTML with SQL syntax highlighting."""
        if not data:
            return ''
        lexer = lexers.get_lexer_by_name('sql')
        formatter = formatters.HtmlFormatter(linenos=False)
        return highlight(data, lexer, formatter)
# Required dependency: peewee 3.x (dataset/migrate APIs changed at 3.0).
try:
    from peewee import __version__
    peewee_version = tuple([int(p) for p in __version__.split('.')])
except ImportError:
    raise RuntimeError('Unable to import peewee module. Install by running '
                       'pip install peewee')
else:
    if peewee_version <= (3, 0, 0):
        raise RuntimeError('Peewee >= 3.0.0 is required. Found version %s. '
                           'Please update by running pip install --update '
                           'peewee' % __version__)
from peewee import *
from peewee import IndexMetadata
from peewee import sqlite3
from playhouse.dataset import DataSet
from playhouse.migrate import migrate
CUR_DIR = os.path.realpath(os.path.dirname(__file__))
DEBUG = False
MAX_RESULT_SIZE = 1000  # cap on rows shown by the ad-hoc query page
ROWS_PER_PAGE = 50      # pagination size for the table-content page
SECRET_KEY = 'sqlite-database-browser-0.1.0'
app = Flask(
    __name__,
    static_folder=os.path.join(CUR_DIR, 'static'),
    template_folder=os.path.join(CUR_DIR, 'templates'))
# Pull DEBUG / SECRET_KEY / etc. from the module-level constants above.
app.config.from_object(__name__)
# Global database handles; populated at startup outside this chunk — TODO confirm.
dataset = None
migrator = None
#
# Database metadata objects.
#
TriggerMetadata = namedtuple('TriggerMetadata', ('name', 'sql'))
ViewMetadata = namedtuple('ViewMetadata', ('name', 'sql'))
#
# Database helpers.
#
class SqliteDataSet(DataSet):
    """DataSet subclass with SQLite-specific introspection helpers.

    NOTE(review): several methods go through the module-level `dataset`
    handle rather than `self` — presumably because the app opens a single
    database; confirm before reusing this class elsewhere.
    """
    @property
    def filename(self):
        """Absolute path of the database file, with any 'file:' URI prefix
        and query string (e.g. '?mode=ro') stripped."""
        db_file = dataset._database.database
        if db_file.startswith('file:'):
            db_file = db_file[5:]
        return os.path.realpath(db_file.rsplit('?', 1)[0])
    @property
    def is_readonly(self):
        """True when the database was opened with the '?mode=ro' URI flag."""
        db_file = dataset._database.database
        return db_file.endswith('?mode=ro')
    @property
    def base_name(self):
        """Database filename without its directory."""
        return os.path.basename(self.filename)
    @property
    def created(self):
        """st_ctime of the database file as a datetime."""
        stat = os.stat(self.filename)
        return datetime.datetime.fromtimestamp(stat.st_ctime)
    @property
    def modified(self):
        """Last-modification time of the database file as a datetime."""
        stat = os.stat(self.filename)
        return datetime.datetime.fromtimestamp(stat.st_mtime)
    @property
    def size_on_disk(self):
        """Size of the database file in bytes."""
        stat = os.stat(self.filename)
        return stat.st_size
    def get_indexes(self, table):
        """Index metadata for `table`."""
        return dataset._database.get_indexes(table)
    def get_all_indexes(self):
        """IndexMetadata for every index in the database, sorted by name."""
        cursor = self.query(
            'SELECT name, sql FROM sqlite_master '
            'WHERE type = ? ORDER BY name',
            ('index',))
        return [IndexMetadata(row[0], row[1], None, None, None)
                for row in cursor.fetchall()]
    def get_columns(self, table):
        """Column metadata for `table`."""
        return dataset._database.get_columns(table)
    def get_foreign_keys(self, table):
        """Foreign-key metadata for `table`."""
        return dataset._database.get_foreign_keys(table)
    def get_triggers(self, table):
        """TriggerMetadata for triggers attached to `table`."""
        cursor = self.query(
            'SELECT name, sql FROM sqlite_master '
            'WHERE type = ? AND tbl_name = ?',
            ('trigger', table))
        return [TriggerMetadata(*row) for row in cursor.fetchall()]
    def get_all_triggers(self):
        """TriggerMetadata for every trigger, sorted by name."""
        cursor = self.query(
            'SELECT name, sql FROM sqlite_master '
            'WHERE type = ? ORDER BY name',
            ('trigger',))
        return [TriggerMetadata(*row) for row in cursor.fetchall()]
    def get_all_views(self):
        """ViewMetadata for every view, sorted by name."""
        cursor = self.query(
            'SELECT name, sql FROM sqlite_master '
            'WHERE type = ? ORDER BY name',
            ('view',))
        return [ViewMetadata(*row) for row in cursor.fetchall()]
    def get_virtual_tables(self):
        """Names of virtual tables (SQL starting with CREATE VIRTUAL TABLE)."""
        cursor = self.query(
            'SELECT name FROM sqlite_master '
            'WHERE type = ? AND sql LIKE ? '
            'ORDER BY name',
            ('table', 'CREATE VIRTUAL TABLE%'))
        return set([row[0] for row in cursor.fetchall()])
    def get_corollary_virtual_tables(self):
        """Candidate shadow-table names ('<vtab>_content', '_docsize', ...)
        that back the virtual tables found above."""
        virtual_tables = self.get_virtual_tables()
        suffixes = ['content', 'docsize', 'segdir', 'segments', 'stat']
        return set(
            '%s_%s' % (virtual_table, suffix) for suffix in suffixes
            for virtual_table in virtual_tables)
#
# Flask views.
#
@app.route('/')
def index():
    """Landing page; exposes the sqlite3 module to the template."""
    return render_template('index.html', sqlite=sqlite3)
@app.route('/login/', methods=['GET', 'POST'])
def login():
    """Password gate: on a correct password mark the session authorized and
    return to the originally requested page (or the index)."""
    if request.method == 'POST':
        # NOTE(review): plain == comparison; hmac.compare_digest would avoid
        # timing side channels — confirm whether that matters for this tool.
        if request.form.get('password') == app.config['PASSWORD']:
            session['authorized'] = True
            return redirect(session.get('next_url') or url_for('index'))
        flash('The password you entered is incorrect.', 'danger')
    return render_template('login.html')
@app.route('/logout/', methods=['GET'])
def logout():
    """Drop the session authorization flag and return to the login page."""
    session.pop('authorized', None)
    return redirect(url_for('login'))
def require_table(fn):
    """Decorator for views whose first argument is a table name; responds
    404 when that table does not exist in the open database."""
    @wraps(fn)
    def inner(table, *args, **kwargs):
        if table in dataset.tables:
            return fn(table, *args, **kwargs)
        abort(404)
    return inner
@app.route('/create-table/', methods=['POST'])
def table_create():
    """Create a table from the submitted name, then go to its import page."""
    table = (request.form.get('table_name') or '').strip()
    if not table:
        flash('Table name is required.', 'danger')
        return redirect(request.form.get('redirect') or url_for('index'))
    # Subscripting the DataSet creates the table on demand (playhouse
    # DataSet behavior), so the bare expression is the creation step.
    dataset[table]
    return redirect(url_for('table_import', table=table))
@app.route('/<table>/')
@require_table
def table_structure(table):
    """Structure page for a table: columns, indexes, foreign keys, triggers
    and the original CREATE TABLE statement."""
    ds_table = dataset[table]
    model_class = ds_table.model_class
    # Fetch the verbatim CREATE statement from sqlite_master.
    table_sql = dataset.query(
        'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
        [table, 'table']).fetchone()[0]
    return render_template(
        'table_structure.html',
        columns=dataset.get_columns(table),
        ds_table=ds_table,
        foreign_keys=dataset.get_foreign_keys(table),
        indexes=dataset.get_indexes(table),
        model_class=model_class,
        table=table,
        table_sql=table_sql,
        triggers=dataset.get_triggers(table))
def get_request_data():
    """Form data for POST requests, query-string args otherwise."""
    return request.form if request.method == 'POST' else request.args
@app.route('/<table>/add-column/', methods=['GET', 'POST'])
@require_table
def add_column(table):
    """Show the add-column form and, on a valid POST, add the column.

    Columns are always created nullable so existing rows remain valid.
    """
    # SQL type name -> peewee field class used by the schema migrator.
    column_mapping = OrderedDict((
        ('VARCHAR', CharField),
        ('TEXT', TextField),
        ('INTEGER', IntegerField),
        ('REAL', FloatField),
        ('BOOL', BooleanField),
        ('BLOB', BlobField),
        ('DATETIME', DateTimeField),
        ('DATE', DateField),
        ('TIME', TimeField),
        ('DECIMAL', DecimalField)))
    request_data = get_request_data()
    col_type = request_data.get('type')
    name = request_data.get('name', '')
    if request.method == 'POST':
        if name and col_type in column_mapping:
            migrate(
                migrator.add_column(
                    table,
                    name,
                    column_mapping[col_type](null=True)))
            flash('Column "%s" was added successfully!' % name, 'success')
            dataset.update_cache(table)  # model cache is stale after DDL
            return redirect(url_for('table_structure', table=table))
        else:
            flash('Name and column type are required.', 'danger')
    return render_template(
        'add_column.html',
        col_type=col_type,
        column_mapping=column_mapping,
        name=name,
        table=table)
@app.route('/<table>/drop-column/', methods=['GET', 'POST'])
@require_table
def drop_column(table):
    """Show the drop-column form and, on a valid POST, drop the column."""
    request_data = get_request_data()
    name = request_data.get('name', '')
    columns = dataset.get_columns(table)
    column_names = [column.name for column in columns]
    if request.method == 'POST':
        if name not in column_names:
            flash('Name is required.', 'danger')
        else:
            migrate(migrator.drop_column(table, name))
            flash('Column "%s" was dropped successfully!' % name, 'success')
            dataset.update_cache(table)
            return redirect(url_for('table_structure', table=table))
    return render_template(
        'drop_column.html',
        columns=columns,
        column_names=column_names,
        name=name,
        table=table)
@app.route('/<table>/rename-column/', methods=['GET', 'POST'])
@require_table
def rename_column(table):
    """Show the rename-column form and, on a valid POST, rename the column.

    The rename only runs when the source column exists and the target name
    is not already taken.
    """
    request_data = get_request_data()
    rename = request_data.get('rename', '')
    rename_to = request_data.get('rename_to', '')
    columns = dataset.get_columns(table)
    column_names = [column.name for column in columns]
    if request.method == 'POST':
        if (rename in column_names) and (rename_to not in column_names):
            migrate(migrator.rename_column(table, rename, rename_to))
            flash('Column "%s" was renamed successfully!' % rename, 'success')
            dataset.update_cache(table)  # model cache is stale after a rename
            return redirect(url_for('table_structure', table=table))
        else:
            flash('Column name is required and cannot conflict with an '
                  'existing column\'s name.', 'danger')
    return render_template(
        'rename_column.html',
        columns=columns,
        column_names=column_names,
        rename=rename,
        rename_to=rename_to,
        table=table)
@app.route('/<table>/add-index/', methods=['GET', 'POST'])
@require_table
def add_index(table):
    """Show the add-index form and, on a valid POST, create the index over
    the selected columns (optionally UNIQUE)."""
    request_data = get_request_data()
    indexed_columns = request_data.getlist('indexed_columns')
    unique = bool(request_data.get('unique'))
    columns = dataset.get_columns(table)
    if request.method == 'POST':
        if indexed_columns:
            migrate(
                migrator.add_index(
                    table,
                    indexed_columns,
                    unique))
            flash('Index created successfully.', 'success')
            return redirect(url_for('table_structure', table=table))
        else:
            flash('One or more columns must be selected.', 'danger')
    return render_template(
        'add_index.html',
        columns=columns,
        indexed_columns=indexed_columns,
        table=table,
        unique=unique)
@app.route('/<table>/drop-index/', methods=['GET', 'POST'])
@require_table
def drop_index(table):
    """Show the drop-index form and, on a valid POST, drop the index."""
    request_data = get_request_data()
    name = request_data.get('name', '')
    indexes = dataset.get_indexes(table)
    index_names = [index.name for index in indexes]
    if request.method == 'POST':
        if name not in index_names:
            flash('Index name is required.', 'danger')
        else:
            migrate(migrator.drop_index(table, name))
            flash('Index "%s" was dropped successfully!' % name, 'success')
            return redirect(url_for('table_structure', table=table))
    return render_template(
        'drop_index.html',
        indexes=indexes,
        index_names=index_names,
        name=name,
        table=table)
@app.route('/<table>/drop-trigger/', methods=['GET', 'POST'])
@require_table
def drop_trigger(table):
    """Show the drop-trigger form and, on a valid POST, drop the trigger.

    The name comes from user input; it is only accepted when it matches an
    existing trigger on `table`, and embedded double quotes are escaped so a
    trigger name containing '"' cannot break out of the quoted identifier
    (DROP TRIGGER cannot be parameterized).
    """
    request_data = get_request_data()
    name = request_data.get('name', '')
    triggers = dataset.get_triggers(table)
    trigger_names = [trigger.name for trigger in triggers]
    if request.method == 'POST':
        if name in trigger_names:
            # SQLite identifier quoting: double any embedded double quote.
            dataset.query('DROP TRIGGER "%s";' % name.replace('"', '""'))
            flash('Trigger "%s" was dropped successfully!' % name, 'success')
            return redirect(url_for('table_structure', table=table))
        else:
            flash('Trigger name is required.', 'danger')
    return render_template(
        'drop_trigger.html',
        triggers=triggers,
        trigger_names=trigger_names,
        name=name,
        table=table)
@app.route('/<table>/content/')
@require_table
def table_content(table):
    """Paginated row listing for a table, with optional single-column
    ordering via the 'ordering' query arg ('-col' means descending)."""
    page_number = request.args.get('page') or ''
    page_number = int(page_number) if page_number.isdigit() else 1
    dataset.update_cache(table)  # pick up any schema changes before querying
    ds_table = dataset[table]
    total_rows = ds_table.all().count()
    rows_per_page = app.config['ROWS_PER_PAGE']
    total_pages = int(math.ceil(total_rows / float(rows_per_page)))
    # Restrict bounds.
    page_number = min(page_number, total_pages)
    page_number = max(page_number, 1)
    previous_page = page_number - 1 if page_number > 1 else None
    next_page = page_number + 1 if page_number < total_pages else None
    query = ds_table.all().paginate(page_number, rows_per_page)
    ordering = request.args.get('ordering')
    if ordering:
        # A leading '-' selects descending order on the named column.
        field = ds_table.model_class._meta.columns[ordering.lstrip('-')]
        if ordering.startswith('-'):
            field = field.desc()
        query = query.order_by(field)
    field_names = ds_table.columns
    columns = [f.column_name for f in ds_table.model_class._meta.sorted_fields]
    table_sql = dataset.query(
        'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
        [table, 'table']).fetchone()[0]
    return render_template(
        'table_content.html',
        columns=columns,
        ds_table=ds_table,
        field_names=field_names,
        next_page=next_page,
        ordering=ordering,
        page=page_number,
        previous_page=previous_page,
        query=query,
        table=table,
        total_pages=total_pages,
        total_rows=total_rows)
@app.route('/<table>/query/', methods=['GET', 'POST'])
@require_table
def table_query(table):
    """Run an arbitrary SQL query and render (or export) its results."""
    data = []
    data_description = error = row_count = sql = None
    if request.method != 'POST':
        # Pre-populate the editor from ?sql=, or with a default SELECT
        # over the current table.
        sql = request.args.get('sql') or 'SELECT *\nFROM "%s"' % (table)
    else:
        sql = request.form['sql']
        if 'export_json' in request.form:
            return export(table, sql, 'json')
        elif 'export_csv' in request.form:
            return export(table, sql, 'csv')
        try:
            cursor = dataset.query(sql)
        except Exception as exc:
            error = str(exc)
        else:
            # Cap the rendered result set to keep the page responsive.
            data = cursor.fetchall()[:app.config['MAX_RESULT_SIZE']]
            data_description = cursor.description
            row_count = cursor.rowcount
    table_sql = dataset.query(
        'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
        [table, 'table']).fetchone()[0]
    return render_template(
        'table_query.html',
        data=data,
        data_description=data_description,
        error=error,
        query_images=get_query_images(),
        row_count=row_count,
        sql=sql,
        table=table,
        table_sql=table_sql)
@app.route('/table-definition/', methods=['POST'])
def set_table_definition_preference():
    """Persist the user's show/hide preference for table definitions."""
    key = 'show'
    raw = request.form.get(key)
    # Any truthy value other than the literal string 'false' enables it.
    show = bool(raw) and raw != 'false'
    if show:
        session[key] = True
    elif key in session:
        del session[key]
    return jsonify({key: show})
def export(table, sql, export_format):
    """Run *sql* and return the result as a downloadable JSON or CSV file."""
    model_class = dataset[table].model_class
    query = model_class.raw(sql).dicts()
    buf = StringIO()
    if export_format == 'json':
        kwargs, filename, mimetype = (
            {'indent': 2}, '%s-export.json' % table, 'text/javascript')
    else:
        kwargs, filename, mimetype = (
            {}, '%s-export.csv' % table, 'text/csv')
    dataset.freeze(query, export_format, file_obj=buf, **kwargs)
    response_data = buf.getvalue()
    response = make_response(response_data)
    headers = response.headers
    headers['Content-Length'] = len(response_data)
    headers['Content-Type'] = mimetype
    headers['Content-Disposition'] = 'attachment; filename=%s' % (
        filename)
    # Discourage caching so repeated exports always re-run the query.
    headers['Expires'] = 0
    headers['Pragma'] = 'public'
    return response
@app.route('/<table>/import/', methods=['GET', 'POST'])
@require_table
def table_import(table):
    """Import rows into *table* from an uploaded .json or .csv file.

    GET renders the upload form; POST validates the upload, loads it
    inside a single transaction via ``dataset.thaw()``, and redirects to
    the table contents on success.
    """
    count = None
    request_data = get_request_data()
    # 'strict' is forwarded to dataset.thaw(); presumably it rejects rows
    # whose columns don't match the table -- confirm against dataset docs.
    strict = bool(request_data.get('strict'))
    if request.method == 'POST':
        file_obj = request.files.get('file')
        if not file_obj:
            flash('Please select an import file.', 'danger')
        elif not file_obj.filename.lower().endswith(('.csv', '.json')):
            flash('Unsupported file-type. Must be a .json or .csv file.',
                  'danger')
        else:
            # Pick the import format from the file extension.
            if file_obj.filename.lower().endswith('.json'):
                format = 'json'
            else:
                format = 'csv'
            # Here we need to translate the file stream. Werkzeug uses a
            # spooled temporary file opened in wb+ mode, which is not
            # compatible with Python's CSV module. We'd need to reach pretty
            # far into Flask's internals to modify this behavior, so instead
            # we'll just translate the stream into utf8-decoded unicode.
            if not PY2:
                try:
                    stream = TextIOWrapper(file_obj, encoding='utf8')
                except AttributeError:
                    # The SpooledTemporaryFile used by werkzeug does not
                    # implement an API that the TextIOWrapper expects, so we'll
                    # just consume the whole damn thing and decode it.
                    # Fixed in werkzeug 0.15.
                    stream = StringIO(file_obj.read().decode('utf8'))
            else:
                stream = file_obj.stream
            try:
                # Run the import atomically: either every row loads or none.
                with dataset.transaction():
                    count = dataset.thaw(
                        table,
                        format=format,
                        file_obj=stream,
                        strict=strict)
            except Exception as exc:
                flash('Error importing file: %s' % exc, 'danger')
            else:
                flash(
                    'Successfully imported %s objects from %s.' % (
                        count, file_obj.filename),
                    'success')
                return redirect(url_for('table_content', table=table))
    return render_template(
        'table_import.html',
        count=count,
        strict=strict,
        table=table)
@app.route('/<table>/drop/', methods=['GET', 'POST'])
@require_table
def drop_table(table):
    """Confirm (GET) and perform (POST) dropping an entire table."""
    if request.method != 'POST':
        return render_template('drop_table.html', table=table)
    model_class = dataset[table].model_class
    model_class.drop_table()
    dataset.update_cache()  # Update all tables.
    flash('Table "%s" dropped successfully.' % table, 'success')
    return redirect(url_for('index'))
@app.template_filter('format_index')
def format_index(index_sql):
    """Break an index's CREATE statement at the ON keyword for readability."""
    split_regex = re.compile(r'\bon\b', re.I)
    if split_regex.search(index_sql) is None:
        return index_sql
    create, definition = split_regex.split(index_sql)
    return '\nON '.join((create.strip(), definition.strip()))
@app.template_filter('value_filter')
def value_filter(value, max_length=50):
    """Prepare a cell value for display, truncating long strings in HTML."""
    if isinstance(value, numeric):
        return value
    if isinstance(value, binary_types):
        if not isinstance(value, (bytes, bytearray)):
            value = bytes(value)  # Handle `buffer` type.
        value = value.decode('utf-8', decode_handler)
    if not isinstance(value, unicode_type):
        # Non-string, non-numeric, non-binary values pass through unchanged.
        return value
    value = escape(value)
    if len(value) <= max_length:
        return value
    # Long values render truncated, with the full text toggleable.
    return ('<span class="truncated">%s</span> '
            '<span class="full" style="display:none;">%s</span>'
            '<a class="toggle-value" href="#">...</a>') % (
                value[:max_length],
                value)
# Matches the "CREATE TABLE name" prefix and the parenthesized column list
# of a CREATE TABLE statement (DOTALL so the list may span lines).
# Raw strings fix the invalid escape sequences (\( and \)) in the original
# non-raw literal, which warn on modern Python.
column_re = re.compile(r'(.+?)\((.+)\)', re.S)
# Matches one column definition, tolerating commas nested inside
# parentheses, e.g. "price DECIMAL(10, 2)".
column_split_re = re.compile(r'(?:[^,(]|\([^)]*\))+')
def _format_create_table(sql):
    """Reformat a CREATE TABLE statement with one column per line."""
    create_table, column_list = column_re.search(sql).groups()
    columns = []
    for column in column_split_re.findall(column_list):
        column = column.strip()
        if column:
            columns.append(' %s' % column)
    return '%s (\n%s\n)' % (
        create_table,
        ',\n'.join(columns))
@app.template_filter()
def format_create_table(sql):
    """Pretty-print a CREATE TABLE statement, falling back to the raw SQL."""
    try:
        return _format_create_table(sql)
    except Exception:
        # Malformed or unparseable DDL: show it unmodified rather than
        # erroring. (Was a bare `except:`, which would also have swallowed
        # KeyboardInterrupt/SystemExit.)
        return sql
@app.template_filter('highlight')
def highlight_filter(data):
    # Markup prevents Jinja from re-escaping the generated highlight HTML.
    return Markup(syntax_highlight(data))
def get_query_images():
    """Return (name-parts, relative-path) pairs for files in static/img."""
    image_dir = os.path.join(app.static_folder, 'img')
    if not os.path.exists(image_dir):
        return []
    accum = []
    for filename in sorted(os.listdir(image_dir)):
        basename = os.path.splitext(os.path.basename(filename))[0]
        accum.append((basename.split('-'), 'img/' + filename))
    return accum
#
# Flask application helpers.
#
@app.context_processor
def _general():
    # Expose the dataset and whether auth is enabled to every template.
    return {
        'dataset': dataset,
        'login_required': bool(app.config.get('PASSWORD')),
    }
@app.context_processor
def _now():
    # Give every template access to the current timestamp.
    return {'now': datetime.datetime.now()}
@app.before_request
def _connect_db():
    # Open a database connection before handling each request.
    dataset.connect()
@app.teardown_request
def _close_db(exc):
    # Close the connection after each request, if it is still open.
    if not dataset._database.is_closed():
        dataset.close()
class PrefixMiddleware(object):
    """WSGI middleware that serves the wrapped app from a URL prefix.

    Requests whose path begins with the prefix have it stripped from
    PATH_INFO and moved to SCRIPT_NAME (per the WSGI spec); any other
    request is answered directly with a 404.
    """
    def __init__(self, app, prefix):
        self.app = app
        # Normalize to exactly one leading slash and no trailing slash.
        self.prefix = '/%s' % prefix.strip('/')
        self.prefix_len = len(self.prefix)
    def __call__(self, environ, start_response):
        if environ['PATH_INFO'].startswith(self.prefix):
            environ['PATH_INFO'] = environ['PATH_INFO'][self.prefix_len:]
            environ['SCRIPT_NAME'] = self.prefix
            return self.app(environ, start_response)
        else:
            # PEP 3333 requires "<code> <reason phrase>" in the status
            # line; the original bare '404' was technically malformed.
            start_response('404 Not Found', [('Content-Type', 'text/plain')])
            return ['URL does not match application prefix.'.encode()]
#
# Script options.
#
def get_option_parser():
    """Build the optparse parser for the command-line entry point."""
    parser = optparse.OptionParser()
    parser.add_option('-p', '--port', default=8080, type='int',
                      help='Port for web interface, default=8080')
    parser.add_option('-H', '--host', default='127.0.0.1',
                      help='Host for web interface, default=127.0.0.1')
    parser.add_option('-d', '--debug', action='store_true',
                      help='Run server in debug mode')
    parser.add_option('-x', '--no-browser', action='store_false',
                      default=True, dest='browser',
                      help='Do not automatically open browser page.')
    parser.add_option('-P', '--password', action='store_true',
                      dest='prompt_password',
                      help='Prompt for password to access database browser.')
    parser.add_option('-r', '--read-only', action='store_true',
                      dest='read_only',
                      help='Open database in read-only mode.')
    parser.add_option('-u', '--url-prefix', dest='url_prefix',
                      help='URL prefix for application.')
    # SSL-related flags live in their own option group for nicer --help.
    ssl_group = optparse.OptionGroup(parser, 'SSL options')
    ssl_group.add_option('-c', '--ssl-cert', dest='ssl_cert',
                         help='SSL certificate file path.')
    ssl_group.add_option('-k', '--ssl-key', dest='ssl_key',
                         help='SSL private key file path.')
    ssl_group.add_option('-a', '--ad-hoc', action='store_true',
                         dest='ssl_ad_hoc',
                         help='Use ad-hoc SSL context.')
    parser.add_option_group(ssl_group)
    return parser
def die(msg, exit_code=1):
    """Write *msg* to stderr and terminate the process with *exit_code*."""
    sys.stderr.write('%s\n' % (msg,))
    sys.stderr.flush()
    sys.exit(exit_code)
def open_browser_tab(host, port):
    """Open http://host:port/ in the default browser after a short delay.

    The delay gives the server a moment to start; the daemon thread will
    not keep the process alive.
    """
    url = 'http://%s:%s/' % (host, port)
    def _delayed_open(target):
        time.sleep(1.5)
        webbrowser.open_new_tab(target)
    thread = threading.Thread(target=_delayed_open, args=(url,))
    thread.daemon = True
    thread.start()
def install_auth_handler(password):
    """Store the password and register a before-request login check."""
    app.config['PASSWORD'] = password
    @app.before_request
    def check_password():
        # The login page, static assets and favicon are always reachable.
        exempt = request.path == '/login/' or \
            request.path.startswith(('/static/', '/favicon'))
        if not session.get('authorized') and not exempt:
            flash('You must log-in to view the database browser.', 'danger')
            session['next_url'] = request.base_url
            return redirect(url_for('login'))
def initialize_app(filename, read_only=False, password=None, url_prefix=None):
    """Configure the module-global dataset and migrator for *filename*.

    :param filename: path to the SQLite database file.
    :param read_only: open the database via a mode=ro URI (requires
        Python >= 3.4 and peewee >= 3.5.1, enforced below).
    :param password: if given, install the auth handler.
    :param url_prefix: if given, mount the app under this URL prefix.
    """
    global dataset
    global migrator
    if password:
        install_auth_handler(password)
    if read_only:
        if sys.version_info < (3, 4, 0):
            die('Python 3.4.0 or newer is required for read-only access.')
        if peewee_version < (3, 5, 1):
            die('Peewee 3.5.1 or newer is required for read-only access.')
        # SQLite URI filenames (mode=ro) provide true read-only access.
        db = SqliteDatabase('file:%s?mode=ro' % filename, uri=True)
        try:
            # Probe the connection up-front so a missing file fails fast.
            db.connect()
        except OperationalError:
            die('Unable to open database file in read-only mode. Ensure that '
                'the database exists in order to use read-only mode.')
        db.close()
        dataset = SqliteDataSet(db, bare_fields=True)
    else:
        dataset = SqliteDataSet('sqlite:///%s' % filename, bare_fields=True)
    if url_prefix:
        app.wsgi_app = PrefixMiddleware(app.wsgi_app, prefix=url_prefix)
    migrator = dataset._migrator
    # Per-request hooks re-open the connection; leave it closed for now.
    dataset.close()
def main():
    # This function exists to act as a console script entry-point.
    parser = get_option_parser()
    options, args = parser.parse_args()
    if not args:
        die('Error: missing required path to database file.')
    password = None
    if options.prompt_password:
        # Prefer a password from the environment (useful for scripting);
        # otherwise prompt interactively until both entries match.
        if os.environ.get('SQLITE_WEB_PASSWORD'):
            password = os.environ['SQLITE_WEB_PASSWORD']
        else:
            while True:
                password = getpass('Enter password: ')
                password_confirm = getpass('Confirm password: ')
                if password != password_confirm:
                    print('Passwords did not match!')
                else:
                    break
    # Initialize the dataset instance and (optionally) authentication handler.
    initialize_app(args[0], options.read_only, password, options.url_prefix)
    if options.browser:
        open_browser_tab(options.host, options.port)
    if password:
        # Derive a deterministic session secret from the database path and
        # password so sessions remain valid across restarts.
        key = b'sqlite-web-' + args[0].encode('utf8') + password.encode('utf8')
        app.secret_key = hashlib.sha256(key).hexdigest()
    # Set up SSL context, if specified.
    kwargs = {}
    if options.ssl_ad_hoc:
        kwargs['ssl_context'] = 'adhoc'
    if options.ssl_cert and options.ssl_key:
        if not os.path.exists(options.ssl_cert) or not os.path.exists(options.ssl_key):
            die('ssl cert or ssl key not found. Please check the file-paths.')
        kwargs['ssl_context'] = (options.ssl_cert, options.ssl_key)
    elif options.ssl_cert:
        die('ssl key "-k" is required alongside the ssl cert')
    elif options.ssl_key:
        die('ssl cert "-c" is required alongside the ssl key')
    # Run WSGI application.
    app.run(host=options.host, port=options.port, debug=options.debug, **kwargs)
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
|
test_config.py | import asyncio
import copy
import pytest
import random
import yaml
from chaingreen.util.config import create_default_chaingreen_config, initial_config_file, load_config, save_config
from chaingreen.util.path import mkdir
from multiprocessing import Pool
from pathlib import Path
from threading import Thread
from time import sleep
from typing import Dict
# Commented-out lines are preserved to aid in debugging the multiprocessing tests
# import logging
# import os
# import threading
# log = logging.getLogger(__name__)
def write_config(root_path: Path, config: Dict):
    """
    Sleep for a random interval, then persist the config data. With a large
    config, save_config() is expected to require multiple writes.
    """
    delay: float = random.random()
    sleep(delay)
    # log.warning(f"[pid:{os.getpid()}:{threading.get_ident()}] write_config")
    # save_config(root_path=root_path, filename="config.yaml", config_data=modified_config)
    save_config(root_path=root_path, filename="config.yaml", config_data=config)
def read_and_compare_config(root_path: Path, default_config: Dict):
    """
    Sleep briefly, then read the config and compare it with the default
    config data. If the config file is partially-written or corrupt,
    load_config should fail or return bad data.
    """
    # The reader and writer are both delayed by a random amount to
    # encourage interleaved execution.
    sleep(random.random())
    # log.warning(f"[pid:{os.getpid()}:{threading.get_ident()}] read_and_compare_config")
    loaded: Dict = load_config(root_path=root_path, filename="config.yaml")
    assert len(loaded) > 0
    # if config != default_config:
    #     log.error(f"[pid:{os.getpid()}:{threading.get_ident()}] bad config: {config}")
    #     log.error(f"[pid:{os.getpid()}:{threading.get_ident()}] default config: {default_config}")
    assert loaded == default_config
async def create_reader_and_writer_tasks(root_path: Path, default_config: Dict):
    """
    Start one writer and one reader thread and wait for both to finish.
    """
    workers = (
        Thread(target=write_config,
               kwargs={"root_path": root_path, "config": default_config}),
        Thread(target=read_and_compare_config,
               kwargs={"root_path": root_path, "default_config": default_config}),
    )
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def run_reader_and_writer_tasks(root_path: Path, default_config: Dict):
    """
    Subprocess entry point. This function spins-off threads to perform read/write tasks
    concurrently, possibly leading to synchronization issues accessing config data.
    """
    # asyncio.run() creates (and tears down) a fresh event loop, which is
    # exactly what each worker subprocess needs; the previous
    # get_event_loop().run_until_complete() pattern is deprecated for this
    # use since Python 3.10.
    asyncio.run(create_reader_and_writer_tasks(root_path, default_config))
class TestConfig:
    """Tests for config.yaml creation, loading, saving and concurrent access."""
    @pytest.fixture(scope="function")
    def root_path_populated_with_config(self, tmpdir) -> Path:
        """
        Create a temp directory and populate it with a default config.yaml.
        Returns the root path containing the config.
        """
        root_path: Path = Path(tmpdir)
        create_default_chaingreen_config(root_path)
        return Path(root_path)
    @pytest.fixture(scope="function")
    def default_config_dict(self) -> Dict:
        """
        Returns a dictionary containing the default config.yaml contents
        """
        content: str = initial_config_file("config.yaml")
        config: Dict = yaml.safe_load(content)
        return config
    def test_create_config_new(self, tmpdir):
        """
        Test create_default_chaingreen_config() as in a first run scenario
        """
        # When: using a clean directory
        root_path: Path = Path(tmpdir)
        config_file_path: Path = root_path / "config" / "config.yaml"
        # Expect: config.yaml doesn't exist
        assert config_file_path.exists() is False
        # When: creating a new config
        create_default_chaingreen_config(root_path)
        # Expect: config.yaml exists
        assert config_file_path.exists() is True
        expected_content: str = initial_config_file("config.yaml")
        assert len(expected_content) > 0
        with open(config_file_path, "r") as f:
            actual_content: str = f.read()
        # Expect: config.yaml contents are seeded with initial contents
        assert actual_content == expected_content
    def test_create_config_overwrite(self, tmpdir):
        """
        Test create_default_chaingreen_config() when overwriting an existing config.yaml
        """
        # When: using a clean directory
        root_path: Path = Path(tmpdir)
        config_file_path: Path = root_path / "config" / "config.yaml"
        mkdir(config_file_path.parent)
        # When: config.yaml already exists with content
        with open(config_file_path, "w") as f:
            f.write("Some config content")
        # Expect: config.yaml exists
        assert config_file_path.exists() is True
        # When: creating a new config
        create_default_chaingreen_config(root_path)
        # Expect: config.yaml exists
        assert config_file_path.exists() is True
        expected_content: str = initial_config_file("config.yaml")
        assert len(expected_content) > 0
        with open(config_file_path, "r") as f:
            actual_content: str = f.read()
        # Expect: config.yaml contents are overwritten with initial contents
        assert actual_content == expected_content
    def test_load_config(self, root_path_populated_with_config, default_config_dict):
        """
        Call load_config() with a default config and verify a few values are set to the expected values
        """
        root_path: Path = root_path_populated_with_config
        # When: loading a newly created config
        config: Dict = load_config(root_path=root_path, filename="config.yaml")
        assert config is not None
        # Expect: config values should match the defaults (from a small sampling)
        assert config["daemon_port"] == default_config_dict["daemon_port"] == 55400
        assert config["self_hostname"] == default_config_dict["self_hostname"] == "localhost"
        assert (
            config["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
            == default_config_dict["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
            == "ccd5bb71183532bff220ba46c268991a3ff07eb358e8255a65c30a2dce0e5fbb"
        )
    def test_load_config_exit_on_error(self, tmpdir):
        """
        Call load_config() with an invalid path. Behavior should be dependent on the exit_on_error flag.
        """
        root_path: Path = tmpdir
        config_file_path: Path = root_path / "config" / "config.yaml"
        # When: config file path points to a directory
        mkdir(config_file_path)
        # When: exit_on_error is True
        # Expect: load_config will exit
        with pytest.raises(SystemExit):
            _ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=True)
        # When: exit_on_error is False
        # Expect: load_config will raise an exception
        with pytest.raises(ValueError):
            _ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=False)
    def test_save_config(self, root_path_populated_with_config, default_config_dict):
        """
        Test modifying the config and saving it to disk. The modified value(s) should be present after
        calling load_config().
        """
        root_path: Path = root_path_populated_with_config
        config: Dict = copy.deepcopy(default_config_dict)
        # When: modifying the config
        config["harvester"]["farmer_peer"]["host"] = "oldmacdonald.eie.io"
        # Sanity check that we didn't modify the default config
        assert config["harvester"]["farmer_peer"]["host"] != default_config_dict["harvester"]["farmer_peer"]["host"]
        # When: saving the modified config
        save_config(root_path=root_path, filename="config.yaml", config_data=config)
        # Expect: modifications should be preserved in the config read from disk
        loaded: Dict = load_config(root_path=root_path, filename="config.yaml")
        assert loaded["harvester"]["farmer_peer"]["host"] == "oldmacdonald.eie.io"
    def test_multiple_writers(self, root_path_populated_with_config, default_config_dict):
        """
        Test whether multiple readers/writers encounter data corruption. When using non-atomic operations
        to write to the config, partial/incomplete writes can cause readers to yield bad/corrupt data.
        Access to config.yaml isn't currently synchronized, so the best we can currently hope for is that
        the file contents are written-to as a whole.
        """
        # Artificially inflate the size of the default config. This is done to (hopefully) force
        # save_config() to require multiple writes. When save_config() was using shutil.move()
        # multiple writes were observed, leading to read failures when data was partially written.
        default_config_dict["xyz"] = "x" * 32768
        root_path: Path = root_path_populated_with_config
        save_config(root_path=root_path, filename="config.yaml", config_data=default_config_dict)
        num_workers: int = 30
        args = list(map(lambda _: (root_path, default_config_dict), range(num_workers)))
        # Spin-off several processes (not threads) to read and write config data. If any
        # read failures are detected, the failing process will assert.
        with Pool(processes=num_workers) as pool:
            res = pool.starmap_async(run_reader_and_writer_tasks, args)
            res.get(timeout=10)
|
dataloader_iter.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import sys
import time
import signal
import numbers
import logging
import itertools
import threading
import numpy as np
import multiprocessing
from collections import namedtuple
from paddle.fluid.framework import _set_expected_place, _current_expected_place, set_flags
# NOTE: queue has a different name in python2 and python3
import queue
import paddle
from .. import core, layers
from ..framework import in_dygraph_mode, _in_eager_mode
from ..multiprocess_utils import _set_SIGCHLD_handler, MP_STATUS_CHECK_INTERVAL, CleanupFuncRegistrar
from .fetcher import _IterableDatasetFetcher, _MapDatasetFetcher
from .batch_sampler import _InfiniteIterableSampler
from .collate import default_collate_fn, default_convert_fn
from .worker import ParentWatchDog, get_worker_info, _worker_loop, \
_DatasetKind, _IterableDatasetStopIteration, _WorkerException, \
_ResumeIteration
from .flat import _flatten_batch, _restore_batch
__all__ = ['get_worker_info']
# NOTE: fix `terminate called without an active exception`
# if the for loop breaks and the program exits immediately (with no model
# layers processing) after iterating **the first few data** in
# distributed launch mode, distributed launch will call
# terminate() to kill the main process on each device, but the thread
# is still iterating to fill the blocking queue caches, which
# may cause the thread error `terminate called without an active
# exception`, because terminate is a strong signal and `__del__`
# of DataLoader may not be called; so we add a global link to
# the last DataLoader instance to call `__del__` to clean up
# resources
# NOTE: cannot simply add `__del__` to CleanupFuncRegistrar,
# for this would retain a link to each DataLoader instance in
# global scope, which would preclude GC from auto-collecting DataLoader
# instances and would cause a memory leak
# Global link to the most recent DataLoader iterator; used only for cleanup
# at interpreter exit (see the NOTE above).
_loader = None
def _clear_loader():
    # Explicitly invoke the last iterator's __del__ at interpreter exit so
    # its reader thread and blocking queue are released.
    global _loader
    if _loader is not None:
        try:
            _loader.__del__()
            del _loader
        except:
            # Best-effort cleanup during interpreter shutdown: any exception
            # type may occur here, so all are deliberately ignored.
            pass
CleanupFuncRegistrar.register(_clear_loader)
class _DataLoaderIterBase(object):
    """
    Iterator implement of DataLoader, will load and feed mini-batch
    data by setting in given dataloader.
    Args:
        loader(instance of DataLoader): instance of `fluid.io.DataLoader`
    """
    def __init__(self, loader):
        # Snapshot the loader's configuration so this iterator is not
        # affected by later mutation of the DataLoader object.
        self._dataset = loader.dataset
        self._feed_list = loader.feed_list or []
        self._places = loader.places
        self._return_list = loader.return_list
        self._batch_sampler = loader.batch_sampler
        self._drop_last = loader.drop_last
        self._auto_collate_batch = loader.auto_collate_batch
        self._num_workers = loader.num_workers
        self._use_buffer_reader = loader.use_buffer_reader
        self._use_shared_memory = loader.use_shared_memory
        # Fall back to the status-check interval when no positive timeout set.
        self._timeout = loader.timeout if loader.timeout > 0 else MP_STATUS_CHECK_INTERVAL
        self._worker_init_fn = loader.worker_init_fn
        self._dataset_kind = loader.dataset_kind
        self._pin_memory = loader.pin_memory
        self._sampler_iter = iter(self._index_sampler)
        # When auto-collating, collate_fn merges samples into a batch;
        # otherwise samples are only converted (no batching).
        if self._auto_collate_batch:
            self._collate_fn = loader.collate_fn or default_collate_fn
        else:
            self._collate_fn = loader.collate_fn or default_convert_fn
        # LoDTensorBlockingQueue instance for create_py_reader and a thread
        # to put mini-batch data to self._blocking_queue, mini-batch data
        # will be get from:
        # 1. multi-process mode: get data from workers' result queue
        # 2. single-process mode: read mini-batch data in main process
        self._blocking_queue = None
        self._thread = None
        self._thread_done_event = threading.Event()
    @property
    def _index_sampler(self):
        # Batch mode iterates the batch sampler directly; sample mode
        # iterates individual indices (map-style dataset) or an infinite
        # sampler (iterable-style dataset).
        if self._auto_collate_batch:
            return self._batch_sampler
        else:
            if self._dataset_kind == _DatasetKind.MAP:
                return list(range(len(self._dataset)))
            else:
                return _InfiniteIterableSampler(self._dataset, 1)
    def __iter__(self):
        return self
    def __len__(self):
        return len(self._batch_sampler)
    def _exit_thread_expectedly(self):
        # Normal shutdown: signal the producer thread and close the queue.
        self._thread_done_event.set()
        if self._blocking_queue:
            self._blocking_queue.close()
    def _exit_thread_unexpectedly(self):
        # Error shutdown: kill the queue so any blocked reader is released.
        self._thread_done_event.set()
        if self._blocking_queue:
            self._blocking_queue.kill()
class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
    """
    Single process implement of DataLoaderIter, loading data from
    loader.data in main process
    """
    def __init__(self, loader):
        super(_DataLoaderIterSingleProcess, self).__init__(loader)
        self._dataset_fetcher = _DatasetKind.create_fetcher(
            self._dataset_kind, self._dataset, self._auto_collate_batch,
            self._collate_fn, self._drop_last)
        # NOTE: _structure_infos used to record the data structure of
        # batch to restore batch structure after reading Tensor
        # from blocking_queue in single-process mode. Note that
        # only single process is used in single-process mode, we
        # can record the data structure sequencely in a list without
        # recording the send and recv index
        self._structure_infos = []
        # NOTE: len(self._places) batch data compose as an output
        # iteration; the blocking_queue caches at most 1 iteration of
        # data here (capacity = 1 * len(places))
        self._blocking_queue_capacity = 1 * len(self._places)
        self._init_thread()
        self._shutdown = False
        # Record this iterator globally so _clear_loader() can release its
        # resources at interpreter exit.
        global _loader
        _loader = self
    def _init_thread(self):
        self._var_names = [v.name for v in self._feed_list]
        self._shapes = [v.shape for v in self._feed_list]
        self._dtypes = [v.dtype for v in self._feed_list]
        self._need_check_feed = [
            v.desc.need_check_feed() for v in self._feed_list
        ]
        # if only 1 place, do not need to keep order
        self._blocking_queue = core.init_lod_tensor_blocking_queue(
            core.Variable(), self._blocking_queue_capacity,
            len(self._places) > 1)
        self._reader = core.create_py_reader(
            self._blocking_queue, self._var_names, self._shapes, self._dtypes,
            self._need_check_feed, self._places, self._use_buffer_reader, True,
            self._pin_memory)
        # Producer thread: fetches batches and pushes them onto the queue.
        self._thread = threading.Thread(
            target=self._thread_loop, args=(_current_expected_place(), ))
        self._thread.daemon = True
        self._thread.start()
    def _thread_loop(self, legacy_expected_place):
        #NOTE(zhiqiu): Set the expected place for new thread as the same as father thread,
        # and it will call platform::SetDeviceId() in c++ internally.
        # If we do not set cudaDeviceId in new thread, the default cudaDeviceId will be 0,
        # Which may cost hundreds of MB of GPU memory on CUDAPlace(0) if calling some cuda
        # APIs in this thread.
        _set_expected_place(legacy_expected_place)
        while not self._thread_done_event.is_set():
            try:
                indices = next(self._sampler_iter)
                # read data from dataset in mini-batch
                # with paddle.fluid.dygraph.guard(place=paddle.CPUPlace()):
                # read data from dataset in mini-batch
                batch = self._dataset_fetcher.fetch(indices,
                                                    self._thread_done_event)
            except StopIteration:
                # Sampler exhausted: normal end of epoch.
                self._exit_thread_expectedly()
                return
            if batch is None or self._thread_done_event.is_set(): break
            # flat batch and record structure infos
            batch, structure = _flatten_batch(batch)
            self._structure_infos.append(structure)
            if self._thread_done_event.is_set(): break
            try:
                # pack as LoDTensorArray
                array = core.LoDTensorArray()
                for slot in batch:
                    if isinstance(slot, paddle.Tensor):
                        slot = slot.value().get_tensor()
                    elif not isinstance(slot, core.LoDTensor):
                        tmp = core.LoDTensor()
                        tmp.set(slot, core.CPUPlace())
                        slot = tmp
                    array.append(slot)
                if self._thread_done_event.is_set(): break
                try:
                    self._blocking_queue.push(array)
                except:
                    # Push failed (e.g. queue closed by the consumer):
                    # treat as an expected exit.
                    self._exit_thread_expectedly()
            except:
                # Any other failure is unexpected: kill the queue and
                # re-raise in this thread.
                self._exit_thread_unexpectedly()
                six.reraise(*sys.exc_info())
        self._exit_thread_expectedly()
    def __next__(self):
        try:
            if in_dygraph_mode():
                if _in_eager_mode():
                    data = core.eager.read_next_eager_tensor_list(
                        self._reader.read_next_list()[0])
                else:
                    data = self._reader.read_next_var_list()
                # Restore the original (nested) batch structure recorded by
                # the producer thread, in FIFO order.
                data = _restore_batch(data, self._structure_infos.pop(0))
            else:
                if self._return_list:
                    data = self._reader.read_next_list()
                    for i in range(len(data)):
                        data[i] = data[i]._move_to_list()
                    data = [
                        _restore_batch(d, s)
                        for d, s in zip(data, self._structure_infos[:len(
                            self._places)])
                    ]
                    self._structure_infos = self._structure_infos[len(
                        self._places):]
                    # static graph organized data on multi-device with list, if
                    # place number is 1, there is only 1 device, extra the data
                    # from list for devices to be compatible with dygraph mode
                    if len(self._places) == 1:
                        data = data[0]
                else:
                    data = self._reader.read_next()
            return data
        except StopIteration:
            # End of data: release reader/thread resources, then re-raise
            # the StopIteration so normal iteration protocol terminates.
            self._reader.shutdown()
            self._try_shutdown_all()
            six.reraise(*sys.exc_info())
    def _shutdown_thread(self):
        if self._thread:
            self._thread_done_event.set()
            # NOTE: we wait for _thread exit for 3 seconds, if
            # thread not exit normally, force kill it
            for _ in range(3):
                if self._thread.is_alive():
                    time.sleep(1)
                else:
                    break
            else:
                # Never join the current thread with itself.
                if self._thread is not threading.current_thread():
                    self._thread.join()
            self._thread = None
    # python2 compatibility
    def next(self):
        return self.__next__()
    def _try_shutdown_all(self):
        # Idempotent: only the first call performs the shutdown.
        if not self._shutdown:
            try:
                # # _blocking_queue in keep order mode holds sub-threads
                # # need to release thread resources on unexpected exit
                if self._blocking_queue:
                    self._blocking_queue.close()
                    self._blocking_queue = None
                # NOTE: blocking queue should be closed firstly for
                # blocking queue read may hang and _thread_done_event
                # cannot be checked
                self._shutdown_thread()
            finally:
                self._shutdown = True
    def __del__(self):
        self._try_shutdown_all()
class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
def __init__(self, loader):
super(_DataLoaderIterMultiProcess, self).__init__(loader)
self._persistent_workers = loader._persistent_workers
self._resume_worker_cnt = 0
assert self._num_workers > 0, "Multi-process DataLoader " \
"invalid num_workers({})".format(self._num_workers)
# subprocess wrokers' result queue
self._data_queue = None
# data get from _data_queue will be reordered by _rcvd_idx
# for data order keeping, data index not equal _rcvd_idx
# will be cached in _task_infos
self._send_idx = 0
self._rcvd_idx = 0
self._batches_outstanding = 0
self._task_infos = {}
self._structure_infos = []
# indices outstand as _outstanding_capacity at first, and
# blocking_queue capacity is also _outstanding_capacity.
# _outstanding_capacity here to make sure each indices_queue
# has at least 2 indices, and outstanding batch cached
# output data for at least 2 iterations(Note that len(_places)
# batches will be composed as an iteration output)
self._outstanding_capacity = 2 * max(self._num_workers,
len(self._places))
# see _try_put_indices
self._thread_lock = threading.Lock()
# init workers and indices queues and put 2 indices in each indices queue
self._init_workers()
for _ in range(self._outstanding_capacity):
self._try_put_indices()
self._init_thread()
self._shutdown = False
def _init_workers(self):
# multiprocess worker and indice queue list initial as empty
self._workers = []
self._worker_status = []
self._indices_queues = []
self._workers_idx_cycle = itertools.cycle(range(self._num_workers))
# create data_queue for workers
self._data_queue = multiprocessing.Queue()
# event for workers and thread, thread event is only need
# in multi-processing mode
self._workers_done_event = multiprocessing.Event()
self._thread_done_event = threading.Event()
for i in range(self._num_workers):
indices_queue = multiprocessing.Queue()
self._indices_queues.append(indices_queue)
worker = multiprocessing.Process(
target=_worker_loop,
args=(self._dataset, self._dataset_kind, indices_queue,
self._data_queue, self._workers_done_event,
self._auto_collate_batch, self._collate_fn,
self._drop_last, self._worker_init_fn, i,
self._num_workers, self._use_shared_memory))
worker.daemon = True
worker.start()
self._workers.append(worker)
self._worker_status.append(True)
core._set_process_pids(id(self), tuple(w.pid for w in self._workers))
_set_SIGCHLD_handler()
def _clear_and_remove_data_queue(self):
if self._data_queue is not None:
while True:
try:
self._data_queue.get_nowait()
except:
self._data_queue.cancel_join_thread()
self._data_queue.close()
break
def _init_thread(self):
    """Create the blocking queue / C++ py_reader and start the feeder thread."""
    # snapshot variable metadata from the feed list for the reader
    self._var_names = [v.name for v in self._feed_list]
    self._shapes = [v.shape for v in self._feed_list]
    self._dtypes = [v.dtype for v in self._feed_list]
    self._need_check_feed = [
        v.desc.need_check_feed() for v in self._feed_list
    ]
    # if only 1 place, do not need to keep order
    self._blocking_queue = core.init_lod_tensor_blocking_queue(
        core.Variable(), self._outstanding_capacity, len(self._places) > 1)
    self._reader = core.create_py_reader(
        self._blocking_queue, self._var_names, self._shapes, self._dtypes,
        self._need_check_feed, self._places, self._use_buffer_reader, True,
        self._pin_memory)

    self._thread_done_event = threading.Event()
    # thread event is only need in multi-processing mode
    self._thread = threading.Thread(
        target=self._thread_loop, args=(_current_expected_place(), ))
    self._thread.daemon = True
    self._thread.start()
def _reset(self):
    """Restart iteration for the next epoch without recreating workers/thread.

    Used in persistent-workers mode: workers and the feeder thread stay
    alive; only the bookkeeping state and queues are reset.
    """
    # resume iteration in following steps
    # 1. Resume workers, clear worker caches
    # put _ResumeIteration to all worker as resume iteration flag
    with self._thread_lock:
        self._resume_worker_cnt = self._num_workers
        for worker_id in range(self._num_workers):
            self._indices_queues[worker_id].put(_ResumeIteration())
            self._batches_outstanding += 1
    # all flag will be check in _thread_loop, simply wait here
    while self._resume_worker_cnt > 0:
        time.sleep(0.5)

    # 2. clear blocking_queue caches
    # in order not to restart the thread, we just clear
    # the blocking_queue caches instead of recreating one
    while self._blocking_queue.size() >= len(self._places):
        if in_dygraph_mode():
            if _in_eager_mode():
                data = core.eager.read_next_eager_tensor_list(
                    self._reader.read_next_list()[0])
            else:
                self._reader.read_next_var_list()
        elif self._return_list:
            self._reader.read_next_list()
        else:
            data = self._reader.read_next()

    # 3. reset all states
    self._send_idx = 0
    self._rcvd_idx = 0
    self._batches_outstanding = 0
    self._task_infos = {}
    self._structure_infos = []

    # set all worker status available
    self._worker_status = [True] * self._num_workers

    # 4. reset _sampler_iter and put prefetch indices to start next epoch
    # init workers and indices queues and put 2 indices in each indices queue
    self._sampler_iter = iter(self._index_sampler)
    for _ in range(self._outstanding_capacity):
        self._try_put_indices()
def _shutdown_worker(self, worker_id, shutdown=False):
    """Ask worker *worker_id* to exit by queueing the None sentinel.

    A worker is signalled when it is still marked alive, or - during a
    full shutdown - when persistent workers are in use (their status flag
    may already be False between epochs). The status flag is cleared so
    the worker is never signalled twice.
    """
    should_signal = self._worker_status[worker_id] or (
        self._persistent_workers and shutdown)
    if should_signal:
        self._indices_queues[worker_id].put(None)
        self._worker_status[worker_id] = False
def _try_shutdown_all(self, timeout=None):
    """Shut down feeder thread, workers and queues; idempotent via _shutdown.

    :param timeout: optional seconds to wait when joining each worker
                    process (None waits indefinitely).
    """
    if not self._shutdown:
        try:
            self._exit_thread_expectedly()
            self._clear_and_remove_data_queue()

            # set _workers_done_event should be set before put None
            # to indices_queue, workers will exit on reading None from
            # indices_queue
            self._workers_done_event.set()
            for i in range(self._num_workers):
                self._shutdown_worker(i, shutdown=True)

            if not self._shutdown:
                for w in self._workers:
                    w.join(timeout)
                for q in self._indices_queues:
                    q.cancel_join_thread()
                    q.close()
        finally:
            # always unregister worker pids and mark shut down, even if
            # a join/close above raised
            core._erase_process_pids(id(self))
            self._shutdown = True
def _thread_loop(self, legacy_expected_place):
    """Feeder-thread main loop: pull worker results, push to blocking queue."""
    #NOTE(zhiqiu): Set the expected place for new thread as the same as father thread,
    # and it will call platform::SetDeviceId() in c++ internally.
    # If we do not set cudaDeviceId in new thread, the default cudaDeviceId will be 0,
    # Which may cost hundreds of MB of GPU memory on CUDAPlace(0) if calling some cuda
    # APIs in this thread.
    _set_expected_place(legacy_expected_place)

    while not self._thread_done_event.is_set():
        batch = self._get_data()
        if not self._thread_done_event.is_set():
            if batch is None:
                # data drained: end the thread cleanly
                self._exit_thread_expectedly()
            else:
                if isinstance(batch, _ResumeIteration):
                    # ack one worker's resume flag for _reset()
                    assert self._resume_worker_cnt > 0
                    self._resume_worker_cnt -= 1
                    continue
                try:
                    # pack as LoDTensorArray
                    array = core.LoDTensorArray()
                    if self._use_shared_memory:
                        for tensor in batch:
                            array.append(tensor)
                    else:
                        # LoDTensor not in shared memory is not
                        # serializable, cannot be create in workers
                        for slot in batch:
                            if isinstance(slot, paddle.Tensor):
                                slot = slot.value().get_tensor()
                            elif not isinstance(slot, core.LoDTensor):
                                tmp = core.LoDTensor()
                                tmp.set(slot, core.CPUPlace())
                                slot = tmp
                            array.append(slot)

                    # push fails when the queue was closed (consumer gone)
                    if not self._blocking_queue.push(array):
                        self._blocking_queue.close()
                except Exception as e:
                    self._exit_thread_unexpectedly()
                    six.reraise(*sys.exc_info())
                finally:
                    self._rcvd_idx += 1
def _get_data(self):
    """Fetch the next in-order batch from the worker result queue.

    Returns the batch payload, a _ResumeIteration marker (during _reset),
    or None when data is drained. Out-of-order results are parked in
    _task_infos until their index becomes current.
    """
    while not self._thread_done_event.is_set():
        # For IterableDataset, batch indices is generated infinitely
        # for each worker to raise StopIteration, but a StopIteration
        # raising process will discard a batch indices which is count
        # in _send_idx but will not increase _rcvd_idx, so we check
        # whether the worker is still alive here to skip the discarded
        # batch indices and increase _rcvd_idx
        if self._dataset_kind == _DatasetKind.ITER:
            while self._rcvd_idx < self._send_idx:
                info = self._task_infos[self._rcvd_idx]
                # len(info) == 3 means the result already arrived;
                # otherwise keep it only if its worker is still alive
                if len(info) == 3 or self._worker_status[info[0]]:
                    break
                del self._task_infos[self._rcvd_idx]
                self._rcvd_idx += 1
                self._batches_outstanding -= 1
            else:
                # while-else: every pending index was discarded
                # NOTE: in persistent workers mode, do not check data
                # drained here, simply let it go to _data_queue
                # reading to get _ResumeIteration
                if not self._persistent_workers:
                    # NOTE: _rcvd_idx and _send_idx only record batches among
                    # workers, if batches among workers drained, there
                    # may also be data in blocking queue
                    if self._batches_outstanding < len(self._places):
                        return None
                    continue

        # the current index's result already arrived out of order earlier
        if self._rcvd_idx in self._task_infos and \
                len(self._task_infos[self._rcvd_idx]) == 3:
            info = self._task_infos.pop(self._rcvd_idx)
            self._structure_infos.append(info[2])
            return info[1]

        try:
            # [ avoid hang ]: main process may blocking at _reader.read_next when
            # KeyboardInterrupt, we do following tradeoff:
            # 1. get data with timeout, MP_STATUS_CHECK_INTERVAL(5s) as timeout
            #    default, if KeyboardInterrupt blocking, failed workers will be
            #    checked and raise RuntimeError to quit DataLoader in timeout
            #    exception handling.
            # 2. if get data timeout and check workers all alive, continue to
            #    get data again
            data = self._data_queue.get(timeout=self._timeout)
        except Exception as e:
            # check if thread done event set when waiting data
            if self._thread_done_event.is_set():
                continue

            # check failed workers
            failed_workers = []
            for i, w in enumerate(self._workers):
                if self._worker_status[i] and not w.is_alive():
                    failed_workers.append(w)
                    self._shutdown_worker(i)
            if len(failed_workers) > 0:
                self._exit_thread_unexpectedly()
                pids = ', '.join(str(w.pid) for w in failed_workers)
                raise RuntimeError("DataLoader {} workers exit unexpectedly, " \
                                   "pids: {}".format(len(failed_workers), pids))

            # get(timeout) will call _poll(timeout) and may raise IOError
            if isinstance(e, queue.Empty) or isinstance(e, IOError):
                # continue on timeout to keep getting data from queue
                continue

            self._exit_thread_unexpectedly()
            logging.error("DataLoader reader thread failed({}) to read data from " \
                          "workers' result queue.".format(e))
            six.reraise(*sys.exc_info())
        else:
            if self._dataset_kind == _DatasetKind.ITER and isinstance(
                    data, _IterableDatasetStopIteration):
                # if a worker get StopIteration, we shutdown this worker,
                # note that this batch indices to trigger StopIteration
                # is discard, outstanding batch number should be decrease
                # and another indices should be put for other workers
                # may still working.
                if self._persistent_workers:
                    self._worker_status[data.worker_id] = False
                else:
                    self._shutdown_worker(data.worker_id)
                    self._batches_outstanding -= 1
                self._try_put_indices()
                continue

            idx, batch, structure = data

            # _ResumeIteration ack from a worker (see _reset)
            if isinstance(idx, _ResumeIteration) and batch is None \
                    and structure is None:
                return idx

            # a worker raised: propagate its exception in this thread
            if isinstance(batch, _WorkerException):
                self._exit_thread_unexpectedly()
                batch.reraise()

            if idx == self._rcvd_idx:
                del self._task_infos[idx]
                self._structure_infos.append(structure)
                return batch
            else:
                # out-of-order arrival: park (batch, structure) on its entry
                self._task_infos[idx] += (batch, structure)
                continue
def _try_put_indices(self):
    """Take the next batch indices from the sampler and assign a live worker."""
    assert self._batches_outstanding <= self._outstanding_capacity, \
        "too many indices have been put to queue"
    # In multi-process mode for IterableDataset, _try_put_indices will
    # be called both in main process(for our implement has blocking queue,
    # and blocking queue read is in main process) and thread, which may
    # cause error following error
    # 1. "ValueError: generator already executing" in next(self._sampler_iter)
    # 2. re-enter in increase _send_idx
    # add a lock for threading save, for _try_put_indices is only a slight
    # function which is not in data reading pipeline, this lock almost no
    # influence on performance
    with self._thread_lock:
        try:
            indices = next(self._sampler_iter)
        except StopIteration:
            return

        # pick the next live worker round-robin; for-else: give up when
        # a full cycle finds no worker still alive
        for i in range(self._num_workers):
            worker_idx = next(self._workers_idx_cycle)
            if self._worker_status[worker_idx]:
                break
        else:
            return

        self._indices_queues[worker_idx].put((self._send_idx, indices))
        # 1-tuple entry records only the assigned worker until the result
        # arrives (then it grows to (worker, batch, structure))
        self._task_infos[self._send_idx] = (worker_idx, )
        self._batches_outstanding += 1
        self._send_idx += 1
def __del__(self):
    """Best-effort cleanup when the iterator is garbage-collected."""
    self._try_shutdown_all()
def _shutdown_on_exit(self):
    """Interpreter-exit hook: shut down with a short 1s join timeout."""
    self._try_shutdown_all(1)
def __next__(self):
    """Return the next output (one batch per place) or raise StopIteration."""
    try:
        # _batches_outstanding here record the total batch data number
        # in 'from after _try_put_indices to before output data', this
        # value should be _outstanding_capacity if data is not drained,
        # if _batches_outstanding is less than _places number, there are
        # no enough data to generate next output, close blocking_queue and
        # set _thread_done_event here, py_reader will raise StopIteration,
        # end workers and indices_queues in StopIteration handling
        if self._batches_outstanding < len(self._places):
            if self._persistent_workers:
                raise StopIteration
            else:
                self._thread_done_event.set()
                self._blocking_queue.close()

        if in_dygraph_mode():
            if _in_eager_mode():
                data = core.eager.read_next_eager_tensor_list(
                    self._reader.read_next_list()[0])
            else:
                data = self._reader.read_next_var_list()
            data = _restore_batch(data, self._structure_infos.pop(0))
        else:
            if self._return_list:
                data = self._reader.read_next_list()
                for i in range(len(data)):
                    data[i] = data[i]._move_to_list()
                # restore one structure per place, then drop the consumed
                # structure infos
                data = [
                    _restore_batch(d, s)
                    for d, s in zip(data, self._structure_infos[:len(
                        self._places)])
                ]
                self._structure_infos = self._structure_infos[len(
                    self._places):]
                # static graph organized data on multi-device with list, if
                # place number is 1, there is only 1 device, extra the data
                # from list for devices to be compatible with dygraph mode
                if len(self._places) == 1:
                    data = data[0]
            else:
                data = self._reader.read_next()
        self._on_output_batch()
        return data
    except StopIteration:
        if not self._persistent_workers:
            self._reader.shutdown()
            self._try_shutdown_all()
        six.reraise(*sys.exc_info())
# python2 compatibility
def next(self):
    """Python 2 iterator protocol shim; delegates to __next__."""
    return self.__next__()
def _on_output_batch(self):
    """Account for one emitted output: len(_places) batches left the queue.

    For each consumed batch, decrement the outstanding counter and try to
    refill the pipeline with a fresh set of indices.
    """
    consumed = len(self._places)
    for _ in range(consumed):
        self._batches_outstanding -= 1
        self._try_put_indices()
|
PClient.py | # -*- coding : utf-8-*-
# coding:unicode_escape
from Proxy import Proxy
import ast
import threading
import time
import re
class PClient:
    """A peer in a simple tracker-based P2P file-sharing network.

    A PClient registers files with the tracker, serves download requests
    from other peers, and downloads files located via the tracker. All
    network I/O goes through the supplied Proxy; a background thread
    (alwaysListen) services incoming packets.
    """

    def __init__(self, tracker_addr=(str, int), proxy=None, port=None, upload_rate=0, download_rate=0,
                 packet_size=20000, name=None):
        # NOTE(review): the default `tracker_addr=(str, int)` is a tuple of
        # *types*, evidently a placeholder; callers are expected to pass a
        # real (ip, port) tuple - confirm before relying on the default.
        if proxy:
            self.proxy = proxy
        else:
            self.proxy = Proxy(upload_rate, download_rate, port)  # Do not modify this line!
        self.tracker = tracker_addr
        self.name = name
        self.target_address_get = 0  # set to 1 once the tracker has answered a QUERY
        self.target_address = ()     # (ip, port) of the peer chosen as download source
        """
        Start your additional code below!
        """
        self.downloaded_file = b"empty"
        self.active = False          # True while a download transfer is in progress
        self.packet_size = packet_size
        self.registered_fid = set()
        self.fid_length = {}  # packet count of each held fid, reported to the tracker
        self.already_download = 0
        self.special_use_close = ""
        self.listening = True
        self.thread = threading.Thread(target=self.alwaysListen, args=[])
        self.thread.start()

    def __send__(self, data: bytes, dst: (str, int)):
        """
        Do not modify this function!!!
        You must send all your packet by this function!!!
        :param data: The data to be sent
        :param dst: The address of the destination
        """
        self.proxy.sendto(data, dst)

    def __recv__(self, timeout=None) -> (bytes, (str, int)):
        """
        Do not modify this function!!!
        You must receive all data from this function!!!
        :param timeout: if its value has been set, it can raise a TimeoutError;
        else it will keep waiting until receive a packet from others
        :return: a tuple x with packet data in x[0] and the source address(ip, port) in x[1]
        """
        return self.proxy.recvfrom(timeout)

    def register(self, file_path: str):
        """
        Share a file in P2P network
        :param file_path: The path to be shared, such as "./alice.txt"
        :return: fid, which is a unique identification of the shared file and can be used by other PClients to
        download this file, such as a hash code of it
        """
        """
        Start your code below!
        """
        print(self.name, "register start")
        fid = file_path
        already_in_self_fid_length = False  # False means this file's length is not cached yet and must be stored
        for key, value in self.fid_length.items():
            if key == fid:
                already_in_self_fid_length = True
                break
        if not already_in_self_fid_length:
            with open(fid, 'rb') as f:  # f = open(../tes,'rb')
                data = f.read()
                # split the file into fixed-size packets; the +1 keeps the
                # (possibly empty) final partial packet
                packets = [data[i * self.packet_size: (i + 1) * self.packet_size]
                           for i in range(len(data) // self.packet_size + 1)]
                self.fid_length[fid] = len(packets)
        if fid in self.registered_fid:
            return fid
        else:
            self.registered_fid.add(fid)
            msg = "REGISTER: " + fid
            msg = msg.encode()  # strings must be encoded before sending
            # NOTE(review): tracker address is hard-coded here, while other
            # methods use self.tracker - confirm ("127.0.0.1", 10086) is the
            # intended fixed tracker endpoint.
            self.__send__(msg, ("127.0.0.1", 10086))
            time.sleep(0.3)
            # print(self.name, "register finish")
        """
        End of your code
        """
        return fid

    def download(self, fid) -> bytes:
        """
        Download a file from P2P network using its unique identification
        :param fid: the unique identification of the expected file, should be the same type of the return value of share()
        :return: the whole received file in bytes
        """
        """
        Start your code below!
        """
        msg = "QUERY: " + fid
        msg = msg.encode()
        self.__send__(msg, self.tracker)
        # time.sleep(0.25)
        # keep re-querying until the listener thread records a source peer
        while self.target_address_get == 0:
            msg = "QUERY: " + fid
            msg = msg.encode()
            self.__send__(msg, self.tracker)
            time.sleep(0.25)
        if self.target_address_get == 1:
            request = "REQUEST: " + fid
            request = request.encode()
            self.__send__(request, self.target_address)
            self.active = True
            # busy-wait until the listener thread finishes receiving
            while self.active:
                time.sleep(0.1)
        self.target_address_get = 0
        data = self.downloaded_file
        # data = data.encode()
        print(self.name, "download finish")
        # print()
        # re-register so this client also becomes a source for the file
        self.register(fid)
        """
        End of your code
        """
        return data

    def cancel(self, fid):
        """
        Stop sharing a specific file, others should be unable to get this file from this client anymore
        :param fid: the unique identification of the file to be canceled register on the Tracker
        :return: You can design as your need
        """
        self.registered_fid.remove(fid)
        msg = "CANCEL: " + fid
        msg = msg.encode()
        self.__send__(msg, self.tracker)
        print(self.name, "cancel")
        """
        End of your code
        """

    def close(self):
        """
        Completely stop the client, this client will be unable to share or download files anymore
        :return: You can design as your need
        """
        # print(self.name, "ready to close")
        self.registered_fid.clear()
        msg = "CLOSE"
        msg = msg.encode()
        self.__send__(msg, self.tracker)
        # print(self.name, self.proxy.send_queue.qsize())
        # wait for any queued outgoing packets to drain before shutting down
        while not self.proxy.send_queue.empty():
            time.sleep(0.5)
        # print(self.name, self.proxy.send_queue.qsize())
        print(self.name, "close")
        """
        End of your code
        """
        self.listening = False
        # print(self.name, "listening is", self.listening)
        self.proxy.close()

    def listen(self):
        """Handle one incoming packet: REQUEST (serve), GIVE (receive), LIST (tracker reply)."""
        # print(self.name, "listen start")
        try:
            msg, frm = self.__recv__(10)
        except Exception:
            # recv timeout - nothing to handle this round
            return
        self.special_use_close = frm  # special use only: lets close() shut down an uncompleted transfer
        # print(self.name, "listen over")
        msg = msg.decode()
        if msg.startswith("REQUEST:"):  # a peer asks for the file with this fid
            fid = msg[9:]
            with open(fid, 'rb') as f:  # f = open(../tes,'rb')
                data = f.read()
                packets = [data[i * self.packet_size: (i + 1) * self.packet_size]
                           for i in range(len(data) // self.packet_size + 1)]
            # announce the transfer with its total packet count, then stream
            self.__send__(("GIVE: " + fid + "-.-." + str(len(packets))).encode(), frm)
            print(self.name, "send packet length is:", len(packets))
            # time.sleep(0.25)
            self.__transmitting = True
            for packet in packets:
                self.__send__(packet, frm)
            # print("3", self.__transmitting)
            self.__transmitting = False
        elif msg.startswith("GIVE:"):  # an incoming file transfer for this client
            # NOTE(review): the pattern "-.-." is an unescaped regex ('.'
            # matches any char) - works for the fixed separator used above,
            # but could mis-match fids containing a similar sequence; confirm.
            special_notion = re.search("-.-.", msg).span()
            fid = msg[6:special_notion[0]]
            msg = msg[special_notion[1]:]  # total packet count of the file
            # (old note) modifying a module global would require the global keyword
            self.downloaded_file = b""
            for idx in range(int(msg)):
                start = time.time()
                # if(time.time()-start>1)
                data_fragment, frm = self.__recv__()
                self.downloaded_file += data_fragment
                if idx % 100 == 0:
                    print("%s receive %d" % (self.name, idx))
            msg = "OK"
            msg = msg.encode()
            self.__send__(msg, frm)
            self.already_download = 1
            self.active = False
            # tell the tracker the source peer is free again
            self.__send__(("Free: " + fid + "-." + "("+str(frm[0])+","+str(frm[1])+")").encode(), ("127.0.0.1", 10086))
            print("already send Free", fid, frm)
        elif msg.startswith("LIST:"):
            lst = msg[6:]
            who_have = ast.literal_eval(lst)  # it is a list of tuples. eg: ('127.0.0.1', 38235)
            if len(who_have) != 0:
                print(self.name, "knows who have it:", who_have)
            if len(who_have) != 0:
                # pick the first advertised peer as the download source
                self.target_address = who_have[0]
                self.target_address_get = 1
            # else:
            #     print("No server available for",self.name)

    def alwaysListen(self):
        """Background loop: keep servicing packets until close() clears the flag."""
        while self.listening:
            # print(self.name, "invoke listen")
            self.listen()
        # print(self.name, "close listening")
# Module is import-only; nothing runs when executed as a script.
if __name__ == '__main__':
    pass
|
metrics.py | import atexit
import time
import socket
import traceback
import os
import mod_wsgi
from threading import Thread
from queue import Queue
from influxdb import InfluxDBClient
from datetime import datetime
# InfluxDB lives in the same session namespace as this process; the
# hostname is derived from the (required) SESSION_NAMESPACE env var.
session_namespace = os.environ["SESSION_NAMESPACE"]
influxdb_hostname = f"{session_namespace}-influxdb"

# host, port, user, password, database for the metrics store
client = InfluxDBClient(influxdb_hostname, 8086, 'wsgi', 'wsgi', 'wsgi')

# NOTE(review): this Queue is rebound to a fresh instance further down
# (before the collector thread is started), so this instance appears to
# be unused - confirm and consider removing.
queue = Queue()

interval = 1.0  # seconds between metric reports

# identity tags attached to every data point
hostname = socket.gethostname()
pid = os.getpid()
process = f"{hostname}:{pid}"
def report_metrics():
    """Report aggregated metrics to InfluxDB.

    Reads one reporting period's metrics from mod_wsgi, builds the primary
    measurement plus the bucketed time/thread histograms, and writes the
    batch to InfluxDB (errors are printed, never raised).
    """

    # Grab the set of metrics for the current reporting period.
    metrics = mod_wsgi.request_metrics()

    stop_time = datetime.fromtimestamp(metrics["stop_time"]).isoformat()

    data_points = []

    # Create a record for InfluxDB of the primary metrics.
    measurement = {
        "measurement": "request-metrics",
        "time": stop_time,
        "tags": {
            "hostname": hostname,
            "process": process
        },
        "fields": {
            "request_throughput": metrics["request_throughput"],
            "capacity_utilization": metrics["capacity_utilization"],
            "server_time": metrics["server_time"],
            "application_time": metrics["application_time"],
            "cpu_user_time": metrics["cpu_user_time"],
            "cpu_system_time": metrics["cpu_system_time"],
            "memory_max_rss": metrics["memory_max_rss"],
            "memory_rss": metrics["memory_rss"],
            "request_threads_maximum": metrics["request_threads_maximum"],
            "request_threads_started": metrics["request_threads_started"],
            "request_threads_active": metrics["request_threads_active"]
        }
    }

    # queue/daemon times only exist in daemon mode; add them when present
    if metrics["queue_time"] is not None:
        measurement["fields"]["queue_time"] = metrics["queue_time"]

    if metrics["daemon_time"] is not None:
        measurement["fields"]["daemon_time"] = metrics["daemon_time"]

    data_points.append(measurement)

    # Now record special bucketed metrics corresponding to the spread
    # of response times. The first bucket is for 0 to 0.005 seconds.
    # The next will be 0.005 to 0.010. For each subsequent bucket, the
    # end of the time bucket is doubled, except for the last bucket,
    # which is opened ended and covers up to infinity. There should be
    # a total of 16 buckets.
    server_time_buckets = metrics["server_time_buckets"]
    queue_time_buckets = metrics["queue_time_buckets"]
    daemon_time_buckets = metrics["daemon_time_buckets"]
    application_time_buckets = metrics["application_time_buckets"]

    def add_bucket_1(threshold, server_count, queue_count, daemon_count, application_count):
        # One data point per time bucket, tagged with its threshold.
        data_points.append(
            {
                "measurement": "request-metrics",
                "time": stop_time,
                "tags": {
                    "hostname": hostname,
                    "process": process,
                    "time_bucket": threshold,
                },
                "fields": {
                    "server_time_bucket": server_count,
                    "queue_time_bucket": queue_count,
                    "daemon_time_bucket": daemon_count,
                    "application_time_bucket": application_count
                }
            }
        )

    # threshold doubles each step: 0.0, 0.005, 0.01, 0.02, ... ;
    # (0.0 * 2) or 0.005 bootstraps the first non-zero threshold
    threshold = 0.0
    for i in range(len(server_time_buckets)-1):
        add_bucket_1(threshold, server_time_buckets[i], queue_time_buckets[i], daemon_time_buckets[i], application_time_buckets[i])
        threshold = (threshold * 2) or 0.005
    # final open-ended bucket up to infinity
    add_bucket_1(float("inf"), server_time_buckets[-1], queue_time_buckets[-1], daemon_time_buckets[-1], application_time_buckets[-1])

    request_threads_buckets = metrics["request_threads_buckets"]

    def add_bucket_2(thread_id, request_count):
        # One data point per (synthetic) thread slot, busiest first.
        data_points.append(
            {
                "measurement": "request-metrics",
                "time": stop_time,
                "tags": {
                    "hostname": hostname,
                    "process": process,
                    "thread_id": thread_id,
                },
                "fields": {
                    "request_threads_bucket": request_count
                }
            }
        )

    for i, value in enumerate(sorted(request_threads_buckets, reverse=True)):
        add_bucket_2(i+1, value)

    # Write the metrics to InfluxDB.
    try:
        client.write_points(data_points)
    except Exception:
        # reporting must never kill the collector thread; just log it
        traceback.print_exc()
def collector():
    """Collector thread body: report metrics every *interval* seconds.

    Waits on the module-level queue with a timeout; a timeout means the
    next reporting period has arrived, while receiving the shutdown
    sentinel means we should flush the final batch of metrics and exit.
    """
    # The module-level name `queue` is bound to a Queue *instance*, so the
    # exception class must come from the stdlib module directly.
    from queue import Empty

    # Activate aggregated metrics and set baseline for initial period.
    # Since this is the first time it is being called we ignore result.
    mod_wsgi.request_metrics()

    next_time = time.time() + interval

    while True:
        next_time += interval
        now = time.time()

        try:
            # Wait until the next scheduled report time; a queued item
            # (the shutdown sentinel) wakes us early.
            queue.get(timeout=max(0, next_time-now))
        except Empty:
            # Timeout occurred on waiting on queue, which means the next
            # reporting time has arrived.
            pass
        else:
            # The process is being shutdown, so report any metrics that
            # haven't been sent. The return in `finally` guarantees the
            # thread exits even if this final report raises (previously a
            # broad except swallowed the error and the loop span forever).
            try:
                report_metrics()
            finally:
                return

        # Report the current batch of metrics.
        report_metrics()
# Channel used to signal shutdown to the collector thread (rebinds the
# earlier module-level `queue`), and the thread itself - started later
# by enable_reporting(), not here.
queue = Queue()
thread = Thread(target=collector)
def shutdown_handler(name, **kwargs):
    """mod_wsgi shutdown callback: wake the collector with the sentinel."""
    queue.put(None)
def enable_reporting():
    """Subscribe to shutdown of the application so we can report the last
    batch of metrics and notify the collector thread to shutdown.
    """
    mod_wsgi.subscribe_shutdown(shutdown_handler)

    # Start collector thread for periodically reporting accumulated metrics.
    thread.start()
|
oldtest.py | """
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
"""
import base64
import configparser
import json
import os
import queue
import shutil
import signal
import subprocess
import tempfile
import threading
import time
import unittest
import uuid
from distutils.dir_util import copy_tree
import _io
import common
import crypto
import tenant
import tornado_requests
# Process handles for the servers launched by the tests.
sentinel = None
cv_process = None
cn_process = None
cn_process_list = []

# Work queue shared by the concurrency tests (shadows the imported
# `queue` module - later code must use this instance, not the module).
queue = queue.Queue()
num_threads = 5

# Endpoints and credentials come from the shared keylime config file.
config = configparser.RawConfigParser()
config.read(common.CONFIG_FILE)
cloudverifier_port = config.get("general", "cloudverifier_port")
cloudagent_port = config.get("general", "cloudagent_port")
registrar_port = config.get("general", "registrar_port")
cloudagent_ip = config.get("tenant", "cloudagent_ip")
cloudverifier_ip = config.get("tenant", "cloudverifier_ip")
# NOTE(review): registrar_ip is read from the "cloudverifier_ip" option -
# confirm this is intentional (both services on the same host) and not a
# copy-paste of the line above.
registrar_ip = config.get("tenant", "cloudverifier_ip")
tpm_policy = json.loads(config.get("tenant", "tpm_policy"))
my_cert = config.get("tenant", "my_cert")
ca_cert = config.get("tenant", "ca_cert")
private_key = config.get("tenant", "private_key")
test_num_cloudagents = config.getint("general", "test_num_cloudagents")
test_duration = config.getint("general", "test_duration")

# cv_persistence_filename = config.get('cloud_verifier', 'persistence_filename')
# en_persistence_filename = config.get('registrar', 'persistence_filename')
cv_persistence_filename = None
en_persistence_filename = None

# K/U/V content keys, populated by readKUV()
K = None
U = None
V = None
def readKUV():
    """Load the K, U and V content keys from content_keys.txt.

    The file holds one base64-encoded key per line, in K/U/V order; the
    decoded bytes are stored in the module-level globals. Using a context
    manager guarantees the file handle is closed even if a readline or
    base64 decode raises (the original leaked the handle on error).
    """
    global K, U, V
    # read the keys in
    with open("content_keys.txt") as f:
        K = base64.b64decode(f.readline())
        U = base64.b64decode(f.readline())
        V = base64.b64decode(f.readline())
def text_callback(request, context):
    """Mock HTTP callback: force a 402 status and return an empty JSON body."""
    context.status_code = 402
    empty_body = "{}"
    return empty_body
class Test(unittest.TestCase):
cloudverifier_process = None
@classmethod
def setUpClass(cls):
    """Build the table that drives every table-driven test_* method.

    Each entry maps a test method name to: an optional prerun/postrun
    launcher hook, a list of HTTP state-change requests (with expected
    status codes and optional check functions), and optional
    state-validation requests run afterwards.
    """
    cls.test_table = {
        "test_cloudagent_tenant_get_nonce": {
            "prerun_function": {"name": "launch_cloudagent", "argument": None},
            "state_change_functions": [
                {
                    "function_name": "test_cloudagent_tenant_get_nonce",
                    "http_request_verb": "GET",
                    "http_request_ip": cloudagent_ip,
                    "http_request_port": cloudagent_port,
                    "http_request_query": {"nonce": "ThisIsThePasswordABC"},
                    "http_request_path": "/v1/quotes/tenant",
                    "http_result_status_expected": 200,
                    "check_function": {"name": "check_test_cloudagent_tenant_get_nonce"},
                }
            ],
            "postrun_function": {"name": "kill_cloudagent", "argument": None},
        },
        "test_cloudagent_tenant_get_quote": {
            "prerun_function": {"name": "launch_cloudagent", "argument": None},
            "state_change_functions": [
                {
                    "function_name": "test_cloudagent_tenant_get_quote",
                    "http_request_verb": "POST",
                    "http_request_ip": cloudagent_ip,
                    "http_request_port": cloudagent_port,
                    "http_request_path": "/v1/quotes/tenant",
                    "http_request_body": '{"encrypt_check": "K+oD4GfBMAdOFy94ZxTU2hB77tySSB75VVz2Zo4jN02txhNK2KiO5JhE1SRIUVASMZMW/VQUS9WgWdCUaJ+LOTWSuQ13alG4P4cLoamBr9c=","encrypted_key":"rBWIxK4i6zTl/M69Yyh2hmX+itDR9QCx4CIqmuRrEN3JAIUc2M+balr8gPD9r3Bs0OxYRC8/kcxBNo9Bsm93WZKwlmbZt2uVxhfaAqXwdGVpMBnM3bQnAEj1LIFoZZyQ48PVIdrEO4WW73Z2X3fplEFgOC3YT3lzluYgrn8iBkMRm+o2pJMdhynh6xLguszLX7qDOccPIIJch14ftWlsy6Ya9a6LHr9+hIfs4p2ATVVSl1wtUbf/ouNJdqUPAiFc4oXsg+kHQzWWiipjsAm871cA4wlvUb+/D4mFz1p3PRAK9hcICGwKoanWh8jbeuYnoqkch2EoHeLqayrisfNogg=="}',
                    "http_result_status_expected": 200,
                }
            ],
            "postrun_function": {"name": "kill_cloudagent", "argument": None},
        },
        "test_cloudverifier_tenant_provide_v": {
            # "prerun_function" : {"name":"launch_cloudverifier", "argument": None},
            "state_change_functions": [
                {
                    "function_name": "test_cloudverifier_tenant_provide_v",
                    # "pre_function" : {"name":"do_mock_for_test_cloudverifier_tenant_provide_v", "argument": None},
                    "http_request_verb": "POST",
                    "http_request_ip": cloudverifier_ip,
                    "http_request_port": cloudverifier_port,
                    "http_request_path": "/v1/instances",
                    "http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
                    "http_result_status_expected": 200,
                    # "concurrent_instances" : 10,
                    # "concurrent_new_thread_function" : "new_thread",
                    # "test_iterations" : 100,
                },
            ],
        },
        "test_concurrent_access": {
            "prerun_function": {"name": "launch_cloudverifier", "argument": None},
            "state_change_functions": [
                {
                    "function_name": "test_concurrent_access",
                    "http_request_verb": "POST",
                    "http_request_ip": cloudverifier_ip,
                    "http_request_port": cloudverifier_port,
                    "http_request_path": "/v1/instances",
                    "http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
                    "http_result_status_expected": 200,
                    "concurrency": {"instances": 5, "new_thread_function": "new_thread"},
                    "test_iterations": 100,
                },
            ],
            "state_validation_functions": [
                {
                    "function_name": "test_agent_id_list",
                    "http_request_verb": "GET",
                    "http_request_ip": cloudverifier_ip,
                    "http_request_port": cloudverifier_port,
                    "http_request_path": "/v1/instances",
                    # "http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
                    "http_result_status_expected": 200,
                    "check_function": {"name": "check_and_delete_all_entries", "argument": 500},
                },
            ],
            "postrun_function": {"name": "kill_cloudverifier", "argument": None},
        },
        "test_concurrent_cloudnodiness": {
            # "prerun_function" : {"name":"launch_cloudagents", "args": {'starting_port':9000, 'num_cloudagent_instances':250}},
            "prerun_function": {
                "name": "launch_cloudagents",
                "args": {"port_file": "cloudagent_port.txt", "num_cloudagent_instances": test_num_cloudagents},
            },
            "state_change_functions": [
                {
                    "pre_function": {"name": "test_concurrent_cloudnodiness_modify_request", "argument": 500},
                    "function_name": "test_concurrent_cloudnodiness",
                    "http_request_verb": "POST",
                    "http_request_ip": cloudverifier_ip,
                    "http_request_port": cloudverifier_port,
                    "http_request_path": "/v1/instances",
                    "http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"C432FBB3-D2F1-4A97-9EF7-75BD81C00000","cloudagent_ip":"cloudagent_ip.txt","cloudagent_port":"cloudagent_port.txt","tpm_policy": {"22":"ffffffffffffffffffffffffffffffffffffffff","16":"0000000000000000000000000000000000000000"} }',
                    "http_result_status_expected": 200,
                    "test_iterations": test_num_cloudagents,
                    "post_function": {
                        "name": "test_concurrent_cloudnodiness_reset_request",
                        "args": {"ip_file": "cloudagent_ip.txt", "port_file": "cloudagent_port.txt"},
                    },
                },
            ],
            "postrun_function": {
                "name": "kill_cloudagents_after_delay",
                "args": {
                    "sleep": test_duration,
                    "port_file": "cloudagent_port.txt",
                    "num_cloudagent_instances": test_num_cloudagents,
                },
            },
        },
        "test_full_integration_happy_path": {
            # "prerun_function" : {"name":"launch_required_servers", "argument": None},
            "state_change_functions": [
                {
                    "function_name": "do_cloudagent_part",
                    "http_request_verb": "GET",
                    "http_request_ip": cloudagent_ip,
                    "http_request_port": cloudagent_port,
                    "http_request_path": "/v1/quotes/tenant",
                    "http_request_query": {"nonce": "ThisIsThePasswordABC"},
                    "http_result_status_expected": 200,
                    "check_function": {"name": "provide_e"},
                    # "concurrent_new_thread_function" : "new_thread",
                    # "test_iterations" : 100,
                },
                {
                    "function_name": "do_cloudverifier_part",
                    "http_request_verb": "POST",
                    "http_request_ip": cloudverifier_ip,
                    "http_request_port": cloudverifier_port,
                    "http_request_path": "/v1/instances",
                    "http_request_body": '{"v": "XrNfEiODfu1fdXGtWbA+Wk02UhBxx1jTq7zhbC54ROA=","agent_id":"C432FBB3-D2F1-4A97-9EF7-75BD81C866E9","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
                    "http_result_status_expected": 200,
                    "check_function": {"name": "check_test_sleep", "argument": 5},
                    # "concurrent_new_thread_function" : "new_thread",
                    # "test_iterations" : 100,
                },
            ],
            # "postrun_function" : {"name":"kill_required_servers", "argument": None},
        },
        "test_persistance_file_load": {
            "prerun_function": {
                "name": "launch_cloudverifier",
                "args": '{"06480EC4-6BF3-4F00-8323-FE6AE5868297": {"tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}, "ip": "127.0.0.1", "port": "8882", "v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU="}}',
            },
            "state_change_functions": [
                {
                    "function_name": "test_persistance_file_load",
                    "http_request_verb": "GET",
                    "http_request_ip": cloudverifier_ip,
                    "http_request_port": cloudverifier_port,
                    "http_request_path": "/v1/instances",
                    "http_result_status_expected": 200,
                    "check_function": {
                        "name": "check_test_persistance_file_load",
                        "argument": "06480EC4-6BF3-4F00-8323-FE6AE5868297",
                    },
                },
            ],
            "postrun_function": {"name": "kill_cloudverifier", "argument": None},
        },
        "test_persistance_file_write": {
            "prerun_function": {"name": "launch_cloudverifier", "args": "{}"},
            "state_change_functions": [
                {
                    "function_name": "test_persistance_file_write",
                    "http_request_verb": "POST",
                    "http_request_ip": cloudverifier_ip,
                    "http_request_port": cloudverifier_port,
                    "http_request_path": "/v1/instances",
                    "http_request_body": '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
                    "http_result_status_expected": 200,
                    "check_function": {
                        "name": "check_test_persistance_file_write",
                        "argument": "06480EC4-6BF3-4F00-8323-FE6AE5868297",
                    },
                },
            ],
            "postrun_function": {"name": "kill_cloudverifier", "argument": None},
        },
        "test_persistance_file_bad": {
            # deliberately malformed persistence payload
            "prerun_function": {"name": "launch_cloudverifier", "args": "{"},
        },
        "test_persistance_file_empty": {
            "prerun_function": {"name": "launch_cloudverifier", "args": ""},
            "state_change_functions": [
                {
                    "function_name": "test_persistance_file_empty",
                    "http_request_verb": "GET",
                    "http_request_ip": cloudverifier_ip,
                    "http_request_port": cloudverifier_port,
                    "http_request_path": "/v1/instances",
                    "http_result_status_expected": 200,
                    "check_function": {"name": "test_check_persistance_file_empty", "argument": None},
                },
            ],
            "postrun_function": {"name": "kill_cloudverifier", "argument": None},
        },
        "test_persistance_file_nonexistent": {
            "prerun_function": {"name": "launch_cloudverifier", "args": None},
            "state_change_functions": [
                {
                    "function_name": "test_persistance_file_nonexistent",
                    "http_request_verb": "GET",
                    "http_request_ip": cloudverifier_ip,
                    "http_request_port": cloudverifier_port,
                    "http_request_path": "/v1/instances",
                    "http_result_status_expected": 200,
                    "check_function": {"name": "test_check_persistance_file_empty", "argument": None},
                },
            ],
            "postrun_function": {"name": "kill_cloudverifier", "argument": None},
        },
    }
def test_concurrent_cloudnodiness(self):
    """Table-driven test: the scenario is looked up in self.test_table by this method's name."""
    self.execute_test_definition()
def test_cloudagent_tenant_get_nonce(self):
    """Table-driven test: scenario defined in self.test_table under this method's name."""
    self.execute_test_definition()
def test_cloudagent_tenant_get_quote(self):
    """Table-driven test: scenario defined in self.test_table under this method's name."""
    self.execute_test_definition()
def test_cloudverifier_tenant_provide_v(self):
    """Table-driven test: scenario defined in self.test_table under this method's name."""
    self.execute_test_definition()
def test_concurrent_access(self):
    """Table-driven test: scenario defined in self.test_table under this method's name."""
    self.execute_test_definition()
def test_full_integration_happy_path(self):
    """Table-driven test: scenario defined in self.test_table under this method's name."""
    self.execute_test_definition()
def test_persistance_file_load(self):
    """Table-driven test: scenario defined in self.test_table under this method's name."""
    self.execute_test_definition()
def test_persistance_file_write(self):
    """Table-driven test: scenario defined in self.test_table under this method's name."""
    self.execute_test_definition()
def test_persistance_file_bad(self):
    """Table-driven test: launches the verifier with a deliberately malformed ("{") persistence file."""
    self.execute_test_definition()
def test_persistance_file_empty(self):
    """Table-driven test: launches the verifier with an empty persistence file."""
    self.execute_test_definition()
def test_persistance_file_nonexistent(self):
    """Table-driven test: launches the verifier with no persistence file present."""
    self.execute_test_definition()
def test_cloudagent_cloud_verifier_get_quote(self):
    """Placeholder: this scenario is intentionally not implemented yet."""
    pass
def check_test_sleep(
    self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument
):
    """Check hook that simply pauses the test run for ``argument`` seconds.

    ``argument`` may be a number or a numeric string (the test table stores
    hook arguments as strings); it is coerced with float() for consistency
    with sleep_for_a_while().  Returns None, which the caller treats as a
    passing check.
    """
    time.sleep(float(argument))
# '{"v": "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=","agent_id":"06480EC4-6BF3-4F00-8323-FE6AE5868297","cloudagent_ip":"127.0.0.1","cloudagent_port":"8882","tpm_policy": {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}}',
def read_line_in_file(self, infile, line_number):
    """Return the 0-based ``line_number``-th line of ``infile`` (newline included).

    Returns the empty string when the file has fewer lines than requested.
    """
    with open(infile) as handle:
        wanted = (text for index, text in enumerate(handle) if index == line_number)
        return next(wanted, "")
def sleep_for_a_while(self, argument):
    """Block the current thread for ``argument`` seconds (string or number)."""
    duration = float(argument)
    time.sleep(duration)
def test_concurrent_cloudnodiness_modify_request(
    self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument
):
    """Per-iteration pre-hook: rewrite the stored HTTP request body in place.

    Recomputes the tpm_policy "mask" from the numeric PCR keys, optionally
    replaces cloudagent_ip / cloudagent_port with line ``test_iteration`` of
    the ".txt" file they point at, and makes the agent_id unique by
    overwriting the UUID's tail with the port digits.
    """
    test_record = self.test_table.get(test_method_name)
    # perform each of the test functions and store the results
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            request_body = test_functions.get("http_request_body")
            try:
                json_request_body = json.loads(request_body)
                tmpp_policy = json_request_body["tpm_policy"]
                # Rebuild the PCR selection mask: one bit per numeric key.
                mask = 0
                for key in list(tmpp_policy.keys()):
                    if key.isdigit():
                        mask = mask | (1 << int(key))
                mask_str = "0x%X" % (mask)
                tmpp_policy["mask"] = mask_str
                json_request_body["tpm_policy"] = tmpp_policy
                # A value ending in ".txt" means "read the real value from
                # that file, one line per test iteration".
                cloudagent_ip = json_request_body["cloudagent_ip"]
                if cloudagent_ip.endswith(".txt"):
                    cloudagent_ip_file = cloudagent_ip
                    cloudagent_ip_read_from_file = self.read_line_in_file(cloudagent_ip_file, test_iteration)
                    json_request_body["cloudagent_ip"] = cloudagent_ip_read_from_file.strip()
                cloudagent_port = json_request_body["cloudagent_port"]
                if cloudagent_port.endswith(".txt"):
                    cloudagent_port_file = cloudagent_port
                    cloudagent_port_read_from_file = self.read_line_in_file(cloudagent_port_file, test_iteration)
                    json_request_body["cloudagent_port"] = cloudagent_port_read_from_file.strip()
                # parser = ConfigParser.RawConfigParser()
                # parser.read(common.CONFIG_FILE)
                # test_agent_uuid = parser.get('general', 'agent_uuid')
                # Unique per-port agent_id: overwrite the UUID's last
                # len(port) characters with the port digits (same scheme as
                # launch_cloudagents()).
                test_agent_uuid = json_request_body["agent_id"]
                port_string_length = len(str(json_request_body["cloudagent_port"]))
                contrived_uuid = test_agent_uuid[:-port_string_length]
                contrived_uuid = contrived_uuid + str(json_request_body["cloudagent_port"])
                json_request_body["agent_id"] = contrived_uuid
                test_functions["http_request_body"] = json.dumps(json_request_body)
            except Exception as e:
                # Any parsing / lookup error above aborts the test with context.
                self.fail(
                    "Problem in test_concurrent_cloudnodiness_modify_request() replacing cloudagent_ip or cloudagent_port. Error: %s"
                    % e
                )
def test_concurrent_cloudnodiness_reset_request(
self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument
):
# time.sleep(2)
test_record = self.test_table.get(test_method_name)
# perform each of the test functions and store the results
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
request_body = test_functions.get("http_request_body")
try:
json_request_body = json.loads(request_body)
# reset the request body to file arguments for next iteration
json_request_body["cloudagent_ip"] = argument["ip_file"]
json_request_body["cloudagent_port"] = argument["port_file"]
test_functions["http_request_body"] = json.dumps(json_request_body)
except Exception as e:
self.fail(
"Problem in test_concurrent_cloudnodiness_modify_request() replacing cloudagent_ip or cloudagent_port. Error: %s"
% e
)
def test_check_persistance_file_empty(
self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument
):
test_record = self.test_table.get(test_method_name)
# perform each of the test functions and store the results
for test_functions in test_record[state_change_or_validation]:
if test_functions.get("function_name") == test_function_name:
target_body = test_functions.get("http_result_body_actual")
try:
jsondecoded = json.loads(target_body)
# test to make sure these two keys (and values) are in the return
if len(jsondecoded) != 0:
self.fail(
"Expected empty persistence file to replace non existent persistence file on startup."
)
except Exception as e:
self.fail(
"Problem reading persistence file after replacement of empty persistence file. Error: %s" % e
)
def check_test_persistance_file_write(
    self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument
):
    """Check hook: after the POST, the persistence file must contain exactly
    the one posted agent (``argument`` is its uuid) and the .bak backup must
    be an empty JSON container.
    """
    test_record = self.test_table.get(test_method_name)
    uuid_str = argument
    # perform each of the test functions and store the results
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            # BUGFIX: the content assertions used to live inside the try
            # blocks, so the AssertionError raised by self.fail() was caught
            # by `except Exception` and re-reported as a file-read problem.
            try:
                with open(cv_persistence_filename) as persistance_file:
                    file_contents = persistance_file.read()
                json_content = json.loads(file_contents)
            except Exception as e:
                self.fail("Problem reading persistence file after POST. Error: %s" % e)
            if len(json_content) != 1 or json_content.get(uuid_str) is None:
                self.fail("Unexpected persistence file contents.")
            try:
                with open(cv_persistence_filename + ".bak") as backup_persistance_file:
                    backup_file_contents = backup_persistance_file.read()
                json_backup_content = json.loads(backup_file_contents)
            except Exception as e:
                self.fail("Problem reading backup persistence file after POST. Error: %s" % e)
            if len(json_backup_content) != 0:
                self.fail("Unexpected backup persistence file contents.")
def check_test_persistance_file_load(
    self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument
):
    """Check hook: the recorded response body must list exactly one active
    agent, keyed by the uuid given in ``argument``."""
    expected_uuid = argument
    record = self.test_table.get(test_method_name)
    # find the matching test function entry and inspect its recorded body
    for entry in record[state_change_or_validation]:
        if entry.get("function_name") != test_function_name:
            continue
        decoded = json.loads(entry.get("http_result_body_actual"))
        # exactly one entry, and it must be the expected uuid
        if len(decoded) != 1 or decoded.get(expected_uuid) is None:
            self.fail("Expected " + expected_uuid + " to be in the list of active agent_ids")
# def do_mock_for_test_cloudverifier_tenant_provide_v(self, argument):
# global text_callback
# nonce = tpm_initialize.random_password(20)
# tpm_policy = {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff" }
# #theurl = 'http://' + cloudagent_ip + ':' + cloudagent_port + "/v1/quotes/cloudverifier" + "?nonce=" + nonce + "&mask=" + tpm_policy['mask']
# theurl = 'http://' + cloudagent_ip + ':' + cloudagent_port + "/v1/quotes/cloudverifier"
# with requests_mock.Mocker(real_http=True) as m:
# m.get(requests_mock.ANY, text=text_callback)
def provide_e(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    """Post-hook for the quote exchange: validate the agent's TPM quote and,
    if valid, encrypt U with the agent's public key and POST it back.

    Reads the recorded response body (expects "pubkey" and "quote" keys);
    uses the module-level U and K secrets.  Fails the test on a missing key,
    an invalid quote, or a non-200 response from the agent.
    """
    test_record = self.test_table.get(test_method_name)
    # perform each of the test functions and store the results
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            response_body = test_functions.get("http_result_body_actual")
            jsondecoded = json.loads(response_body)
            public_key = jsondecoded.get("pubkey")
            quote = jsondecoded.get("quote")
            # test to make sure these two keys (and values) are in the return
            if public_key is None or quote is None:
                self.fail("Expected both pubkey and quote arguments.")
            else:
                mytenant = tenant.Tenant()
                # command line options can overwrite config values
                mytenant.cloudagent_ip = cloudagent_ip
                mytenant.cloudverifier_ip = cloudverifier_ip
                mytenant.agent_uuid = "C432FBB3-D2F1-4A97-9EF7-75BD81C866E9"
                if mytenant.validate_tpm_quote(public_key, quote):
                    # encrypt U with the public key
                    global U, K
                    encrypted_U = crypto.rsa_encrypt(crypto.rsa_import_pubkey(public_key), str(U))
                    encrypt_check = crypto.do_hmac(K, mytenant.agent_uuid)
                    b64_encrypted_u = base64.b64encode(encrypted_U)
                    data = {"encrypted_key": b64_encrypted_u, "encrypt_check": encrypt_check}
                    u_json_message = json.dumps(data)
                    # post encrypted U back to Cloud Agent
                    response = tornado_requests.request(
                        "POST", f"http://{cloudagent_ip}:{cloudagent_port}/v1/quotes/tenant", data=u_json_message
                    )
                    if response.status_code != 200:
                        self.fail(
                            "Posting of Encrypted U to the Cloud Agent failed with response code %d"
                            % response.status_code
                        )
                else:
                    # NOTE(review): self.nonce is assumed to have been set by an
                    # earlier hook -- confirm before relying on this message.
                    self.fail("TPM Quote from cloud agent is invalid for nonce: %s" % self.nonce)
def check_test_cloudagent_tenant_get_nonce(
    self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument
):
    """Check hook: the recorded response body must contain both a "pubkey"
    and a "quote" entry."""
    record = self.test_table.get(test_method_name)
    # locate the matching test function entry and inspect its recorded body
    for entry in record[state_change_or_validation]:
        if entry.get("function_name") != test_function_name:
            continue
        decoded = json.loads(entry.get("http_result_body_actual"))
        if decoded.get("pubkey") is None or decoded.get("quote") is None:
            self.fail("Expected both pubkey and quote arguments.")
def check_validate_test_cloudverifier_tenant_provide_v(
    self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument
):
    """Deep-check the verifier's stored response for instance
    06480EC4-6BF3-4F00-8323-FE6AE5868297: v, ip, port and tpm_policy must
    match the canned values that were posted earlier in the scenario."""
    expected_policy = {
        "00": "0000000000000000000000000000000000000000",
        "mask": "0x400801",
        "22": "ffffffffffffffffffffffffffffffffffffffff",
    }
    record = self.test_table.get(test_method_name)
    # lookup test data and compare the results to canned values
    for entry in record[state_change_or_validation]:
        if entry.get("function_name") != test_function_name:
            continue
        payload = json.loads(entry.get("http_result_body_actual"))
        # .get() yields None for missing keys, which never equals the
        # expected value, so a plain != covers the missing-key case too.
        if payload.get("v") != "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=":
            self.fail("Returned v from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
        if payload.get("ip") != "127.0.0.1":
            self.fail("Returned ip from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
        if payload.get("port") != "8882":
            self.fail("Returned port from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
        if payload.get("tpm_policy") != expected_policy:
            self.fail("Returned tpm_policy from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
def check_and_delete_all_entries(
    self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument
):
    """Check hook: the verifier must report exactly ``argument`` instances;
    each reported agent_id is then DELETEd so later tests start clean.

    The recorded response body is expected to decode to a JSON list of
    agent_id strings.
    """
    test_record = self.test_table.get(test_method_name)
    # lookup test data and compare the results to canned values
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            target_body = test_functions.get("http_result_body_actual")
            agent_id_list = json.loads(target_body)
            expected_len = argument
            actual_len = len(agent_id_list)
            if actual_len != expected_len:
                self.fail("Expected " + str(expected_len) + " instance id's but received " + str(actual_len))
            # remove every reported instance from the verifier
            for agent_id in agent_id_list:
                params = {
                    "agent_id": agent_id,
                }
                try:
                    response = tornado_requests.request(
                        "DELETE",
                        "http://" + cloudverifier_ip + ":" + cloudverifier_port + "/v1/instances",
                        params=params,
                    )
                    if response.status_code != 200:
                        self.fail("Delete of agent_id " + agent_id + " failed.")
                except Exception as e:
                    # NOTE(review): this broad except also catches the
                    # AssertionError raised by the self.fail just above and
                    # re-reports it as an "exception" failure.
                    self.fail("Delete of agent_id " + agent_id + " failed with exception: %s" % e)
def execute_the_test(self, setup_or_state_change_or_validation, test_functions, test_iteration):
    """Run a single table-driven HTTP test function and validate its result.

    Steps: optional pre_function hook; build the URL from the entry's
    ip/port/path; resolve the request body (inline "http_request_body" or
    from "http_request_body_file" -- supplying both is an error); issue the
    request; record the actual status/headers/body onto ``test_functions``;
    validate expected status, headers (subset match) and body
    (shallow JSON equality, then an optional deep "check_function");
    finally the optional post_function hook.  Every mismatch is reported
    through self.fail().
    """
    # call the pre_function
    pre_function = test_functions.get("pre_function")
    if pre_function is not None:
        pre_function_name = pre_function.get("name")
        pre_function_args = pre_function.get("args")
        # self._testMethodName, test_functions["function_name"], setup_or_state_change_or_validation, check_argument
        # Hooks receive (test name, function name, phase, iteration, args)
        # and must return a truthy value for the test to continue.
        function_return = getattr(self, pre_function_name)(
            self._testMethodName,
            test_functions["function_name"],
            setup_or_state_change_or_validation,
            test_iteration,
            pre_function_args,
        )
        if not function_return:
            self.fail(
                "Test "
                + self._testMethodName
                + ":"
                + test_functions["function_name"]
                + ":"
                + pre_function_name
                + " pre_function failure, test aborted."
            )
    full_url = (
        "http://"
        + test_functions.get("http_request_ip")
        + ":"
        + test_functions.get("http_request_port")
        + test_functions.get("http_request_path")
    )
    http_request_body_tag = test_functions.get("http_request_body")
    http_request_body_file_tag = test_functions.get("http_request_body_file")
    if http_request_body_tag is not None and http_request_body_file_tag is not None:
        self.fail(
            "Test "
            + self._testMethodName
            + ":"
            + test_functions["function_name"]
            + " contains both http_request_body and http_request_body_file tags."
        )
    thedata = ""
    if http_request_body_tag is None and http_request_body_file_tag is not None:
        thedata = open(http_request_body_file_tag).read()
    else:
        thedata = http_request_body_tag
    verb = test_functions.get("http_request_verb")
    query = test_functions.get("http_request_query", "")
    test_functions.get("http_request_header")  # NOTE(review): no-op lookup, result discarded
    req_header = test_functions.get("http_request_header")
    response = tornado_requests.request(verb, full_url, params=query, data=thedata, headers=req_header)
    # Spool the streamed response body into a temp file, then read it back.
    temp = tempfile.TemporaryFile()
    for chunk in response.iter_content(1024):
        temp.write(chunk)
    temp.seek(0)
    # copy the results for future checking
    test_functions["http_result_status_actual"] = response.status_code
    test_functions["http_result_header_actual"] = response.headers
    test_functions["http_result_body_actual"] = temp.read()
    # validate response status
    if test_functions["http_result_status_actual"] != test_functions["http_result_status_expected"]:
        self.fail(
            "Test "
            + self._testMethodName
            + ":"
            + test_functions["function_name"]
            + " expected "
            + str(test_functions["http_result_status_expected"])
            + " but received "
            + str(test_functions["http_result_status_actual"])
        )  # reset the file marker for reading
    # validate response headers (every expected header must appear among the
    # actual headers; extra actual headers are allowed)
    if test_functions.get("http_result_header_expected") is not None and not all(
        item in list(response.headers.items())
        for item in list(test_functions["http_result_header_expected"].items())
    ):
        self.fail(
            "Test "
            + self._testMethodName
            + ":"
            + test_functions["function_name"]
            + ", didn't receive expected headers."
        )
    # validate (shallow) response body
    if test_functions.get("http_result_body_expected") is not None and json.loads(
        test_functions.get("http_result_body_expected")
    ) != json.loads(test_functions.get("http_result_body_actual")):
        self.fail(
            "Test "
            + self._testMethodName
            + ":"
            + test_functions["function_name"]
            + ", didn't receive exact expected result body."
        )
    # validate (deep) response body
    check_function = test_functions.get("check_function")
    if check_function is not None:
        check_argument = check_function.get("argument")
        # NOTE(review): check functions normally signal failure via
        # self.fail() themselves; a truthy return value also fails here.
        if getattr(self, check_function["name"])(
            self._testMethodName,
            test_functions["function_name"],
            setup_or_state_change_or_validation,
            test_iteration,
            check_argument,
        ):
            self.fail(
                "Test "
                + self._testMethodName
                + ":"
                + test_functions["function_name"]
                + ", didn't receive exact expected result body."
            )
    # call the post_function
    post_function = test_functions.get("post_function")
    if post_function is not None:
        post_function_name = post_function.get("name")
        post_function_args = post_function.get("args")
        function_return = getattr(self, post_function_name)(
            self._testMethodName,
            test_functions["function_name"],
            setup_or_state_change_or_validation,
            test_iteration,
            post_function_args,
        )
        if not function_return:
            self.fail(
                "Test "
                + self._testMethodName
                + ":"
                + test_functions["function_name"]
                + ":"
                + post_function_name
                + " post_function failure, test aborted."
            )
    # NOTE(review): skipped when any self.fail() above raises first.
    temp.close()
def request_task(self, queue, setup_or_state_change_or_validation, test_functions, test_iteration):
    """Worker-thread entry point for concurrent test execution.

    Pulls a unique agent_id (pushed by new_thread()) off ``queue``, injects
    it into the request body, runs the test inline, and always calls
    queue.task_done() so execute_test_function_set()'s queue.join() can
    complete regardless of success or failure.
    """
    try:
        # Table data does not provide ability to inject unique agent_id's for each concurrent instance.
        # The queue stores unique agent_id objects, injected by the new_thread function.
        # Get the agent_id from the Queue and modify the original table data to change the agent_id to something unique.
        http_request_body_tag = test_functions.get("http_request_body")
        http_request_body_file_tag = test_functions.get("http_request_body_file")
        if http_request_body_tag is not None and http_request_body_file_tag is not None:
            self.fail(
                "Test "
                + self._testMethodName
                + ":"
                + test_functions["function_name"]
                + " contains both http_request_body and http_request_body_file tags."
            )
        thedata = ""
        if http_request_body_tag is None and http_request_body_file_tag is not None:
            thedata = open(http_request_body_file_tag).read()
        else:
            thedata = http_request_body_tag
        the_uid = queue.get()
        jsondata = json.loads(thedata)
        jsondata["agent_id"] = the_uid
        if http_request_body_tag is not None:
            # BUGFIX: the re-serialized body was previously computed and then
            # discarded, so every concurrent request still carried the
            # table's original agent_id.  Store it back so execute_the_test()
            # actually sends the unique id.  (Only for inline bodies:
            # setting http_request_body alongside http_request_body_file
            # would trip the both-tags check above.)
            test_functions["http_request_body"] = json.dumps(jsondata)
        # call the inline task passing the new data with the unique agent_id
        self.execute_the_test(setup_or_state_change_or_validation, test_functions, test_iteration)
    except Exception as e:
        self.fail(
            "Test "
            + self._testMethodName
            + ":"
            + test_functions["function_name"]
            + ", unexpected exception error: %s" % e
        )
    finally:
        queue.task_done()
def modify_persistence_file(self, argument):
    """Seed (or remove) the cloud verifier persistence file before launch.

    ``argument`` may be:
      * dict      -> written as JSON
      * str       -> written verbatim (may be deliberately malformed JSON)
      * file-like -> its contents are written and it is closed
      * None      -> any existing persistence file is deleted
    Any stale .bak backup file is removed in all cases.
    """
    string_to_write = None
    if isinstance(argument, dict):
        string_to_write = json.dumps(argument)
    elif isinstance(argument, str):
        string_to_write = argument
    elif hasattr(argument, "read"):
        # FIX: duck-type file-like objects instead of isinstance against the
        # private _io.TextIOWrapper class (which required importing _io and
        # excluded other readable objects).
        string_to_write = argument.read()
        argument.close()
    elif argument is None:
        if os.path.isfile(cv_persistence_filename):
            os.remove(cv_persistence_filename)
    if string_to_write is not None:
        with open(cv_persistence_filename, "w") as persistance_file:
            persistance_file.write(string_to_write)
    backup_file_name = cv_persistence_filename + ".bak"
    if os.path.isfile(backup_file_name):
        os.remove(backup_file_name)
def launch_cloudverifier(self, argument):
    """Start the cloud verifier subprocess, optionally seeding its persistence file.

    ``argument`` (dict, raw string, file-like, ...) is forwarded to
    modify_persistence_file() when not None.  Gives the process one second
    to come up; always returns True (prerun hooks ignore the value).
    """
    readKUV()
    # modify the persistence file per the passed argument
    if argument is not None:
        self.modify_persistence_file(argument)
    global cv_process
    cv_process = subprocess.Popen("python cloud_verifier.py", shell=True)
    time.sleep(1)
    return True
def overwrite_config_file(self, path, section, option, value):
    """Set ``section``/``option`` to ``value`` in the INI file at ``path``
    and rewrite the file in place.

    Raises configparser.NoSectionError if the section does not exist.
    """
    parser = configparser.RawConfigParser()
    parser.read(path)
    parser.set(section, option, value)
    # BUGFIX: RawConfigParser.write() emits str, so the file must be opened
    # in text mode -- opening with "wb" raises TypeError on Python 3.
    with open(path, "w") as configfile:
        parser.write(configfile)
def launch_cloudagents(self, argument):
    """Clone this directory once per agent and start a cloud_agent.py in each clone.

    ``argument`` must contain "num_cloudagent_instances" plus either
    "starting_port" (consecutive ports from that value) or "port_file"
    (one port per line, indexed by instance number).  Each clone gets its
    own keylime.conf with a unique cloudagent_port and an agent_uuid whose
    tail is overwritten with the port digits.  Spawned pids are appended to
    the module-level ``cn_process_list``.
    """
    # self.launch_cloudverifier(None)
    port_file = argument.get("port_file")
    cloudagent_start_port = argument.get("starting_port")
    num_cloudagent_instances = argument["num_cloudagent_instances"]
    if cloudagent_start_port is not None:
        parser = configparser.RawConfigParser()
        parser.read(common.CONFIG_FILE)
        original_cloudagent_port = parser.get("general", "cloudagent_port")
        test_agent_uuid = parser.get("general", "agent_uuid")
        for cn in range(num_cloudagent_instances):
            new_dir = r"../cloudagent_on_port_" + str(cloudagent_start_port)
            config_file_path = new_dir + "/keylime.conf"
            copy_tree(".", new_dir)
            shutil.copyfile(common.CONFIG_FILE, config_file_path)
            # NOTE(review): copy_tree() already created new_dir, so this
            # mkdir guard appears unreachable.
            if not os.path.isdir(new_dir):
                os.mkdir(new_dir)
            # shutil.copyfile(r'../keylime.conf', new_dir + r'/keylime.conf')
            self.overwrite_config_file(config_file_path, "general", "cloudagent_port", str(cloudagent_start_port))
            # unique uuid: overwrite the UUID's last len(port) chars with the port
            port_string_length = len(str(cloudagent_start_port))
            contrived_uuid = test_agent_uuid[:-port_string_length]
            contrived_uuid = contrived_uuid + str(cloudagent_start_port)
            self.overwrite_config_file(config_file_path, "general", "agent_uuid", contrived_uuid)
            cn_process_list.append(
                subprocess.Popen("python cloud_agent.py", shell=True, cwd=new_dir, preexec_fn=os.setsid).pid
            )
            cloudagent_start_port = cloudagent_start_port + 1
            # time.sleep(2)
        # restore the original port in the shared config file
        self.overwrite_config_file(common.CONFIG_FILE, "general", "cloudagent_port", str(original_cloudagent_port))
    elif port_file is not None:
        parser = configparser.RawConfigParser()
        parser.read(common.CONFIG_FILE)
        original_cloudagent_port = parser.get("general", "cloudagent_port")
        test_agent_uuid = parser.get("general", "agent_uuid")
        for cn in range(num_cloudagent_instances):
            cloudagent_port_read_from_file = self.read_line_in_file(port_file, cn).strip()
            new_dir = r"../cloudagent_on_port_" + cloudagent_port_read_from_file
            config_file_path = new_dir + "/keylime.conf"
            copy_tree(".", new_dir)
            shutil.copyfile(common.CONFIG_FILE, config_file_path)
            if not os.path.isdir(new_dir):
                os.mkdir(new_dir)
            # shutil.copyfile(r'../keylime.conf', new_dir + r'/keylime.conf')
            self.overwrite_config_file(
                config_file_path, "general", "cloudagent_port", cloudagent_port_read_from_file
            )
            port_string_length = len(cloudagent_port_read_from_file)
            contrived_uuid = test_agent_uuid[:-port_string_length]
            contrived_uuid = contrived_uuid + cloudagent_port_read_from_file
            self.overwrite_config_file(config_file_path, "general", "agent_uuid", contrived_uuid)
            cn_process_list.append(
                subprocess.Popen("python cloud_agent.py", shell=True, cwd=new_dir, preexec_fn=os.setsid).pid
            )
            cloudagent_port = int(cloudagent_port_read_from_file) + 1  # NOTE(review): assigned but never used
            # time.sleep(2)
        self.overwrite_config_file(common.CONFIG_FILE, "general", "cloudagent_port", str(original_cloudagent_port))
    print("done creating cloud agents, waiting for them to start...")
    time.sleep(10)
    print("starting test...")
def kill_cloudagents_after_delay(self, argument):
    """After sleeping ``argument["sleep"]`` seconds, tear down the agents
    created by launch_cloudagents().

    In "starting_port" mode only the cloned directories are removed; in
    "port_file" mode each contrived agent_id is first DELETEd from the
    cloud verifier, then the clone directories are removed.  Finally every
    recorded agent process group is sent SIGTERM.
    """
    sleep_time = argument.get("sleep")
    time.sleep(sleep_time)
    # self.launch_cloudverifier(None)
    port_file = argument.get("port_file")
    cloudagent_start_port = argument.get("starting_port")
    num_cloudagent_instances = argument["num_cloudagent_instances"]
    if cloudagent_start_port is not None:
        # NOTE(review): parser is read but never used in this branch.
        parser = configparser.RawConfigParser()
        parser.read(common.CONFIG_FILE)
        for cn in range(num_cloudagent_instances):
            new_dir = r"../cloudagent_on_port_" + str(cloudagent_start_port)
            shutil.rmtree(new_dir)
            # NOTE(review): assigned but never used, and cloudagent_start_port
            # itself never advances -- only the first port's directory is
            # removed num_cloudagent_instances times.  Confirm intent.
            cloudagent_port = cloudagent_start_port + 1
    elif port_file is not None:
        parser = configparser.RawConfigParser()
        parser.read(common.CONFIG_FILE)
        test_agent_uuid = parser.get("general", "agent_uuid")
        for cn in range(num_cloudagent_instances):
            cloudagent_port_read_from_file = self.read_line_in_file(port_file, cn).strip()
            # rebuild the contrived per-port agent_id used at launch time
            port_string_length = len(cloudagent_port_read_from_file)
            contrived_uuid = test_agent_uuid[:-port_string_length]
            contrived_uuid = contrived_uuid + cloudagent_port_read_from_file
            params = {
                "agent_id": contrived_uuid,
            }
            try:
                print("Sending #" + str(cn) + " DELETE request to CV for uuid: " + contrived_uuid)
                response = tornado_requests.request(
                    "DELETE",
                    "http://" + cloudverifier_ip + ":" + cloudverifier_port + "/v1/instances",
                    params=params,
                )
                if response.status_code != 200:
                    self.fail("Delete of agent_id " + contrived_uuid + " failed.")
            except Exception as e:
                # NOTE(review): this broad except also catches the
                # AssertionError raised by the self.fail just above.
                self.fail("Delete of agent_id " + contrived_uuid + " failed with exception: %s" % e)
        for cn in range(num_cloudagent_instances):
            cloudagent_port_read_from_file = self.read_line_in_file(port_file, cn).strip()
            new_dir = r"../cloudagent_on_port_" + cloudagent_port_read_from_file
            shutil.rmtree(new_dir)
    for the_pid in cn_process_list:
        print("killing pid" + str(the_pid))
        # Agents were spawned with preexec_fn=os.setsid, so each pid leads
        # its own process group -- presumably why killpg(pid, ...) is valid
        # here.  TODO confirm.
        os.killpg(the_pid, signal.SIGTERM)
def kill_cloudverifier(self, argument):
    """Terminate the verifier subprocess started by launch_cloudverifier().

    ``argument`` is unused; always returns True (postrun hooks ignore it).
    """
    cv_process.kill()
    return True
def launch_cloudagent(self, argument):
    """Start a single cloud agent subprocess in the current directory.

    Re-reads the key material via readKUV() (defined elsewhere in this
    file), gives the process one second to come up, and always returns
    True.  ``argument`` is unused.
    """
    readKUV()
    global cn_process
    cn_process = subprocess.Popen("python cloud_agent.py", shell=True)
    time.sleep(1)
    return True
def kill_cloudagent(self, argument):
    """Terminate the agent subprocess started by launch_cloudagent().

    ``argument`` is unused; always returns True.
    """
    cn_process.kill()
    return True
def launch_required_servers(self, argument):
    """Start both the cloud agent and the cloud verifier; always returns True."""
    self.launch_cloudagent(argument)
    self.launch_cloudverifier(argument)
    return True
def kill_required_servers(self, argument):
    """Stop both the cloud agent and the cloud verifier; always returns True."""
    self.kill_cloudagent(argument)
    self.kill_cloudverifier(argument)
    return True
def new_thread(self, args):
    """Enqueue a fresh UUID for a concurrent worker and build (but do not
    start) the thread that will run request_task with ``args``.

    ``args`` is (queue, phase, test_functions, test_iteration); the queue
    is the first element.
    """
    uuid_queue = args[0]
    # each concurrent instance gets its own unique agent_id via the queue
    uuid_queue.put(str(uuid.uuid4()))
    return threading.Thread(target=self.request_task, args=args)
def execute_test_function_set(self, setup_or_state_change_or_validation):
    """Run every test function in the named phase of the current test.

    ``setup_or_state_change_or_validation`` is one of "setup_functions",
    "state_change_functions" or "state_validation_functions".  Each
    function entry may request repetition ("test_iterations", default 1)
    and/or concurrency (a "concurrency" dict with mandatory "instances"
    and "new_thread_function" keys).  Concurrent work synchronizes on the
    module-level ``queue``.
    """
    # look up the test record
    test_record = self.test_table.get(self._testMethodName)
    # perform each of the test functions and store the results
    change_or_validation = test_record.get(setup_or_state_change_or_validation)
    if change_or_validation is not None:
        for test_functions in test_record[setup_or_state_change_or_validation]:
            # full_url = "http://" + test_functions.get("http_request_ip") + ":" + test_functions.get("http_request_port") + test_functions.get("http_request_path")
            # http_request_body_tag = test_functions.get("http_request_body")
            # http_request_body_file_tag = test_functions.get("http_request_body_file")
            # if http_request_body_tag != None and http_request_body_file_tag != None :
            # self.fail("Test " + self._testMethodName + ":" + test_functions["function_name"] + " contains both http_request_body and http_request_body_file tags." )
            #
            # thedata = ''
            # if http_request_body_tag == None and http_request_body_file_tag != None:
            # thedata = open(http_request_body_file_tag).read()
            # else:
            # thedata=http_request_body_tag
            # verb = test_functions.get("http_request_verb")
            # query = test_functions.get("http_request_query","")
            # test_functions.get("http_request_header")
            # req_header = test_functions.get("http_request_header")
            concurrent_instances = None
            concurrent_new_thread_function = None
            concurrency_dict = test_functions.get("concurrency")
            if concurrency_dict is not None:
                concurrent_instances = concurrency_dict.get("instances")
                concurrent_new_thread_function = concurrency_dict.get("new_thread_function")
                if concurrent_instances is None or concurrent_new_thread_function is None:
                    self.fail(
                        "Test "
                        + self._testMethodName
                        + ":"
                        + test_functions["function_name"]
                        + ' contains concurrency agent without mandatory \\"instances\\" or and \\"new_thread_function\\" specifiers'
                    )
            for test_iteration in range(int(test_functions.get("test_iterations", "1"))):
                if concurrent_instances is None:
                    # do it inline on this thread
                    self.execute_the_test(setup_or_state_change_or_validation, test_functions, test_iteration)
                else:
                    threads = []
                    for count in range(concurrent_instances):
                        args = (queue, setup_or_state_change_or_validation, test_functions, test_iteration)
                        # call the new_thread_function specified in the test table under concurrency tag.
                        # the new_thread_function is responsible for setting up the task, and creating the new thread.
                        # the task given to the thread must not block and call task_done() on completion regardless of success or failure
                        new_thread = getattr(self, concurrent_new_thread_function)(args)
                        threads.append(new_thread)
                    # start the threads
                    for t in threads:
                        t.start()
                    # blocks until all tasks have called task_done()
                    queue.join()
                    # blocks until all threads are complete
                    for t in threads:
                        t.join()
def execute_test_definition(self):
    """Drive one table-driven test: optional prerun hook, the three
    standard phases in order, then the optional postrun hook.

    Hook return values are not inspected (launch/kill helpers return True
    unconditionally).
    """
    test_record = self.test_table.get(self._testMethodName)
    prerun = test_record.get("prerun_function")
    if prerun is not None:
        getattr(self, prerun.get("name"))(prerun.get("args"))
    for phase in ("setup_functions", "state_change_functions", "state_validation_functions"):
        self.execute_test_function_set(phase)
    postrun = test_record.get("postrun_function")
    if postrun is not None:
        getattr(self, postrun.get("name"))(postrun.get("args"))
def setUp(self):
    # No shared per-test fixtures; each test launches its own servers via
    # its table-defined prerun_function.
    pass
def tearDown(self):
    # os.killpg(self.cloudverifier_process.pid, signal.SIGKILL)
    # Cleanup is handled by each test's table-defined postrun_function.
    pass
if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testName']
    # Run the unittest test runner when executed as a script.
    unittest.main()
|
reset_job_test_alone.py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import parl
from parl.remote.master import Master
from parl.remote.worker import Worker
from parl.remote.client import disconnect
from parl.utils import logger, _IS_WINDOWS
import os
import threading
import time
import subprocess
from parl.utils import get_free_tcp_port
@parl.remote_class
class Actor(object):
    """Minimal remote actor used to exercise job recovery in the PARL cluster."""

    def __init__(self, arg1=None, arg2=None):
        self.arg1 = arg1
        self.arg2 = arg2

    def get_arg1(self):
        """Return the first stored argument."""
        return self.arg1

    def get_arg2(self):
        """Return the second stored argument."""
        return self.arg2

    def set_arg1(self, value):
        """Replace the first stored argument."""
        self.arg1 = value

    def set_arg2(self, value):
        """Replace the second stored argument."""
        self.arg2 = value

    def add_one(self, value):
        """Return ``value`` incremented by one."""
        return value + 1

    def add(self, x, y):
        """Return ``x + y`` after a three-second delay (simulates slow work)."""
        time.sleep(3)
        return x + y

    def will_raise_exception_func(self):
        """Always raise ZeroDivisionError, for exception-propagation tests."""
        return 1 / 0
class TestJobAlone(unittest.TestCase):
    """Verify that the PARL cluster recovers after all job processes are killed."""

    def tearDown(self):
        # Drop the client connection so later tests start from a clean state.
        disconnect()

    def test_job_exit_exceptionally(self):
        """Kill every remote job process, then check a new actor can still be created."""
        port = get_free_tcp_port()
        master = Master(port=port)
        th = threading.Thread(target=master.run)
        th.start()
        time.sleep(1)
        worker1 = Worker('localhost:{}'.format(port), 4)
        # give the worker time to pre-fork its job buffer
        time.sleep(10)
        self.assertEqual(worker1.job_buffer.full(), True)
        time.sleep(1)
        self.assertEqual(master.cpu_num, 4)
        print("We are going to kill all the jobs.")
        # kill every process whose command line mentions remote job.py
        if _IS_WINDOWS:
            command = r'''for /F "skip=2 tokens=2 delims=," %a in ('wmic process where "commandline like '%remote\\job.py%'" get processid^,status /format:csv') do taskkill /F /T /pid %a'''
            print(os.popen(command).read())
        else:
            command = (
                "ps aux | grep remote/job.py | awk '{print $2}' | xargs kill -9"
            )
            subprocess.call([command], shell=True)
        # the worker should respawn jobs, so a fresh actor must work again
        parl.connect('localhost:{}'.format(port))
        actor = Actor()
        self.assertEqual(actor.add_one(1), 2)
        time.sleep(20)
        master.exit()
        worker1.exit()
if __name__ == '__main__':
    # Run the job-recovery test suite when executed directly.
    unittest.main()
|
there.py | #!/usr/bin/env python3
"""Plot results of a Where analysis
Usage::
{exe:there} [date] [--<pipeline>] [pipeline-options] [options]
The following commands are optional:
=================== ===========================================================
Command Description
=================== ===========================================================
date The model run date in the format ``<year month day>``.
=================== ===========================================================
Furthermore, the following options are recognized:
=================== ===========================================================
Option Description
=================== ===========================================================
{pipelines_doc:Plot results from}
-S, --showconfig Show configuration of There and exit.
--style=style Set style of plotting. Choose between white, dark,
whitegrid, darkgrid, and ticks. Default is darkgrid.
--context=size Set size of font and markers. Choose between paper,
notebook, talk, and poster. Default is notebook.
--colormap=cmap Set colors used for plotting. Choose between all colormaps
recognized by matplotlib. Try --colormap=help for a list.
--debug, ... Show additional debug information. Other flags such as
--all, --debug, --info, --warn, --error, --fatal, --none
are also allowed, and will show differing amounts of
information as the program runs.
--version Show version information and exit.
-h, --help Show this help message and exit.
=================== ===========================================================
Description:
------------
The script plots the results from a Where analysis for a given pipeline for the
given model run date. The analysis should already have been run with the main
where script.
Current Maintainers:
--------------------
{maintainers}
Version: {version}
"""
# Standard library imports
from contextlib import contextmanager
from datetime import date, datetime
import editor
import itertools
from pprint import pprint
import seaborn as sns
import subprocess
import sys
from threading import Thread
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
from tkinter import font as tk_font
# External library imports
import matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib import cm
import matplotlib.dates as mdates
from matplotlib import gridspec
from mpl_toolkits.mplot3d import Axes3D # noqa (Needed for projection='3d')
import numpy as np
# Midgard imports
from midgard.dev import console
from midgard.dev import exceptions
from midgard.math.unit import Unit
# Where imports
import where
from where import pipelines
from where import setup
from where.data import dataset3 as dataset
from where.data.time import Time
from where.lib import config
from where.lib import log
from where.lib import util
# Registries populated at import time by the decorators below
PLOT_TYPES = dict()  # Dynamically set by plot_type()
TABS = dict()  # Dynamically set by register_tab()
DROPDOWNS = dict()  # Dynamically set by register_dropdown()
KEY_EVENTS = dict()  # Read from config by set_up_key_events()

# Show full axis values instead of a scientific-notation offset on plot axes
matplotlib.rcParams["axes.formatter.useoffset"] = False
class CallWrapperReportingException(tk.CallWrapper):
    """Tk callback wrapper that surfaces exceptions instead of swallowing them.

    Installed in place of the stock ``tk.CallWrapper`` so that any exception
    raised inside a tkinter callback is printed to stdout and shown to the
    user in an error dialog.
    """

    def __call__(self, *args):
        """Run the substitution function (if any) on the arguments, then the callback."""
        try:
            call_args = args if not self.subst else self.subst(*args)
            return self.func(*call_args)
        except SystemExit:
            raise  # a deliberate exit must propagate untouched
        except:  # noqa
            # Imported lazily so the happy path pays no import cost
            import traceback
            from tkinter import messagebox

            err = traceback.format_exc()
            print(err)
            messagebox.showerror("Oops ...", err)


# Install the reporting wrapper globally for all tkinter callbacks
tk.CallWrapper = CallWrapperReportingException
@util.no_traceback
def main():
    """Parse command line arguments and run the program

    Sets up logging, reads the pipeline and profile configuration from the
    command line, optionally prints the configuration (-S/--showconfig),
    applies the Seaborn style/context and colormap options, and finally
    starts the There GUI main loop.
    """
    util.check_help_and_version(doc_module=__name__)

    # Start logging
    log_cfg = config.there.log
    prefix = util.get_program_name().title()
    log.init(log_level=log_cfg.default_level.str, prefix=prefix)
    if log_cfg.log_to_file.bool:
        log.file_init(
            file_path=config.files.path("log"),
            log_level=log_cfg.default_level.str,
            prefix=prefix,
            rotation=log_cfg.number_of_log_backups.int,
        )

    # Use options to specify pipeline and session, include pipeline as profile
    pipeline = pipelines.get_from_options()
    config.read_pipeline(pipeline)

    # Add command line options to config
    user = config.program_vars(rundate=None, pipeline=pipeline)["user"]
    profile = util.read_option_value("--profile")
    config.there.update_from_options(profile="__opts__", allow_new=True)
    # Profiles are consulted in priority order; falsy entries are dropped
    config.there.profiles = [p for p in ("__opts__", profile, pipeline, user) if p]

    # Show the model configuration info
    if util.check_options("-S", "--showconfig"):
        print(config.there)
        raise SystemExit

    # Initialize (poor man's optional commands)
    rundate = None
    util.parse_args(doc_module=__name__)
    # Any positional (non-option) argument is interpreted as the run date
    if [a for a in sys.argv[1:] if not a.startswith("-")]:
        rundate = util.parse_args("date")

    # Use Seaborn to add styling
    sns.set(context=config.there.context.str, style=config.there.style.str)

    # Test if the colormap is recognized
    colormap = config.there.colormap.str
    if colormap:
        if colormap == "help":
            print("Possible values for colormap are:")
            print(console.fill(", ".join(sorted(cm.cmap_d)), initial_indent=" " * 4, subsequent_indent=" " * 4))
            raise SystemExit
        else:
            cm.get_cmap(colormap)  # raises ValueError if the colormap name is unknown
    args, kwargs = util.options2args(sys.argv[1:])

    # Run program
    there = There(rundate, pipeline, *args, **kwargs)
    there.mainloop()
def plot_type(func):
    """Decorator registering a plotting function in the PLOT_TYPES registry.

    The registry key is the function name without its ``plot_`` prefix and
    with underscores rendered as spaces.
    """
    key = func.__name__.replace("plot_", "").replace("_", " ")
    PLOT_TYPES[key] = func
    log.debug(f"Registered plot type {key!r} handled by {func.__name__}")
    return func
def register_tab(cls):
    """Decorator to register tabs in the TABS registry.

    Returns *cls* so the decorated class name keeps pointing at the class.
    (Previously the decorator returned None, which rebound the module-level
    class name to None; the class was reachable only through TABS.)
    """
    TABS[cls.name] = cls
    log.debug(f"Registered tab {cls.name!r} handled by {cls.__name__}")
    return cls
def register_dropdown(cls):
    """Decorator to register dropdowns in the DROPDOWNS registry.

    Returns *cls* so the decorated class name keeps pointing at the class.
    (Previously the decorator returned None, which rebound the module-level
    class name to None; the class was reachable only through DROPDOWNS.)
    """
    DROPDOWNS[cls.name] = cls
    log.debug(f"Registered dropdown {cls.name!r} handled by {cls.__name__}")
    return cls
class There(tk.Tk):
    """A simple GUI wrapper around a Matplotlib chart for manipulating data

    The GUI makes it easy to interactively choose which data are plotted.
    """

    def __init__(self, rundate=None, pipeline=None, *args, **kwargs):
        """Set up the basic structure of the GUI

        Args:
            rundate:  Model run date (or None to start on the run-analysis tab).
            pipeline: Pipeline name; NOTE(review): ``pipeline.upper()`` below
                      assumes this is not None despite the default — confirm callers.
        """
        super().__init__()
        super().wm_title(f"{util.get_program_name().title()} - {pipeline.upper()}")
        self._there_icon = tk.PhotoImage(file=config.files.path("there_icon"))  # Keep reference to icon
        self.iconphoto(True, self._there_icon)

        # Set up default values for variables
        self.vars = dict(
            common=dict(config.program_vars(rundate, pipeline, **kwargs), **config.date_vars(rundate), rundate=rundate)
        )
        self.vars["common"]["do_update"] = True

        # Add tabs
        self.status = Status(self)
        self.nb = ttk.Notebook(self)
        self.nb.enable_traversal()
        self.tabs = dict()
        tabs = config.there.tabs.list
        for tab in tabs:
            # Tab variables: each tab gets its own copy of the common vars,
            # optionally overridden by a "<tab>_initial" config section
            self.vars[tab] = self.vars["common"].copy()
            if f"{tab}_initial" in config.there.section_names:
                self.vars[tab].update(config.there[f"{tab}_initial"].as_dict())
            # Add tab widget
            self.tabs[tab] = TABS[tab](self.nb, self.vars[tab])
            self.tabs[tab].add_to(self.nb)
        self.nb.pack(expand=True, fill="both")

        # Only display status line if set in config
        if config.there.status.display.bool:
            self.status.pack(side=tk.TOP, fill=tk.X, expand=False)

        # Select tab depending on how much was specified on the command line
        if pipeline and rundate:
            self.select_tab("analysis")
        elif pipeline:
            self.select_tab("timeseries")
        else:
            self.select_tab("run_analysis")

        # Bind key events
        self.set_up_key_events()

    def set_up_key_events(self):
        """Read keyboard shortcuts from config and bind them to the <Key> event"""
        # Method events are first tried on the current tab, then on There in general
        if "key_method" in config.there.section_names:
            for key, entry in config.there.key_method.items():
                KEY_EVENTS[key] = (entry.str.capitalize().replace("_", " "), [["current_tab", entry.str], [entry.str]])
        # Config events add to the current tech config using update_config()
        if "key_config" in config.there.section_names:
            for key, entry in config.there.key_config.items():
                KEY_EVENTS[key] = (
                    f"Update config: {entry.str}",
                    [["current_tab", "update_config"]],
                    entry.as_list(", *"),
                )
        # Select events select from dropdowns using select_dropdown()
        if "key_select" in config.there.section_names:
            for key, entry in config.there.key_select.items():
                KEY_EVENTS[key] = (f"Select {entry.str}", [["current_tab", "select_dropdown"]], entry.as_list(", *"))
        self.bind("<Key>", self.key_pressed)

    def key_pressed(self, event):
        """Dispatch a pressed key to the handler registered in KEY_EVENTS

        Each candidate attribute path is tried in order; the first one that
        resolves is called. If none resolves a warning is logged.
        """
        key = event.char
        if key not in KEY_EVENTS:
            return
        # Rebinds `event` to the registered description (Tk event no longer needed)
        event, attrs_list, *args = KEY_EVENTS[key]
        for attrs in attrs_list:
            obj = self
            try:
                # Walk the attribute path (e.g. ["current_tab", "update_config"])
                for attr in attrs:
                    obj = getattr(obj, attr)
                obj(*args)
                break
            except AttributeError:
                continue
        else:
            log.warn(f"Key {key!r} is not valid on the {self.current_tab.name.title().replace('_', ' ')!r} tab")

    @property
    def current_tab(self):
        """The Tab object currently selected in the notebook"""
        # Notebook.select() returns the widget path; the tab name is the last component
        tab = self.nb.select().rpartition(".")[-1]
        return self.tabs[tab]

    def select_tab(self, tab):
        """Select the named tab, warning if it does not exist"""
        if tab in self.tabs:
            return self.tabs[tab].select()
        log.warn(f"Could not select unknown tab {tab!r}")

    def show_keyboard_shortcuts(self):
        """Log and display the currently configured keyboard shortcuts"""
        key_str = ["Current Keyboard Shortcuts:"]
        key_str += [f"{k}: {v[0]}" for k, v in sorted(KEY_EVENTS.items())]
        log.out("\n ".join(key_str))
        self.current_tab.status("\n".join(key_str))

    def run(self, func, *args, description="", **kwargs):
        """Start Runner instance to run command"""
        self.status.write(f"{description} ...")
        log.info(description)
        Runner(self).run(func, *args, description=description, **kwargs)
        self.status.replace(f"{description} ... Done")
class Runner(tk.Toplevel):
    """Modal progress dialog that runs a function in a background thread

    The dialog blocks the parent window while the function runs and destroys
    itself when the function finishes (successfully or not).
    """

    def __init__(self, parent, title=None):
        super().__init__(parent)
        self.parent = parent
        self.transient(parent)  # Do not show up as independent window
        if title is not None:
            self.title(title)

        # Dialog body: a text label and an indeterminate progress bar
        body = tk.Frame(self)
        self._label = tk.Label(body, text="")
        self._label.pack(padx=5, pady=5)
        self._progress = ttk.Progressbar(body, mode="indeterminate", length=400)
        self._progress.pack(padx=5, pady=5)
        body.pack(padx=5, pady=5)

        self.grab_set()  # Block parent
        self.protocol("WM_DELETE_WINDOW", lambda: None)  # Disable close button
        # Place the dialog slightly offset from the parent window
        self.geometry(f"+{parent.winfo_rootx() + 100}+{parent.winfo_rooty() + 100}")

    def run(self, func, *args, description="", **kwargs):
        """Run command in a separate thread to not block GUI"""
        self._label["text"] = description
        self._progress.start()
        command = Thread(target=self._run_and_exit, args=(func,) + args, kwargs=kwargs)
        command.start()
        # Blocks until the dialog is destroyed by _run_and_exit() -> cancel()
        self.wait_window(self)

    def _run_and_exit(self, func, *args, **kwargs):
        """Run function, exit Runner when done"""
        try:
            func(*args, **kwargs)
        finally:
            # Always close the dialog, even if func raised
            self.cancel()

    def cancel(self):
        """Destroy the Runner dialog and reset focus to parent"""
        self.parent.focus_set()
        self.destroy()
class Status(ttk.Frame):
    """Status line widget: a read-only scrolled text area for log/status messages"""

    def __init__(self, master):
        super().__init__(master)
        cfg = config.there.status

        # Set up fonts
        # Unpacks the family out of the (family, size, style) tuple while also
        # keeping the whole tuple bound to `font`
        font_family, _, _ = font = (cfg.font_family.str, cfg.font_size.int, cfg.font_style.str)
        if font_family:
            available_fonts = tk_font.families()
            if font_family not in available_fonts:
                log.warn(f"Font family {font_family!r} is not available. Use one of: {', '.join(available_fonts)}")
        else:
            # Fall back to the Tk default text font family
            font_family = tk_font.nametofont("TkTextFont").actual()["family"]
            font = (font_family,) + font[1:]

        # Lay out widgets (previous/next buttons exist but are not packed)
        self._previous_icon = tk.PhotoImage(
            file=config.files.path("there_button_icon", file_vars=dict(name="previous"))
        )
        self._previous = ttk.Button(self, image=self._previous_icon, command=lambda: self.write("Previous"))
        # self._previous.pack(side=tk.LEFT, fill=tk.Y)
        self._next_icon = tk.PhotoImage(file=config.files.path("there_button_icon", file_vars=dict(name="next")))
        self._next = ttk.Button(self, image=self._next_icon, command=lambda: self.write("Next"))
        # self._next.pack(side=tk.RIGHT, fill=tk.Y)
        self._text = scrolledtext.ScrolledText(self, height=cfg.height.int, wrap=tk.WORD, font=font)
        self._text.pack(fill=tk.X, expand=False)

        # Initialize widgets: show a startup banner and make the text read-only
        startup_text = f"Start {util.get_program_info()} at {datetime.now():{config.FMT_datetime}}"
        self._text.insert(1.0, startup_text)
        self._text.config(state=tk.DISABLED)
        self._previous.config(state=tk.DISABLED)
        self._next.config(state=tk.DISABLED)

    def clear(self):
        """Clear status"""
        # Temporarily enable the widget; it is kept DISABLED to be read-only
        self._text.config(state=tk.NORMAL)
        self._text.delete(1.0, tk.END)
        self._text.config(state=tk.DISABLED)

    def write(self, text):
        """Write text to status"""
        self._text.config(state=tk.NORMAL)
        self._text.insert(tk.END, f"\n{text}")  # New-line in front to avoid blank line at end
        self._text.yview(tk.END)  # Move scrollbar to bottom
        self._text.config(state=tk.DISABLED)

    def replace(self, text):
        """Replace the last line in status with text"""
        self._text.config(state=tk.NORMAL)
        self._text.delete(float(self._text.index(tk.END)) - 1, tk.END)  # Delete last line
        self._text.insert(tk.END, f"\n{text}")  # Replace with new text
        self._text.yview(tk.END)  # Move scrollbar to bottom
        self._text.config(state=tk.DISABLED)
class Tab(ttk.Frame):
    """Base class for notebook tabs: widget-building helpers and chained updates"""

    # Overridden by each subclass with the tab's registry/config name
    name = "updated by subclasses"

    def __init__(self, master, vars_):
        super().__init__(master, name=self.name)
        self.master = master
        self.vars = vars_
        self.widgets = dict()  # widgets by name, used e.g. by select_dropdown()
        self.widget_updates = list()  # ordered chain of (widget, update_func)
        self._icon = None  # lazily loaded by the icon property
        self.init_tab()

    def init_tab(self):
        """Initialize the contents of the tab"""
        pass

    @property
    def icon(self):
        # Loaded on first access and cached (keeps a reference so Tk won't GC it)
        if self._icon is None:
            self._icon = tk.PhotoImage(file=config.files.path("there_tab_icon", file_vars=dict(name=self.name)))
        return self._icon

    def add_to(self, master):
        """Add tab to master widget"""
        master.add(
            self, text=f" {self.name.title().replace('_', ' ')} ", underline=2, image=self.icon, compound="left"
        )

    def select(self):
        """Make this tab the selected tab in the notebook"""
        self.master.select(f"{self.master}.{self.name}")

    def status(self, text):
        """Write text to the application status line"""
        self.master.master.status.write(text)

    def add_button(self, frame, text, command):
        """Add a single button to frame (packed from the right)"""
        ttk.Button(frame, text=text, command=command).pack(side=tk.RIGHT, padx=1, pady=0)

    def add_buttons(self, frame, instance, *button_list):
        """Add buttons wired to `button_<name>` methods found on instance"""
        for button in reversed(button_list):  # Reversed since we are packing to the right
            try:
                button_func = getattr(instance, f"button_{button}")
            except AttributeError:
                buttons = ", ".join(b[7:] for b in dir(instance) if b.startswith("button_"))
                raise ValueError(f"Button '{button}' in config is not recognized. Use one of: {buttons}") from None
            self.add_button(frame, button.title().replace("_", " "), button_func)

    def add_checkboxes(self, frame, instance, *name_list):
        """Add one CheckBox per name, tied to instance (the figure)"""
        for name in name_list:
            CheckBox(frame, name=name, vars_=self.vars, figure=instance).pack(side=tk.LEFT, padx=5)

    def add_dropdown(self, frame, name, figure=None):
        """Add a registered dropdown widget and hook it into the update chain"""
        try:
            dropdown = DROPDOWNS[name](frame, self.vars, figure)
        except KeyError:
            dropdowns = ", ".join(DROPDOWNS.keys())
            raise ValueError(f"Dropdown '{name}' in config is not recognized. Use one of: {dropdowns}") from None
        self.widgets[name] = dropdown
        tk.Label(frame, text=f"{dropdown.label}:").pack(side=tk.LEFT, padx=5, pady=5)
        dropdown.pack(side=tk.LEFT)
        self.add_widget_update(dropdown, dropdown.update_options)

    def add_entry(self, frame, name):
        """Add a labeled text entry to frame"""
        # NOTE(review): the Entry widget is not stored in self.widgets —
        # confirm whether callers are expected to reach it later
        tk.Label(frame, text=f"{name.title()}:").pack(side=tk.LEFT, padx=5, pady=5)
        entry = tk.Entry(frame, width=20)
        entry.pack(side=tk.LEFT)

    def add_widget_update(self, widget, update_func):
        """Append (widget, update_func) to the chain, linking the previous widget to it"""
        if self.widget_updates:
            last_widget = self.widget_updates[-1][0]
            last_widget.set_next_update(update_func)
        self.widget_updates.append((widget, update_func))

    def add_radiobuttons(self, frame, group, *name_list):
        """Add a labeled group of radiobuttons sharing the group variable"""
        self.widgets[group] = dict()
        group_name = group.replace("_", " ").title()
        tk.Label(frame, text=f"{group_name}:").pack(side=tk.LEFT, padx=5)
        for name in name_list:
            self.widgets[group][name] = Radiobutton(frame, name=name, group=group, vars_=self.vars)
            self.widgets[group][name].pack(side=tk.LEFT, padx=5)

    def update_all(self):
        """Trigger the first update in the chain; widgets cascade to the next ones"""
        self.widget_updates[0][1]()

    def autoupdate(self):
        """Automatically update tabs"""
        interval = config.there.get("update_interval").str
        interval_unit = config.there.general.update_interval.meta.get("unit", "minute")
        if interval:
            interval_ms = int(float(interval) * Unit(interval_unit, "milliseconds"))
            # Re-schedule itself so updates continue at the configured interval
            self.after(interval_ms, self.autoupdate)
            log.info(f"Auto-updating {self.name.title()} tab, next update in {interval_ms / 1000:.0f} seconds")
            self.update_all()

    def remember_data(self):
        """Store the current dataset in the figure for comparison plotting"""
        return self.figure.remember_data()

    def forget_data(self):
        """Drop the remembered comparison dataset"""
        return self.figure.forget_data()

    def show_vars(self):
        """Print this tab's variables (debugging helper)"""
        pprint(self.vars)

    def run_analysis(self):
        """Run the current analysis"""
        exe = where.__executable__
        cmd = "{exe} {rundate:%Y %m %d} --{pipeline}".format(exe=exe, **self.vars).split()
        if self.vars["id"]:
            cmd.append(f"--id={self.vars['id']}")
        if self.vars.get("force_rerun", False):
            cmd.append("-F")
        if self.vars.get("fresh_run", False):
            cmd.append("-N")
        if self.vars.get("traceback", False):
            cmd.append("-T")
        if self.vars["pipeline"] == "vlbi":  # TODO better solution
            cmd.append(f"--session={self.vars['session']}")
        description = f"Running {self.vars['pipeline'].upper()} analysis: {' '.join(cmd)}"
        self.master.master.run(subprocess.check_call, cmd, description=description)
        self.update_all()

    # Aliases exposing the methods above under the button_*/update_* names used in config
    update_figure = update_all
    button_update = update_figure
    button_remember = remember_data
    button_forget = forget_data
    button_show_vars = show_vars
    button_run_analysis = run_analysis
    button_rerun = run_analysis

    def select_dropdown(self, selections):
        """Set dropdown choices from "key=value" strings, ignoring unknown keys/values"""
        for selection in selections:
            key, _, value = selection.partition("=")
            if key not in self.widgets:
                continue
            if value not in self.widgets[key]["values"]:
                continue
            self.widgets[key].choice.set(value)
class TabFigure(Tab):
    """General figure tab

    Builds the standard layout: dataset dropdowns, the matplotlib figure,
    plot-control dropdowns, filters, checkboxes and buttons — all driven by
    the tab's config section.
    """

    def init_tab(self):
        """Build the tab widgets from config and start automatic updates"""
        cfg = config.there[self.name]

        # Dataset controls
        dataset_line = ttk.Frame(self)
        for dropdown in cfg.dataset_dropdowns.list:
            self.add_dropdown(dataset_line, dropdown)
        dataset_line.pack(side=tk.TOP, fill=tk.X, expand=False)

        # Figure area
        self.figure = Plot(self, self.vars)
        self.add_widget_update(self.figure, self.figure.update_dataset)

        # Plot controls
        plot_line = ttk.Frame(self)
        self.add_dropdown(plot_line, "plot_type", self.figure)  # TODO: Config?
        self.add_dropdown(plot_line, "x_axis", self.figure)
        self.add_dropdown(plot_line, "y_axis", self.figure)
        self.add_dropdown(plot_line, "color", self.figure)
        self.add_dropdown(plot_line, "size", self.figure)
        plot_line.pack(side=tk.TOP, fill=tk.X, expand=False)

        # Filter controls (filter dropdown classes are created on demand)
        filter_line = ttk.Frame(self)
        for filter in cfg.filters.list:
            filter_cls = filter_factory(filter)
            self.add_dropdown(filter_line, filter_cls.name, self.figure)
        filter_line.pack(side=tk.TOP, fill=tk.X, expand=False)

        # Tool controls
        button_line = ttk.Frame(self)
        self.add_checkboxes(button_line, self.figure, *cfg.checkboxes.list)
        self.add_buttons(button_line, self, *cfg.buttons.list)
        button_line.pack(side=tk.TOP, fill=tk.X, expand=False)

        # Edit operations
        tool_line = ttk.Frame(self)
        self.add_radiobuttons(tool_line, "double_click", *cfg.double_clicks.tuple)
        self.add_buttons(tool_line, self.figure, *cfg.figure_buttons.list)
        tool_line.pack(side=tk.TOP, fill=tk.X, expand=False)

        # Update figure and start automatic updates
        self.add_widget_update(self.figure, self.figure.update_plot)
        self.update_all()
        self.autoupdate()

    def dataset_info(self):
        """Log and show basic info (repr and timestamps) about the current dataset"""
        dset = self.figure.dataset
        if dset is None:
            return
        update_txt = [dset.repr]
        timestamps = config.timestamps(**self.vars)
        if timestamps:
            update_txt += [f"{k.title()} {v}" for k, v in timestamps.items()]
        log.out("\n ".join(update_txt))
        self.status(", ".join(update_txt))

    def next_double_click_option(self):
        """Cycle the double-click radiobutton group to its next option"""
        self.next_radiobutton_option("double_click")

    def next_radiobutton_option(self, group):
        """Advance radiobutton group to its next choice (wrapping around)"""
        choices = list(self.widgets[group])
        current_choice = self.vars[group]
        current_idx = choices.index(current_choice)
        next_choice = choices[(current_idx + 1) % len(choices)]
        self.widgets[group][current_choice].choice.set(next_choice)
        self.vars[group] = next_choice
@register_tab
class TabTimeseries(TabFigure):
    """Figure tab showing timeseries data (stored under a fixed epoch date)."""

    name = "timeseries"

    def init_tab(self):
        """Pin the run date to 1970-01-01, then build the tab as usual."""
        # Timeseries datasets are stored under the fixed date 1970-01-01
        self.vars.update(dict(rundate=date(1970, 1, 1)))
        super().init_tab()
@register_tab
class TabAnalysis(TabFigure):
    """Figure tab for a single analysis, with config/log/map helpers"""

    name = "analysis"

    def update_config(self, config_opts):
        """Apply the given option strings to the analysis (tech) configuration"""
        cfg_log = [f"Updating configuration:"] + config_opts
        log.info("\n ".join(cfg_log))
        # NOTE(review): options already starting with "--" are dropped here
        # rather than passed through unchanged — confirm this is intended
        config_opts = [f"--{o}" for o in config_opts if not o.startswith("--")]
        with config.update_tech_config(use_options=False, **self.vars) as cfg:
            cfg.update_from_options(options=config_opts, source=util.get_program_name())

    def show_log(self):
        """Print the analysis log file at the configured log level"""
        log_path = config.files.path("log", file_vars=self.vars)
        if not log_path.exists():
            log.warn(f"Log file {log_path} does not exist")
            return
        log.print_file(log_path, config.there.log.print_log_level.str)

    def show_map(self):
        """Open a map visualization of the current dataset"""
        pipelines.make_map(self.figure.dataset)

    def edit_config(self):
        """Open the analysis config file in an editor and timestamp the update"""
        cfg_path = config.files.path("config", file_vars=self.vars)
        if not cfg_path.exists():
            log.warn(f"Config file '{cfg_path}' does not exist")
            return
        self.master.master.run(editor.edit, str(cfg_path), description=f"Editing config file {cfg_path}")
        # Record when the config was last edited
        _vars = self.vars.copy()
        setup.add_timestamp(_vars.pop("rundate"), _vars.pop("pipeline"), "last update", **_vars)

    # Aliases exposing the methods above under the button_* names used in config
    button_log = show_log
    button_show_map = show_map
    button_config = edit_config
class UpdateMixin:
    """Mixin giving widgets a chainable "update the next widget" hook.

    Widgets built with this mixin form an update chain: when one widget
    changes it calls ``update_next()``, which triggers the update function
    of the following widget (registered via ``set_next_update()``).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.next_update_func = None  # set later via set_next_update()

    def set_next_update(self, next_update_func):
        """Register the callable that refreshes the next widget in the chain"""
        self.next_update_func = next_update_func

    def update_next(self):
        """Invoke the registered follow-up update unless updates are suspended"""
        if self.vars["do_update"] and self.next_update_func is not None:
            self.next_update_func()

    @contextmanager
    def no_update(self):
        """Delay updates until end of context manager

        Fix: re-enable updates in a ``finally`` clause so an exception inside
        the context body cannot leave updates permanently disabled.
        """
        self.vars["do_update"] = False
        try:
            yield
        finally:
            self.vars["do_update"] = True
class ConfigTree(ttk.Treeview, UpdateMixin):
    """Treeview showing a configuration as sections with value/description columns

    NOTE(review): ``UpdateMixin`` is listed after ``ttk.Treeview`` and tkinter
    widgets do not cooperatively chain ``super().__init__()``, so
    ``UpdateMixin.__init__`` (which sets ``next_update_func``) may never run —
    confirm callers set ``next_update_func`` before ``update_next()`` is used.
    """

    def __init__(self, master):
        super().__init__(master, columns=("value", "help"))
        # Two extra columns next to the tree itself: the value and its description
        self.heading("value", text="Value")
        self.column("value", width=300)
        self.heading("help", text="Description")
        self.column("help", width=700)

    def update(self, cfg):
        """Fill the tree with one top-level item per config section

        NOTE(review): this shadows tkinter's ``Widget.update()`` — presumably
        intentional, but verify no caller relies on the Tk behavior.
        """
        for idx, section in enumerate(cfg.sections):
            tree_section = self.insert("", idx, text=section.name)
            for idx_section, (key, entry) in enumerate(section.items()):
                self.insert(tree_section, idx_section, text=key, values=(entry.str, entry.help))
class Plot(FigureCanvasTkAgg, UpdateMixin):
def __init__(self, master, vars_):
    """Create the matplotlib canvas embedded in the tab

    Args:
        master: Parent widget (the tab).
        vars_:  Shared variable dict for the tab, read/updated by dropdowns.
    """
    figsize = (8, config.there.minimum_figure_height.float)
    self.figure = matplotlib.figure.Figure(figsize=figsize, dpi=100)
    self.graph = self.figure.add_subplot(1, 1, 1)
    super().__init__(self.figure, master)
    self.master = master
    self.filters = list()  # filter names registered via add_filter()
    self.vars = vars_
    self.dataset = None  # current dataset, set by update_dataset()
    self.other_dataset = None  # remembered dataset for comparison plots
    self.next_update_func = None
    self.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
    NavigationToolbar2Tk(self, master).update()
    self._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
    self.picker_id = None  # id of the currently connected pick_event callback
    self.ignore_obs = None  # boolean mask of observations hidden from plots
    self.cmap = config.there.colormap.str
    self.event_colors = dict()  # event type -> color, filled by add_event_color()
    self.draw()
def add_filter(self, filter_):
    """Register a dropdown filter on this plot, stored without its ``filter_`` prefix."""
    short_name = filter_.name.replace("filter_", "")
    self.filters.append(short_name)
def update_dataset(self):
    """Read the dataset selected by the current variables and derive helper fields"""
    if self.vars["rundate"] is None:
        location = config.files.directory_work.directory.replaced.str.split("{")[0]
        log.error(f"No data found in {location}. Run '{where.__executable__}' to generate data.")
        return

    # Read dataset from disk
    dset_vars = dict()
    for var in config.there.general.dataset_variables.list:
        # NOTE(review): `var` is a name string from config and is never None —
        # presumably this was meant to guard self.vars[var]; confirm intent
        if var is not None:
            dset_vars[var] = self.vars[var]
    self.dataset = dataset.Dataset.read(use_options=False, **dset_vars)

    # Add event interval field: for each observation, count how many events
    # happened before it (offset by 0.5 so points plot between event lines)
    events = self.dataset.meta.get_events()
    if events and "time" in self.dataset.fields:
        obs_epochs = self.dataset.time.utc.jd
        event_epochs = np.array([e[0].utc.jd for e in self.dataset.meta.get_events()])
        event, obs = np.meshgrid(event_epochs, obs_epochs)
        event_interval = np.sum(event - obs < 0, axis=1) + 0.5
        if "event_interval" not in self.dataset.fields:
            self.dataset.add_float("event_interval", event_interval)
        self.dataset.event_interval[:] = event_interval

    # Observations that will not be plotted
    self.ignore_obs = np.zeros(self.dataset.num_obs, dtype=bool)

    # Update the next widget
    self.update_next()
def remember_data(self):
    """Keep the current dataset and its plotted data around for comparison plots."""
    self.other_dataset = self.dataset
    for var in ("x_axis", "y_axis", "color", "size"):
        self.vars[f"{var}_other"] = self.vars.get(f"{var}_data")
    self.update_plot()
def forget_data(self):
    """Drop the remembered comparison dataset and its cached plot data."""
    self.other_dataset = None
    for var in ("x_axis", "y_axis", "color", "size"):
        self.vars[f"{var}_other"] = None
    self.update_plot()
def add_event_color(self, event_type):
    """Assign a color to *event_type*, respreading all event colors over the tab20 map."""
    if event_type in self.event_colors:
        return
    all_types = list(self.event_colors.keys()) + [event_type]
    positions = np.linspace(0, 1, len(all_types))
    for etype, pos in zip(all_types, positions):
        self.event_colors[etype] = cm.tab20(pos)
@property
def title(self):
    """Figure title assembled from pipeline, session, date and stage."""
    parts = [self.vars["pipeline"].upper()]
    if self.vars.get("session"):
        parts.append(f" {self.vars['session']}")
    if self.vars.get("date"):
        parts.append(f" {self.vars['date']}")
    if self.vars.get("stage"):
        parts.append(f" - {self.vars['stage']}".title())
    return "".join(parts)
@property
def xlabel(self):
    """Label for the x-axis: prettified field name plus unit (unless multi-column)."""
    try:
        axis_unit = self.dataset.unit(self.vars["x_axis_name"])
    except exceptions.UnitError:
        axis_unit = None
    # Multi-column data mixes units, so the unit suffix is suppressed
    if self.vars["x_axis_columns"]:
        unit_str = ""
    else:
        unit_str = f" [{axis_unit}]" if axis_unit else ""
    pretty_name = self.vars["x_axis_name"].replace("_", " ").replace(".", " - ").title()
    return pretty_name + unit_str
@property
def xlim(self):
    """Plot range of the current x-axis data, delegated to _calculate_range()"""
    return self._calculate_range(self.vars["x_axis_data"])
@property
def ylabel(self):
    """Label for the y-axis: prettified field name plus unit (unless multi-column)."""
    try:
        axis_unit = self.dataset.unit(self.vars["y_axis_name"])
    except exceptions.UnitError:
        axis_unit = None
    # Multi-column data mixes units, so the unit suffix is suppressed
    if self.vars["y_axis_columns"]:
        unit_str = ""
    else:
        unit_str = f" [{axis_unit}]" if axis_unit else ""
    pretty_name = self.vars["y_axis_name"].replace("_", " ").replace(".", " - ").title()
    return pretty_name + unit_str
@property
def ylim(self):
    """Plot range of the current y-axis data, delegated to _calculate_range()"""
    return self._calculate_range(self.vars["y_axis_data"])
def update_plot(self):
    """Redraw the figure using the registered plot type and re-wire pick events"""
    log.debug(f"Updating the {self.vars['plot_type']}-plot")
    tooltip_fields = config.there.tooltip_fields.tuple
    idx, idx_other = self.do_filter()
    self.figure.canvas.mpl_connect("pick_event", self.dbl_click_pick)  # TODO: Delete these?

    # Use the registered plotting functions to plot the correct plot
    self.figure.clear()
    PLOT_TYPES[self.vars["plot_type"]](self)

    # Some quick info, ignore if info can't be calculated
    info_data = self.vars["y_axis_data"]
    try:
        # Restrict the statistics to filtered rows where all components are finite
        info_idx = idx & np.all(np.isfinite(info_data), axis=tuple(range(1, info_data.ndim)))
        log.out(
            f"Num obs: {np.sum(info_idx)} Mean: {np.mean(info_data[info_idx])} "
            f"RMS: {np.sqrt(np.mean(np.square(info_data[info_idx])))}"
        )
    except TypeError:
        # Non-numeric y-data (e.g. strings/datetimes) — skip the summary
        pass

    # Replace any previously connected pick handler with a fresh closure
    if self.picker_id:
        self.figure.canvas.mpl_disconnect(self.picker_id)

    def on_pick(event):
        """Show tooltip info for the picked observation(s) (single click only)"""
        dset = self.dataset
        if event.mouseevent.dblclick:
            return

        # Fields to show: configured tooltip fields plus the currently plotted ones
        pick_fields = [
            f for f in dset.fields if f in tooltip_fields or any(f.startswith(p + "_") for p in tooltip_fields)
        ]
        fields = sorted(
            {
                f
                for f in pick_fields
                + [
                    self.vars["x_axis_name"],
                    self.vars["y_axis_name"],
                    self.vars["color_name"],
                    self.vars["size_name"],
                ]
                if f in dset.fields
            }
        )
        for ind in self.event2dset(event.ind):

            def unit(field):
                # Fields without a registered unit are shown without one
                try:
                    return dset.unit(field)
                except exceptions.UnitError:
                    return ""

            # Time fields use plot_values() for readable formatting
            texts = [
                f"{f}: {dset.plot_values(f)[ind]} {unit(f)}" for f in fields if f.startswith("time")
            ]
            texts += [f"{f}: {dset[f][ind]} {unit(f)}" for f in fields if not f.startswith("time")]
            log.out("\n ".join(texts))
            self.master.status(" ".join(texts))

    self.picker_id = self.figure.canvas.mpl_connect("pick_event", on_pick)

    # Turn off scientific notation on axis labels
    for ax in self.figure.axes:
        try:
            ax.xaxis.get_major_formatter().set_scientific(False)
        except AttributeError:
            pass
        try:
            ax.yaxis.get_major_formatter().set_scientific(False)
        except AttributeError:
            pass
    self.figure.subplots_adjust(right=0.99, top=0.95)
    self.draw()
def do_filter(self):
    """Return index arrays for observations passing the active dropdown filters.

    Returns the pair (idx_data, idx_other) for the current and the remembered
    dataset; when no dataset is remembered the same array is returned twice.
    """
    active = {
        name: self.vars[f"filter_{name}"]
        for name in self.filters
        if self.vars[f"filter_{name}"] != "no filter"
    }
    idx_data = self.dataset.filter(**active, idx=np.logical_not(self.ignore_obs))
    try:
        idx_other = self.other_dataset.filter(**active)
    except AttributeError:  # other_dataset is None
        idx_other = idx_data
    return idx_data, idx_other
def event2dset(self, ind):
    """Convert event.ind indexing to dataset indices

    The event.ind is not counting observations that are filtered out.
    """
    idx, _ = self.do_filter()
    data_vars = ("x_axis_data", "y_axis_data", "color_data", "size_data")
    # Plotted points also exclude non-finite values, so build a finiteness
    # mask over the filtered observations for every plotted data array
    idx_finite = np.ones(sum(idx), dtype=bool)
    for var in data_vars:
        try:
            data = self.vars[var][idx]
            idx_finite &= np.all(np.isfinite(data), axis=tuple(range(1, data.ndim)))
        except TypeError:
            # Non-numeric data (e.g. strings/datetimes): treat all values as finite
            idx_finite &= np.ones(sum(idx), dtype=bool)
    # Map picked plot indices back to absolute dataset row numbers
    return np.where(idx)[0][ind][idx_finite[ind]]
def dbl_click_pick(self, event, mouse_event=None):
    """Dispatch a double click to the handler chosen by the double_click radiobuttons."""
    mouse_event = event.mouseevent if mouse_event is None else mouse_event
    if not mouse_event.dblclick:
        return False, dict(ind=list())
    handler = getattr(self, f"dbl_click_{self.vars['double_click']}")
    return handler(event, mouse_event)
def dbl_click_do_nothing(self, _, __):
    """No-op double-click handler used when no action is configured."""
    log.debug("Doing nothing about double click")
    return False, {}
def dbl_click_add_clock_break(self, event, mouse_event):
    """Add a clock break for the filtered station at the clicked epoch

    The break is stored as an event on the dataset (for visualization) and
    appended to the analysis config so the next run picks it up.
    """
    if "ind" in dir(event):  # Workaround so that clock breaks are not added twice
        return False, dict()
    if self.vars["filter_station"] == "no filter":
        log.error("Choose a station to add a clock break")
    else:
        # Convert the clicked x-coordinate to a UTC Time object
        d = mdates.num2date(mouse_event.xdata).replace(tzinfo=None)
        time = Time(d, fmt="datetime", scale="utc")

        # Check if there is a suspected clock break nearby
        # (threshold: 1/200 of the visible x-range, in days)
        all_data = self.vars["x_axis_data"]
        threshold = (max(all_data) - min(all_data)).total_seconds() * Unit.seconds2days / 200
        for suspect_time, _, suspect_station in self.dataset.meta.get_events("suspected_clock_break"):
            if (
                suspect_station == self.vars["filter_station"]
                and np.abs(time.utc.jd - suspect_time.utc.jd) < threshold
            ):
                # Snap to the suspected break instead of the clicked epoch
                log.info("Converting suspected clock break")
                time = suspect_time
                # TODO: dset.remove_event ...
        clock_break = f"{self.vars['filter_station']} {time.datetime:{config.FMT_datetime}}"
        log.info(f"Adding clock break: '{clock_break}'")

        # Add event on dataset to visualize
        self.dataset.meta.add_event(time, "unprocessed_clock_break", self.vars["filter_station"])
        self.dataset.write()
        self.update_plot()

        # Add to config file
        with config.update_tech_config(use_options=False, **self.vars) as cfg:
            current = cfg.vlbi_clock_poly.clock_breaks.as_list(", *")
            updated = ", ".join(sorted(current + [clock_break]))
            cfg.update("vlbi_clock_poly", "clock_breaks", updated, source=util.get_program_name())
    return False, dict()
def dbl_click_ignore_observation(self, event, _):
    """Mark the double-clicked observation(s) as ignored

    Ignored observations are hidden from the plot immediately and written to
    the ignore_observation section of the analysis config.
    """
    if "ind" not in dir(event):
        return False, dict()
    # VLBI datasets identify observations by baseline, others by station
    station_field = "baseline" if "baseline" in self.dataset.fields else "station"

    # Find which observations to ignore
    ignore_list = list()
    for idx in self.event2dset(event.ind):
        ignore_str = f"{self.dataset.time.utc.iso[idx]} {self.dataset[station_field][idx]}"
        log.info(f"Adding '{ignore_str}' to ignore_observation")
        ignore_list.append(ignore_str)

        # Add to ignore filter for visualization
        # TODO: Should ignore_obs be in dset.meta so it will be permanent?
        self.ignore_obs[idx] = True
    self.update_plot()

    # Add to config file
    with config.update_tech_config(use_options=False, **self.vars) as cfg:
        current = cfg.ignore_observation.observations.as_list(", *")
        updated = ", ".join(sorted(current + ignore_list))
        cfg.update("ignore_observation", "observations", updated, source=util.get_program_name())
    return False, dict()
def dbl_click_go_to_analysis(self, event, mouse_event):
    """Jump to the analysis tab for the observation that was double-clicked"""
    if "ind" not in dir(event) or len(event.ind) == 0:
        return False, dict()

    # Figure out info about the observation that was clicked
    idx = self.event2dset(event.ind)[0]  # Use first event index
    var_names = ["rundate", "pipeline", "id"]
    var_names += [f[7:] for f in self.vars.keys() if f.startswith("filter_")]
    analysis_vars = dict()
    for var in var_names:
        # NOTE(review): var_names already had the "filter_" prefix stripped
        # above, so this branch appears to be dead code — confirm
        fvar = var[7:] if var.startswith("filter_") else var
        if fvar in self.dataset.fields:
            analysis_vars[var] = self.dataset[fvar][idx]
        elif fvar in self.vars:
            analysis_vars[var] = self.vars[fvar]
        else:
            print(f"Unknown fvar {fvar} in dbl_click_go_to_analysis")

    # Fix date variables
    if "rundate" in analysis_vars:
        analysis_vars["rundate"] = datetime.strptime(analysis_vars["rundate"], config.FMT_date)
        analysis_vars["date"] = analysis_vars["rundate"].strftime("%Y%m%d")

    # Update session variables
    log.info(f"Opening {analysis_vars['pipeline'].upper()} {analysis_vars.get('date', '')}")
    self.master.status(f"Opening {analysis_vars['pipeline'].upper()} {analysis_vars.get('date', '')}")
    analysis_tab = self.master.master.master.tabs["analysis"]
    # Suspend chained updates while setting several dropdowns at once
    with analysis_tab.figure.no_update():
        for key, value in analysis_vars.items():
            if key in analysis_tab.widgets:
                analysis_tab.widgets[key].choice.set(value)
            elif f"filter_{key}" in analysis_tab.widgets:
                analysis_tab.widgets[f"filter_{key}"].choice.set(value)

    # Switch to session tab and update
    analysis_tab.update_all()
    analysis_tab.select()
    return False, dict()
def button_ignore_baseline(self):
    """Add the currently filtered baseline to the ignore_baseline config."""
    if "filter_baseline" not in self.vars or self.vars["filter_baseline"] == "no filter":
        log.error("Choose a baseline in the filter menu to ignore it")
        return
    log.info(f"Adding {self.vars['filter_baseline']} to ignore_baseline")
    with config.update_tech_config(use_options=False, **self.vars) as cfg:
        current = cfg.vlbi_ignore_baseline.baselines.as_list(", *")
        updated = ", ".join(sorted(current + [self.vars["filter_baseline"]]))
        cfg.update("vlbi_ignore_baseline", "baselines", updated, source=util.get_program_name())
def button_add_baseline_clock_offset(self):
    """Add the currently filtered baseline to the baseline_clock_offsets config."""
    if "filter_baseline" not in self.vars or self.vars["filter_baseline"] == "no filter":
        log.error("Choose a baseline in the filter menu to estimate the baseline clock offset")
        return
    log.info(f"Adding {self.vars['filter_baseline']} to baseline_clock_offsets")
    with config.update_tech_config(use_options=False, **self.vars) as cfg:
        current = cfg.vlbi_clock_poly.baseline_clock_offsets.as_list(", *")
        updated = ", ".join(sorted(current + [self.vars["filter_baseline"]]))
        cfg.update("vlbi_clock_poly", "baseline_clock_offsets", updated, source=util.get_program_name())
def button_ignore_station(self):
if "filter_station" not in self.vars or self.vars["filter_station"] == "no filter":
log.error("Choose a station in the filter menu to ignore it")
else:
log.info(f"Adding {self.vars['filter_station']} to ignore_station")
with config.update_tech_config(use_options=False, **self.vars) as cfg:
current = cfg.ignore_station.stations.as_list(", *")
updated = ", ".join(sorted(current + [self.vars["filter_station"]]))
cfg.update("ignore_station", "stations", updated, source=util.get_program_name())
def button_ignore_source(self):
if "filter_source" not in self.vars or self.vars["filter_source"] == "no filter":
log.error("Choose a source in the filter menu to ignore it")
else:
log.info(f"Adding {self.vars['filter_source']} to ignore_source")
with config.update_tech_config(use_options=False, **self.vars) as cfg:
current = cfg.vlbi_ignore_source.sources.as_list(", *")
updated = ", ".join(sorted(current + [self.vars["filter_source"]]))
cfg.update("vlbi_ignore_source", "sources", updated, source=util.get_program_name())
# Different types of plots
@plot_type
def plot_scatter(self, gs=None):
    """Draw a scatter plot of the chosen x- and y-fields.

    Multi-dimensional fields are drawn as a grid of subplots, one per column
    combination. Dataset events are drawn as dotted, pickable lines, and
    previously remembered data are overlaid when type-compatible.

    Args:
        gs:  GridSpec cell to draw into. Defaults to a fresh full-figure cell.

    Returns:
        Tuple (idx, idx_other) of filter indices for surrounding plot functions.
    """
    # Fix: the default was previously a GridSpec instance created once at
    # class-definition time (mutable default argument); create it per call.
    if gs is None:
        gs = gridspec.GridSpec(1, 1)[0]
    idx, idx_other = self.do_filter()
    x_data, x_events = self._identify_events(self.vars["x_axis_data"])
    y_data, y_events = self._identify_events(self.vars["y_axis_data"])
    if x_data.ndim < 1 or y_data.ndim < 1:
        return idx, idx_other

    def event_pick(event, axis):
        # Build an on_pick callback that reports the given event when a click
        # lands within 1% of the axis span of the event time
        all_data = self.vars[axis + "_axis_data"]
        try:
            threshold = (max(all_data) - min(all_data)).total_seconds() * Unit.seconds2days / 100
        except AttributeError:
            threshold = (max(all_data) - min(all_data)) / 100
        e_time, e_type, e_description = event
        e_type = e_type.replace("_", " ").title()

        def on_pick(_, mouse_event):
            if mouse_event.dblclick:
                return False, dict(ind=list())
            mouse_time = getattr(mouse_event, axis + "data")
            event_time = mdates.date2num(e_time.datetime)
            if mouse_time and abs(event_time - mouse_time) < threshold:
                log.out(f"Event: {e_time.datetime:{config.FMT_datetime}} - {e_type}\n {e_description}")
                self.master.status(f"Event: {e_time.datetime:{config.FMT_datetime}} {e_type}: {e_description}")
                return True, dict(ind=list())
            else:
                return False, dict(ind=list())

        return on_pick

    # Handle multi-dimensional data as separate plots
    ncols = 1 if x_data.ndim <= 1 else x_data.shape[1]
    nrows = 1 if y_data.ndim <= 1 else y_data.shape[1]
    sub_gs = gridspec.GridSpecFromSubplotSpec(nrows, ncols, subplot_spec=gs)
    for plot_num, (num_y, num_x) in enumerate(itertools.product(range(nrows), range(ncols))):
        ax = self.figure.add_subplot(sub_gs[plot_num])
        ax.clear()
        # Invisible dummy point registers the double-click picker on the axes
        ax.scatter(0, 0, s=0, picker=self.dbl_click_pick, cmap=self.cmap)
        idx_x = slice(None) if x_data.ndim == 1 else (slice(None), num_x)
        idx_y = slice(None) if y_data.ndim == 1 else (slice(None), num_y)
        xlim = self._pad_range(self.xlim if x_data.ndim == 1 else self.xlim[num_x])
        ylim = self._pad_range(self.ylim if y_data.ndim == 1 else self.ylim[num_y])
        ax.scatter(
            x_data[idx][idx_x],
            y_data[idx][idx_y],
            c=self._project_to_1d(self.vars["color_data"][idx]),
            s=self.vars["size_data"][idx],
            marker=config.there.scatter.marker.str,
            alpha=config.there.scatter.alpha.float,
            cmap=self.cmap,
            picker=True,
        )
        # Overlay remembered ("other") data when available and type-compatible
        try:
            x_other = self.vars.get("x_axis_other")[idx_other][idx_x]
            y_other = self.vars.get("y_axis_other")[idx_other][idx_y]
            if x_data[idx][idx_x].dtype != x_other.dtype:
                log.warn("Cannot plot remembered data. X-axis data types are not compatible ")
                raise ValueError("Remembered data must be of same type as the new plot")
            if y_data[idx][idx_y].dtype != y_other.dtype:
                log.warn("Cannot plot remembered data. Y-axis data types are not compatible ")
                raise ValueError("Remembered data must be of same type as the new plot")
            ax.scatter(
                x_other,
                y_other,
                c=config.there.scatter.color_remember.str,
                s=self.vars.get("size_other")[idx_other],
                marker=config.there.scatter.marker_remember.str,
                alpha=config.there.scatter.alpha_remember.float,
            )
        except (IndexError, TypeError, ValueError):
            log.debug("Not plotting other data")
        # Plot events as dotted vertical (x) / horizontal (y) lines
        for x in x_events:
            ax.plot(
                (x[0].datetime, x[0].datetime), ylim, ":", color=self.event_colors[x[1]], picker=event_pick(x, "x")
            )
        for y in y_events:
            ax.plot(
                xlim, (y[0].datetime, y[0].datetime), ":", color=self.event_colors[y[1]], picker=event_pick(y, "y")
            )
        # Label subplot
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
        if self.vars["x_axis_columns"]:
            column_field = f"{self.vars['x_axis_name']}.{self.vars['x_axis_columns'][num_x]}"
            try:
                axis_unit = self.dataset.unit_short(column_field)
            except exceptions.UnitError:
                axis_unit = ""
            unit_str = " [{}]".format(axis_unit[0]) if axis_unit else ""
            ax.set_xlabel(self.vars["x_axis_columns"][num_x] + unit_str)
        if self.vars["y_axis_columns"]:
            column_field = f"{self.vars['y_axis_name']}.{self.vars['y_axis_columns'][num_y]}"
            try:
                axis_unit = self.dataset.unit_short(column_field)
            except exceptions.UnitError:
                axis_unit = ""
            unit_str = " [{}]".format(axis_unit[0]) if axis_unit else ""
            ax.set_ylabel(self.vars["y_axis_columns"][num_y] + unit_str)
        # Hide tick labels on the inner subplots of the grid
        if num_x > 0:
            ax.tick_params(left=False, labelleft=False)
        if num_y < nrows - 1:
            ax.tick_params(bottom=False, labelbottom=False)
        else:
            for label in ax.get_xticklabels():
                label.set_rotation(12)
    # Label figure
    self.figure.suptitle(self.title)
    self.figure.text(0.5, 0.01, self.xlabel, ha="center")
    self.figure.text(0.01, 0.5, self.ylabel, va="center", rotation="vertical")
    self.figure.align_ylabels()
    # Return indices to surrounding plot functions
    return idx, idx_other
@plot_type
def plot_scatter_w_hist(self):
    """Scatter plot with marginal histograms along both axes."""
    grid = gridspec.GridSpec(2, 2, height_ratios=[1, 5], width_ratios=[5, 1], hspace=0.02, wspace=0.01)
    idx, idx_other = self.plot_scatter(gs=grid[2])
    # Histogram above the scatter plot (x-axis distribution)
    data_x, range_x = self._convert_datetime_to_sec(self.vars["x_axis_data"])
    range_x = self._pad_range(range_x if data_x.ndim == 1 else self._flatten_range(range_x))
    hist_x = self.figure.add_subplot(grid[0])
    hist_x.hist(data_x[idx], bins=99, range=range_x, alpha=0.75)
    hist_x.set_xlim(range_x)
    hist_x.tick_params(bottom=False, labelbottom=False)
    hist_x.set_ylabel("Count")
    # Histogram right of the scatter plot (y-axis distribution)
    data_y, range_y = self._convert_datetime_to_sec(self.vars["y_axis_data"])
    range_y = self._pad_range(range_y if data_y.ndim == 1 else self._flatten_range(range_y))
    hist_y = self.figure.add_subplot(grid[3])
    hist_y.hist(data_y[idx], bins=49, range=range_y, alpha=0.75, orientation="horizontal")
    hist_y.set_ylim(range_y)
    hist_y.tick_params(left=False, labelleft=False)
    hist_y.set_xlabel("Count")
@plot_type
def plot_polar(self):
    """Polar scatter plot; angle-like y-fields are shown in degrees."""
    axis = self.figure.add_axes([0.1, 0.05, 0.8, 0.8], polar=True)
    axis.clear()
    idx, idx_other = self.do_filter()
    theta, _ = self._convert_datetime_to_sec(self._project_to_1d(self.vars["x_axis_data"]))
    radius, radius_lim = self._convert_datetime_to_sec(self._project_to_1d(self.vars["y_axis_data"]))
    # Convert radians to degrees for certain fields
    field = self.vars["y_axis_name"]
    if field.endswith((".zenith_distance", ".elevation", ".azimuth")):
        radius = np.degrees(radius)
        radius_lim = (np.min((0, np.degrees(np.min(radius_lim)))), np.max((90, np.degrees(np.max(radius_lim)))))
    points = axis.scatter(
        theta[idx],
        radius[idx],
        c=self.vars["color_data"][idx],
        s=self.vars["size_data"][idx],
        marker="x",
        alpha=0.7,
        cmap=self.cmap,
        picker=True,
    )
    axis.set_ylim(self._pad_range(radius_lim))
    axis.set_theta_zero_location("N")  # sets 0(deg) to North
    axis.set_theta_direction(-1)  # sets plot clockwise
    if self.vars["color_name"]:
        self.figure.colorbar(points)
    axis.set_title("{} vs {}".format(self.xlabel, self.ylabel))
    self.figure.suptitle(self.title)
@plot_type
def plot_3d(self):
    """Draw a 3-d scatter plot of whichever axis holds 3-dimensional data."""
    idx, idx_other = self.do_filter()
    # Pick out data: prefer the y-axis, fall back to the x-axis if it is 3-d,
    # otherwise plot x against y in the z=0 plane
    if self.vars["y_axis_3d"]:
        xyz = self.vars["y_axis_data"]
        labels = [f"{axis}: {self.ylabel}" for axis in "XYZ"]
    elif self.vars["x_axis_3d"]:
        xyz = self.vars["x_axis_data"]
        labels = [f"{axis}: {self.xlabel}" for axis in "XYZ"]
    else:
        data_x, _ = self._convert_datetime_to_sec(self.vars["x_axis_data"])
        data_y, _ = self._convert_datetime_to_sec(self.vars["y_axis_data"])
        xyz = np.vstack((data_x, data_y, np.zeros(data_x.shape))).T
        labels = [self.xlabel, self.ylabel, ""]
    x_name, y_name, z_name = labels
    # Do the plotting
    ax = self.figure.add_subplot(1, 1, 1, projection="3d")
    ax.clear()
    ax.scatter(
        xyz[idx, 0],
        xyz[idx, 1],
        xyz[idx, 2],
        c=self.vars["color_data"][idx],
        s=self.vars["size_data"][idx],
        marker="x",
        cmap=self.cmap,
        picker=True,
    )
    ax.set_title(self.title)
    ax.set_xlabel(x_name)
    ax.set_ylabel(y_name)
    ax.set_zlabel(z_name)
def _convert_datetime_to_sec(self, plot_data):
    """Convert datetime-valued arrays to seconds since the first epoch.

    Non-datetime arrays are passed through unchanged. Returns the (possibly
    converted) data together with its value range.
    """
    # Fix: `np.object` was removed in NumPy 1.24; compare against builtin object
    if np.issubdtype(plot_data.dtype, np.datetime64) or plot_data.dtype == object:
        time_data = np.array([t.timestamp() for t in plot_data])
        plot_data = time_data - time_data[0]
    plot_range = self._calculate_range(plot_data)
    return plot_data, plot_range
def _identify_events(self, plot_data):
    """Return the plot data together with dataset events for time-like data.

    Events are only drawn on datetime axes, so an empty event list is
    returned for purely numeric data.
    """
    events = self.dataset.meta.get_events()
    for _, event_type, _ in events:
        self.add_event_color(event_type)
    # Fix: `np.object` was removed in NumPy 1.24; compare against builtin object
    if events and (np.issubdtype(plot_data.dtype, np.datetime64) or plot_data.dtype == object):
        return plot_data, events
    return plot_data, list()
@staticmethod
def _project_to_1d(plot_data):
    """Collapse a multi-dimensional array to 1-d by averaging over trailing axes."""
    if plot_data.ndim <= 1:
        return plot_data
    return plot_data.mean(axis=tuple(range(1, plot_data.ndim)))
def _calculate_range(self, values):
    """Calculate the (min, max) range of the given data.

    When "scale_to_filter" is active only the currently filtered observations
    define the range. For 2-d data a tuple of per-column (min, max) pairs is
    returned instead of a single pair.
    """
    if self.vars.get("scale_to_filter", False):
        idx_filter, _ = self.do_filter()
    else:
        idx_filter = np.ones(len(values), dtype=bool)
    values = values[idx_filter]
    try:
        # Keep only rows where every column is finite
        idx_finite = np.all(np.isfinite(values), axis=tuple(range(1, values.ndim)))
    except TypeError:  # Datetimes throw a TypeError on isnan/isfinite
        idx_finite = np.ones(len(values), dtype=bool)
    if not np.any(idx_finite):
        # No finite data at all: fall back to dummy (0, 1) ranges
        return (0, 1) if values.ndim == 1 else ((0, 1),) * values.shape[1]
    if values.ndim == 1:
        return np.min(values[idx_finite]), np.max(values[idx_finite])
    else:
        return tuple(zip(np.min(values[idx_finite], axis=0), np.max(values[idx_finite], axis=0)))
@staticmethod
def _flatten_range(ranges):
    """Collapse a sequence of (min, max) pairs into one overall (min, max)."""
    minima, maxima = zip(*ranges)
    return min(minima), max(maxima)
@staticmethod
def _pad_range(val_range, factor=0.01):
    """Widen a (min, max) range by *factor* of its span on both sides."""
    lower, upper = val_range
    try:
        margin = np.fmax((upper - lower) * factor, 1e-6)
    except TypeError:  # np.fmax fails on timedeltas
        margin = (upper - lower) * factor
    return lower - margin, upper + margin
class CheckBox(ttk.Checkbutton):
    """Check button that mirrors its state into a shared vars dict and redraws the figure."""

    def __init__(self, master, name, vars_, figure, **kwargs):
        self.choice = tk.IntVar()
        super().__init__(
            master, text=name.replace("_", " ").title(), command=self.update_vars, variable=self.choice, **kwargs
        )
        self.name = name
        self.vars = vars_
        self.figure = figure
        self.vars[self.name] = False  # unchecked by default

    def update_vars(self):
        """Sync the widget state into the shared vars and refresh the plot."""
        self.vars[self.name] = bool(self.choice.get())
        if self.figure is not None:
            self.figure.update_plot()
        log.debug(f"Setting {self.name} = {self.vars[self.name]}")
class Radiobutton(ttk.Radiobutton):
    """Radio button whose selection is shared per (master, group) and mirrored into a vars dict."""

    # Class-level registry of tk variables, one per (master, group) combination,
    # so all radio buttons in the same group share a single StringVar
    groupvars = dict()
    def __init__(self, master, name, group, vars_, **kwargs):
        self.name = name
        self.group = group
        text = name.replace("_", " ").capitalize()
        # The registry key includes the master widget so equally named groups in
        # different widgets stay independent; self.group keeps the plain name
        group = f"{master}.{group}"
        if group not in self.groupvars:
            self.groupvars[group] = tk.StringVar()
            self.groupvars[group].set(self.name)  # the first button added becomes the default
        self.choice = self.groupvars[group]
        super().__init__(master, text=text, command=self.update_vars, variable=self.choice, value=self.name, **kwargs)
        self.vars = vars_
        self.update_vars()
    def update_vars(self):
        """Mirror the group's current selection into the shared vars dict."""
        self.vars[self.group] = self.choice.get()
        log.debug(f"Setting {self.group} = {self.vars[self.group]}")
class DropdownPicker(ttk.Combobox, UpdateMixin):
    """Base class for read-only dropdowns that sync their choice into a shared vars dict."""

    name = "no_name"  # overridden by subclasses; also used as the vars key
    width = 8
    def __init__(self, master, vars_, figure, init_val="", **kwargs):
        self.choice = tk.StringVar()
        super().__init__(master, width=self.width, textvariable=self.choice, state="readonly", **kwargs)
        self.vars = vars_
        self.figure = figure
        self.next_update_func = None  # used by UpdateMixin to chain dependent updates
        # Start from an existing value in vars when available, else the given initial value
        self.choice.set(self.vars.get(self.var, init_val))
        # Legacy trace API: fires self.choose on every write to the variable
        self.choice.trace("w", self.choose)
    @property
    def label(self):
        # Human readable label derived from the widget name
        return self.name.replace("_", " ").title()
    @property
    def var(self):
        # Key used when reading/writing self.vars; subclasses may override
        return self.name
    def choose(self, *_):
        """Propagate the new selection into the shared vars and trigger chained updates."""
        vars_ = self.parse_vars()
        self.vars.update(vars_)
        for name, value in vars_.items():
            log.debug(f"DropdownPicker.choose({self.name}) Setting {name} = {value}")
        self.update_next()
    def parse_vars(self):
        """Translate the current choice into vars entries; subclasses may add derived values."""
        return {self.var: self.choice.get()}
    def update_options(self):
        """Refresh the dropdown options, keeping the current choice when still valid."""
        log.debug(f"Updating {self.name}")
        self["values"] = self.read_options()
        if self["values"] and self.choice.get() not in self["values"]:
            self.choice.set(self["values"][0])
        elif not self["values"]:
            self.choice.set("")
        else:
            # Re-setting the same value still fires the write trace, so dependents update
            self.choice.set(self.choice.get())
    def read_options(self):
        """Return the list of selectable options; overridden by subclasses."""
        return list()
@register_dropdown
class DD_RunDate(DropdownPicker):
    """Dropdown choosing the analysis date from the available dataset files."""

    name = "date"
    label = "Date"
    width = 10

    def read_options(self):
        """Read dates from filenames"""
        file_vars = dict(user=self.vars["user"], pipeline=self.vars["pipeline"], id=self.vars["id"])
        dates = config.files.glob_variable("dataset", "date", r"[0-9]{8}", file_vars=file_vars)
        return sorted(dates - {"19700101"})  # 19700101 marks timeseries datasets

    def parse_vars(self):
        """Construct date variables"""
        chosen = self.choice.get()
        rundate = datetime.strptime(chosen, "%Y%m%d").date() if chosen else None
        date_vars = config.date_vars(rundate)
        date_vars["rundate"] = rundate
        date_vars["date"] = chosen
        return date_vars
@register_dropdown
class DD_Session(DropdownPicker):
    """Dropdown choosing a session from the available dataset files."""

    name = "session"
    label = "Session"
    width = 10

    def read_options(self):
        """Read sessions from filenames"""
        file_vars = {key: val for key, val in self.vars.items() if key not in ("stage", "label")}
        return sorted(config.files.glob_variable("dataset", "session", r"[_a-zA-Z0-9]*", file_vars=file_vars))
@register_dropdown
class DD_Station(DropdownPicker):
    """Dropdown choosing a station from the available dataset files."""

    name = "station"
    label = "Station"  # fixed capitalization for consistency with the other dropdown labels
    width = 10

    def read_options(self):
        """Read stations from filenames"""
        file_vars = dict(user=self.vars["user"], pipeline=self.vars["pipeline"], id=self.vars["id"])
        stations = config.files.glob_variable("dataset", "station", r"[\w]*", file_vars=file_vars)
        return sorted(stations)
@register_dropdown
class DD_Id(DropdownPicker):
    """Dropdown choosing a dataset id from the available dataset files."""

    name = "id"
    width = 20

    def read_options(self):
        """Read ids from filenames"""
        file_vars = dict(pipeline=self.vars["pipeline"], user=self.vars["user"])
        return sorted(config.files.glob_variable("dataset", "id", r"|[_\w]+", file_vars=file_vars))
@register_dropdown
class DD_Stage(DropdownPicker):
    """Dropdown choosing a processing stage from the available dataset files."""

    name = "stage"
    width = 14
    def read_options(self):
        """Read stages from filenames
        """
        file_vars = {k: v for k, v in self.vars.items() if k not in ("stage", "label")}
        stages = config.files.glob_variable("dataset", "stage", r"[a-z_]+", file_vars=file_vars)
        return sorted(stages, key=self._sorter)
    def _sorter(self, stage):
        """Return a sort key for the given stage

        Sorts stages of the current pipeline such that later stages are sorted first. Unknown stages are sorted
        alphabetically at the end.

        Args:
            stage:  Name of a stage (string).

        Returns:
            Tuple that can be sorted on.
        """
        pipeline = self.vars["pipeline"]
        stages = pipelines.stages(pipeline)[::-1]  # Reversed to sort later stages first
        try:
            stage_id = stages.index(stage)
        except ValueError:
            stage_id = len(stages)  # Unknown stages are sorted last
        return (stage_id, stage)
@register_dropdown
class DD_SatName(DropdownPicker):
    """Dropdown choosing a satellite name from the available dataset files."""

    name = "sat_name"
    width = 14

    def read_options(self):
        """Read sat_name from filenames"""
        # Bug fix: ("sat_name") is a plain string, so `k not in ("sat_name")` was a
        # substring test that wrongly dropped keys like "name"; use a real tuple.
        file_vars = {k: v for k, v in self.vars.items() if k not in ("sat_name",)}
        # NOTE(review): character class [a-z1-2] matches letters plus digits 1-2 only — confirm intended
        sat_names = config.files.glob_variable("dataset", "sat_name", r"[a-z1-2]+", file_vars=file_vars)
        return sorted(sat_names)
@register_dropdown
class DD_Label(DropdownPicker):
    """Dropdown choosing a dataset label from the available dataset files."""

    name = "label"
    width = 14

    def read_options(self):
        """Read label from filenames"""
        # Exclude our own variable so all labels match the glob
        file_vars = {key: val for key, val in self.vars.items() if key != "label"}
        labels = config.files.glob_variable("dataset", "label", ".+", file_vars=file_vars)
        return sorted(labels)
@register_dropdown
class DD_PlotType(DropdownPicker):
    """Dropdown choosing which registered plot type to draw."""

    name = "plot_type"
    width = 10
    def read_options(self):
        # PLOT_TYPES is the registry of @plot_type-decorated plot methods
        return sorted(PLOT_TYPES)
class DD_Field(DropdownPicker):
    """Base dropdown for choosing a dataset field to plot."""

    width = 35

    def read_options(self):
        """All plottable fields, with time fields sorted first."""
        return sorted(self.figure.dataset.plot_fields, key=lambda f: ("a" if f.startswith("time") else "z") + f)

    def parse_vars(self):
        """Look up the data of the chosen field and describe its dimensionality."""
        field = self.choice.get()
        if not field or field == "none":
            # No field selected: fall back to a constant dummy series
            return {self.name + "_data": np.ones(self.figure.dataset.num_obs), self.name + "_name": ""}
        values = self.figure.dataset.plot_values(field)
        column_names = None
        if values.ndim == 2:
            try:
                column_names = self.figure.dataset[field].column_names
            except AttributeError:
                column_names = None
        return {
            self.name + "_data": values,
            self.name + "_name": field,
            self.name + "_3d": values.ndim == 2 and values.shape[1] >= 3,
            self.name + "_columns": column_names,
        }
@register_dropdown
class DD_XAxis(DD_Field):
    """Field picker for the data plotted on the x-axis."""
    name = "x_axis"
    label = "X-Axis"
@register_dropdown
class DD_YAxis(DD_Field):
    """Field picker for the data plotted on the y-axis."""
    name = "y_axis"
    label = "Y-Axis"
@register_dropdown
class DD_Color(DD_Field):
    """Field picker for the data used to color the plot markers."""

    name = "color"

    def read_options(self):
        return ["none"] + super().read_options()

    def parse_vars(self):
        parsed = super().parse_vars()
        data_key = self.name + "_data"
        try:
            # Datetimes cannot be mapped to colors directly; use POSIX timestamps
            parsed[data_key] = np.vectorize(lambda d: d.timestamp())(parsed[data_key])
        except AttributeError:
            pass
        return parsed
@register_dropdown
class DD_Size(DD_Field):
    """Field picker for the data that scales the plot marker sizes."""

    name = "size"

    def read_options(self):
        return ["none"] + super().read_options()

    def parse_vars(self):
        parsed = super().parse_vars()
        values = parsed[self.name + "_data"]
        try:
            # Datetimes cannot be scaled directly; use POSIX timestamps
            values = np.vectorize(lambda d: d.timestamp())(values)
        except AttributeError:
            pass
        # TODO: Can we normalize while plotting to update properly?
        scaled = normalize(np.abs(values), self.figure)
        return dict(size_data=scaled ** 2 * 65 + 10, size_name=parsed[self.name + "_name"])
def filter_factory(filter_name):
    """Create and register a dropdown class that filters on *filter_name*."""

    class DD_Filter(DropdownPicker):
        width = 20
        name = f"filter_{filter_name}"
        label = filter_name.replace("_", " ").title()

        def __init__(self, master, vars_, figure, **kwargs):
            super().__init__(master, vars_, figure, init_val="no filter", **kwargs)
            figure.add_filter(self)

        def read_options(self):
            field = self.name[len("filter_"):]
            try:
                return ["no filter"] + sorted(self.figure.dataset.unique(field))
            except KeyError:
                return ["no filter"]

    register_dropdown(DD_Filter)
    return DD_Filter
def normalize(values, figure):
    """Linearly map *values* onto the [0, 1]-interval.

    When "scale_to_filter" is active, only the currently filtered points
    define the scale; points outside the filter are pinned to 0.5.
    """
    if figure.vars.get("scale_to_filter", False):
        idx_filter, _ = figure.do_filter()
    else:
        idx_filter = np.ones(len(values), dtype=bool)
    filtered = values[idx_filter]
    lo, hi = np.min(filtered), np.max(filtered)
    span = hi - lo
    if span == 0:
        # Constant data: put everything in the middle of the interval
        return 0.5 * np.ones(values.shape)
    scaled = (values - lo) / span
    scaled[~idx_filter] = 0.5
    return scaled
# Run main function only when the file is executed as a script
if __name__ == "__main__":
    main()
|
test_zeromq.py | # -*- coding: utf-8 -*-
'''
:codeauthor: Thomas Jackson <jacksontj.89@gmail.com>
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import time
import threading
import multiprocessing
import ctypes
from concurrent.futures.thread import ThreadPoolExecutor
# linux_distribution deprecated in py3.7
try:
from platform import linux_distribution
except ImportError:
from distro import linux_distribution
# Import 3rd-party libs
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
from tornado.testing import AsyncTestCase
import tornado.gen
# Import Salt libs
import salt.config
import salt.log.setup
from salt.ext import six
import salt.utils.process
import salt.utils.platform
import salt.transport.server
import salt.transport.client
import salt.exceptions
from salt.ext.six.moves import range
from salt.transport.zeromq import AsyncReqMessageClientPool
# Import test support libs
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
from tests.support.helpers import flaky, get_unused_localhost_port
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.mock import MagicMock, patch
from tests.unit.transport.mixins import PubChannelMixin, ReqChannelMixin, run_loop_in_thread
# Detect SUSE up front; AESReqTestCases is skipped there (see its skipIf)
ON_SUSE = False
if 'SuSE' in linux_distribution(full_distribution_name=False):
    ON_SUSE = True
class BaseZMQReqCase(TestCase, AdaptedConfigurationTestCaseMixin):
    '''
    Test the req server/client pair
    '''
    @classmethod
    def setUpClass(cls):
        # Subclasses must provide _handle_payload; the base class starts no server
        if not hasattr(cls, '_handle_payload'):
            return
        ret_port = get_unused_localhost_port()
        publish_port = get_unused_localhost_port()
        tcp_master_pub_port = get_unused_localhost_port()
        tcp_master_pull_port = get_unused_localhost_port()
        tcp_master_publish_pull = get_unused_localhost_port()
        tcp_master_workers = get_unused_localhost_port()
        cls.master_config = cls.get_temp_config(
            'master',
            **{'transport': 'zeromq',
               'auto_accept': True,
               'ret_port': ret_port,
               'publish_port': publish_port,
               'tcp_master_pub_port': tcp_master_pub_port,
               'tcp_master_pull_port': tcp_master_pull_port,
               'tcp_master_publish_pull': tcp_master_publish_pull,
               'tcp_master_workers': tcp_master_workers}
        )
        cls.minion_config = cls.get_temp_config(
            'minion',
            **{'transport': 'zeromq',
               'master_ip': '127.0.0.1',
               'master_port': ret_port,
               'auth_timeout': 5,
               'auth_tries': 1,
               'master_uri': 'tcp://127.0.0.1:{0}'.format(ret_port)}
        )
        cls.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
        cls.server_channel = salt.transport.server.ReqServerChannel.factory(cls.master_config)
        cls.server_channel.pre_fork(cls.process_manager)
        # Run the server's io_loop in a background thread; evt signals it to stop
        cls.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
        cls.evt = threading.Event()
        cls.server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
        cls.server_thread = threading.Thread(target=run_loop_in_thread, args=(cls.io_loop, cls.evt))
        cls.server_thread.start()
    @classmethod
    def tearDownClass(cls):
        if not hasattr(cls, '_handle_payload'):
            return
        # Attempting to kill the children hangs the test suite.
        # Let the test suite handle this instead.
        cls.process_manager.stop_restarting()
        cls.process_manager.kill_children()
        cls.evt.set()
        cls.server_thread.join()
        time.sleep(2)  # Give the procs a chance to fully close before we stop the io_loop
        cls.server_channel.close()
        # Drop class-level references so no state leaks into other test classes
        del cls.server_channel
        del cls.io_loop
        del cls.process_manager
        del cls.server_thread
        del cls.master_config
        del cls.minion_config
    @classmethod
    def _handle_payload(cls, payload):
        '''
        TODO: something besides echo
        '''
        return payload, {'fun': 'send_clear'}
class ClearReqTestCases(BaseZMQReqCase, ReqChannelMixin):
    '''
    Test all of the clear msg stuff
    '''
    def setUp(self):
        # Fresh clear-crypt channel per test so state never leaks between tests
        self.channel = salt.transport.client.ReqChannel.factory(self.minion_config, crypt='clear')
    def tearDown(self):
        self.channel.close()
        del self.channel
    @classmethod
    @tornado.gen.coroutine
    def _handle_payload(cls, payload):
        '''
        TODO: something besides echo
        '''
        raise tornado.gen.Return((payload, {'fun': 'send_clear'}))
    def test_master_uri_override(self):
        '''
        ensure master_uri kwarg is respected
        '''
        # minion_config should be 127.0.0.1, we want a different uri that still connects
        uri = 'tcp://{master_ip}:{master_port}'.format(master_ip='localhost', master_port=self.minion_config['master_port'])
        channel = salt.transport.Channel.factory(self.minion_config, master_uri=uri)
        self.assertIn('localhost', channel.master_uri)
        del channel
@flaky
@skipIf(ON_SUSE, 'Skipping until https://github.com/saltstack/salt/issues/32902 gets fixed')
class AESReqTestCases(BaseZMQReqCase, ReqChannelMixin):
    '''
    Req server/client tests over the default (AES encrypted) channel
    '''
    def setUp(self):
        self.channel = salt.transport.client.ReqChannel.factory(self.minion_config)

    def tearDown(self):
        self.channel.close()
        del self.channel

    @classmethod
    @tornado.gen.coroutine
    def _handle_payload(cls, payload):
        '''
        TODO: something besides echo
        '''
        raise tornado.gen.Return((payload, {'fun': 'send'}))

    # TODO: make failed returns have a specific framing so we can raise the same exception
    # on encrypted channels
    #
    #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    #
    # WARNING: This test will fail randomly on any system with > 1 CPU core!!!
    #
    #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def test_badload(self):
        '''
        Test a variety of bad requests, make sure that we get some sort of error
        '''
        # TODO: This test should be re-enabled when Jenkins moves to C7.
        # Once the version of salt-testing is increased to something newer than the September
        # release of salt-testing, the @flaky decorator should be applied to this test.
        msgs = ['', [], tuple()]
        for msg in msgs:
            with self.assertRaises(salt.exceptions.AuthenticationError):
                # Fix: dropped the unused `ret =` assignment; only the raise matters
                self.channel.send(msg, timeout=5)
class BaseZMQPubCase(AsyncTestCase, AdaptedConfigurationTestCaseMixin):
    '''
    Test the pub server/client pair
    '''
    @classmethod
    def setUpClass(cls):
        ret_port = get_unused_localhost_port()
        publish_port = get_unused_localhost_port()
        tcp_master_pub_port = get_unused_localhost_port()
        tcp_master_pull_port = get_unused_localhost_port()
        tcp_master_publish_pull = get_unused_localhost_port()
        tcp_master_workers = get_unused_localhost_port()
        cls.master_config = cls.get_temp_config(
            'master',
            **{'transport': 'zeromq',
               'auto_accept': True,
               'ret_port': ret_port,
               'publish_port': publish_port,
               'tcp_master_pub_port': tcp_master_pub_port,
               'tcp_master_pull_port': tcp_master_pull_port,
               'tcp_master_publish_pull': tcp_master_publish_pull,
               'tcp_master_workers': tcp_master_workers}
        )
        # Fix: removed a redundant salt.config.minion_config() call whose result
        # was immediately overwritten by the get_temp_config() call below
        cls.minion_config = cls.get_temp_config(
            'minion',
            **{'transport': 'zeromq',
               'master_ip': '127.0.0.1',
               'master_port': ret_port,
               'master_uri': 'tcp://127.0.0.1:{0}'.format(ret_port)}
        )
        cls.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
        cls.server_channel = salt.transport.server.PubServerChannel.factory(cls.master_config)
        cls.server_channel.pre_fork(cls.process_manager)
        # we also require req server for auth
        cls.req_server_channel = salt.transport.server.ReqServerChannel.factory(cls.master_config)
        cls.req_server_channel.pre_fork(cls.process_manager)
        # Run the req server's io_loop in a background thread; evt signals it to stop
        cls._server_io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
        cls.evt = threading.Event()
        cls.req_server_channel.post_fork(cls._handle_payload, io_loop=cls._server_io_loop)
        cls.server_thread = threading.Thread(target=run_loop_in_thread, args=(cls._server_io_loop, cls.evt))
        cls.server_thread.start()

    @classmethod
    def tearDownClass(cls):
        cls.process_manager.kill_children()
        cls.process_manager.stop_restarting()
        time.sleep(2)  # Give the procs a chance to fully close before we stop the io_loop
        cls.evt.set()
        cls.server_thread.join()
        cls.req_server_channel.close()
        cls.server_channel.close()
        cls._server_io_loop.stop()
        # Drop class-level references so no state leaks into other test classes
        del cls.server_channel
        del cls._server_io_loop
        del cls.process_manager
        del cls.server_thread
        del cls.master_config
        del cls.minion_config

    @classmethod
    def _handle_payload(cls, payload):
        '''
        TODO: something besides echo
        '''
        return payload, {'fun': 'send_clear'}

    def setUp(self):
        super(BaseZMQPubCase, self).setUp()
        # Snapshot the io_loop's registered FDs so tearDown can detect leaks
        self._start_handlers = dict(self.io_loop._handlers)

    def tearDown(self):
        super(BaseZMQPubCase, self).tearDown()
        # Fail loudly if a test left file descriptors attached to the io_loop
        failures = []
        for k, v in six.iteritems(self.io_loop._handlers):
            if self._start_handlers.get(k) != v:
                failures.append((k, v))
        del self._start_handlers
        if len(failures) > 0:
            raise Exception('FDs still attached to the IOLoop: {0}'.format(failures))
@skipIf(True, 'Skip until we can devote time to fix this test')
class AsyncPubChannelTest(BaseZMQPubCase, PubChannelMixin):
    '''
    Tests around the publish system
    '''
    def get_new_ioloop(self):
        # AsyncTestCase hook: use the ZMQ-aware IOLoop for this test case
        return zmq.eventloop.ioloop.ZMQIOLoop()
class AsyncReqMessageClientPoolTest(TestCase):
    '''
    Unit tests for AsyncReqMessageClientPool using mocked-out message clients
    '''
    def setUp(self):
        super(AsyncReqMessageClientPoolTest, self).setUp()
        sock_pool_size = 5
        # Patch __init__ so no real ZMQ sockets are created
        with patch('salt.transport.zeromq.AsyncReqMessageClient.__init__', MagicMock(return_value=None)):
            self.message_client_pool = AsyncReqMessageClientPool({'sock_pool_size': sock_pool_size},
                                                                 args=({}, ''))
        # Keep the real clients referenced, but run the tests against mocks
        self.original_message_clients = self.message_client_pool.message_clients
        self.message_client_pool.message_clients = [MagicMock() for _ in range(sock_pool_size)]
    def tearDown(self):
        # Patch destroy so dropping the real clients does not touch ZMQ
        with patch('salt.transport.zeromq.AsyncReqMessageClient.destroy', MagicMock(return_value=None)):
            del self.original_message_clients
        super(AsyncReqMessageClientPoolTest, self).tearDown()
    def test_send(self):
        # The pool should route send() to the client with the shortest send_queue
        for message_client_mock in self.message_client_pool.message_clients:
            message_client_mock.send_queue = [0, 0, 0]
            message_client_mock.send.return_value = []
        self.assertEqual([], self.message_client_pool.send())
        self.message_client_pool.message_clients[2].send_queue = [0]
        self.message_client_pool.message_clients[2].send.return_value = [1]
        self.assertEqual([1], self.message_client_pool.send())
    def test_destroy(self):
        # destroy() must drop all clients from the pool
        self.message_client_pool.destroy()
        self.assertEqual([], self.message_client_pool.message_clients)
class ZMQConfigTest(TestCase):
    '''
    Unit tests for the master URI helper of the zeromq transport
    '''
    def test_master_uri(self):
        '''
        test _get_master_uri method
        '''
        m_ip = '127.0.0.1'
        m_port = 4505
        s_ip = '111.1.0.1'
        s_port = 4058
        m_ip6 = '1234:5678::9abc'
        s_ip6 = '1234:5678::1:9abc'
        # Pin library versions so the source-address code path is taken
        with patch('salt.transport.zeromq.LIBZMQ_VERSION_INFO', (4, 1, 6)), \
                patch('salt.transport.zeromq.ZMQ_VERSION_INFO', (16, 0, 1)):
            # pass in both source_ip and source_port
            assert salt.transport.zeromq._get_master_uri(master_ip=m_ip,
                                                         master_port=m_port,
                                                         source_ip=s_ip,
                                                         source_port=s_port) == 'tcp://{0}:{1};{2}:{3}'.format(s_ip, s_port, m_ip, m_port)
            assert salt.transport.zeromq._get_master_uri(master_ip=m_ip6,
                                                         master_port=m_port,
                                                         source_ip=s_ip6,
                                                         source_port=s_port) == 'tcp://[{0}]:{1};[{2}]:{3}'.format(s_ip6, s_port, m_ip6, m_port)
            # source ip and source_port empty
            assert salt.transport.zeromq._get_master_uri(master_ip=m_ip,
                                                         master_port=m_port) == 'tcp://{0}:{1}'.format(m_ip, m_port)
            assert salt.transport.zeromq._get_master_uri(master_ip=m_ip6,
                                                         master_port=m_port) == 'tcp://[{0}]:{1}'.format(m_ip6, m_port)
            # pass in only source_ip
            assert salt.transport.zeromq._get_master_uri(master_ip=m_ip,
                                                         master_port=m_port,
                                                         source_ip=s_ip) == 'tcp://{0}:0;{1}:{2}'.format(s_ip, m_ip, m_port)
            assert salt.transport.zeromq._get_master_uri(master_ip=m_ip6,
                                                         master_port=m_port,
                                                         source_ip=s_ip6) == 'tcp://[{0}]:0;[{1}]:{2}'.format(s_ip6, m_ip6, m_port)
            # pass in only source_port
            assert salt.transport.zeromq._get_master_uri(master_ip=m_ip,
                                                         master_port=m_port,
                                                         source_port=s_port) == 'tcp://0.0.0.0:{0};{1}:{2}'.format(s_port, m_ip, m_port)
class PubServerChannel(TestCase, AdaptedConfigurationTestCaseMixin):
@classmethod
def setUpClass(cls):
    '''
    Create master/minion configs and seed the shared AES key used by publishes
    '''
    ret_port = get_unused_localhost_port()
    publish_port = get_unused_localhost_port()
    tcp_master_pub_port = get_unused_localhost_port()
    tcp_master_pull_port = get_unused_localhost_port()
    tcp_master_publish_pull = get_unused_localhost_port()
    tcp_master_workers = get_unused_localhost_port()
    cls.master_config = cls.get_temp_config(
        'master',
        **{'transport': 'zeromq',
           'auto_accept': True,
           'ret_port': ret_port,
           'publish_port': publish_port,
           'tcp_master_pub_port': tcp_master_pub_port,
           'tcp_master_pull_port': tcp_master_pull_port,
           'tcp_master_publish_pull': tcp_master_publish_pull,
           'tcp_master_workers': tcp_master_workers,
           'sign_pub_messages': False,
           }
    )
    # Shared AES key so _gather_results can decrypt published payloads
    salt.master.SMaster.secrets['aes'] = {
        'secret': multiprocessing.Array(
            ctypes.c_char,
            six.b(salt.crypt.Crypticle.generate_key_string()),
        ),
    }
    cls.minion_config = cls.get_temp_config(
        'minion',
        **{'transport': 'zeromq',
           'master_ip': '127.0.0.1',
           'master_port': ret_port,
           'auth_timeout': 5,
           'auth_tries': 1,
           'master_uri': 'tcp://127.0.0.1:{0}'.format(ret_port)}
    )
@classmethod
def tearDownClass(cls):
    # Drop the class-level config references created in setUpClass
    del cls.minion_config
    del cls.master_config
def setUp(self):
    # Start the event loop, even though we don't directly use this with
    # ZeroMQPubServerChannel, having it running seems to increase the
    # likelihood of dropped messages.
    self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
    self.io_loop.make_current()
    self.io_loop_thread = threading.Thread(target=self.io_loop.start)
    self.io_loop_thread.start()
    self.process_manager = salt.utils.process.ProcessManager(name='PubServer_ProcessManager')
def tearDown(self):
self.io_loop.add_callback(self.io_loop.stop)
self.io_loop_thread.join()
self.process_manager.stop_restarting()
self.process_manager.kill_children()
del self.io_loop
del self.io_loop_thread
del self.process_manager
@staticmethod
def _gather_results(opts, pub_uri, results, timeout=120, messages=None):
'''
Gather results until then number of seconds specified by timeout passes
without reveiving a message
'''
ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.setsockopt(zmq.LINGER, -1)
sock.setsockopt(zmq.SUBSCRIBE, b'')
sock.connect(pub_uri)
last_msg = time.time()
serial = salt.payload.Serial(opts)
crypticle = salt.crypt.Crypticle(opts, salt.master.SMaster.secrets['aes']['secret'].value)
while time.time() - last_msg < timeout:
try:
payload = sock.recv(zmq.NOBLOCK)
except zmq.ZMQError:
time.sleep(.01)
else:
if messages:
if messages != 1:
messages -= 1
continue
payload = crypticle.loads(serial.loads(payload)['load'])
if 'stop' in payload:
break
last_msg = time.time()
results.append(payload['jid'])
@skipIf(salt.utils.platform.is_windows(), 'Skip on Windows OS')
def test_publish_to_pubserv_ipc(self):
'''
Test sending 10K messags to ZeroMQPubServerChannel using IPC transport
ZMQ's ipc transport not supported on Windows
'''
opts = dict(self.master_config, ipc_mode='ipc', pub_hwm=0)
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
server_channel.pre_fork(self.process_manager, kwargs={
'log_queue': salt.log.setup.get_multiprocessing_logging_queue()
})
pub_uri = 'tcp://{interface}:{publish_port}'.format(**server_channel.opts)
send_num = 10000
expect = []
results = []
gather = threading.Thread(target=self._gather_results, args=(self.minion_config, pub_uri, results,))
gather.start()
# Allow time for server channel to start, especially on windows
time.sleep(2)
for i in range(send_num):
expect.append(i)
load = {'tgt_type': 'glob', 'tgt': '*', 'jid': i}
server_channel.publish(load)
server_channel.publish(
{'tgt_type': 'glob', 'tgt': '*', 'stop': True}
)
gather.join()
server_channel.pub_close()
assert len(results) == send_num, (len(results), set(expect).difference(results))
def test_zeromq_publish_port(self):
'''
test when connecting that we
use the publish_port set in opts
when its not 4506
'''
opts = dict(self.master_config, ipc_mode='ipc',
pub_hwm=0, recon_randomize=False,
publish_port=455505,
recon_default=1, recon_max=2, master_ip='127.0.0.1',
acceptance_wait_time=5, acceptance_wait_time_max=5)
opts['master_uri'] = 'tcp://{interface}:{publish_port}'.format(**opts)
channel = salt.transport.zeromq.AsyncZeroMQPubChannel(opts)
patch_socket = MagicMock(return_value=True)
patch_auth = MagicMock(return_value=True)
with patch.object(channel, '_socket', patch_socket), \
patch.object(channel, 'auth', patch_auth):
channel.connect()
assert str(opts['publish_port']) in patch_socket.mock_calls[0][1][0]
def test_zeromq_zeromq_filtering_decode_message_no_match(self):
'''
test AsyncZeroMQPubChannel _decode_messages when
zmq_filtering enabled and minion does not match
'''
message = [b'4f26aeafdb2367620a393c973eddbe8f8b846eb',
b'\x82\xa3enc\xa3aes\xa4load\xda\x00`\xeeR\xcf'
b'\x0eaI#V\x17if\xcf\xae\x05\xa7\xb3bN\xf7\xb2\xe2'
b'\xd0sF\xd1\xd4\xecB\xe8\xaf"/*ml\x80Q3\xdb\xaexg'
b'\x8e\x8a\x8c\xd3l\x03\\,J\xa7\x01i\xd1:]\xe3\x8d'
b'\xf4\x03\x88K\x84\n`\xe8\x9a\xad\xad\xc6\x8ea\x15>'
b'\x92m\x9e\xc7aM\x11?\x18;\xbd\x04c\x07\x85\x99\xa3\xea[\x00D']
opts = dict(self.master_config, ipc_mode='ipc',
pub_hwm=0, zmq_filtering=True, recon_randomize=False,
recon_default=1, recon_max=2, master_ip='127.0.0.1',
acceptance_wait_time=5, acceptance_wait_time_max=5)
opts['master_uri'] = 'tcp://{interface}:{publish_port}'.format(**opts)
server_channel = salt.transport.zeromq.AsyncZeroMQPubChannel(opts)
with patch('salt.crypt.AsyncAuth.crypticle',
MagicMock(return_value={'tgt_type': 'glob', 'tgt': '*',
'jid': 1})) as mock_test:
res = server_channel._decode_messages(message)
assert res.result() is None
def test_zeromq_zeromq_filtering_decode_message(self):
'''
test AsyncZeroMQPubChannel _decode_messages
when zmq_filtered enabled
'''
message = [b'4f26aeafdb2367620a393c973eddbe8f8b846ebd',
b'\x82\xa3enc\xa3aes\xa4load\xda\x00`\xeeR\xcf'
b'\x0eaI#V\x17if\xcf\xae\x05\xa7\xb3bN\xf7\xb2\xe2'
b'\xd0sF\xd1\xd4\xecB\xe8\xaf"/*ml\x80Q3\xdb\xaexg'
b'\x8e\x8a\x8c\xd3l\x03\\,J\xa7\x01i\xd1:]\xe3\x8d'
b'\xf4\x03\x88K\x84\n`\xe8\x9a\xad\xad\xc6\x8ea\x15>'
b'\x92m\x9e\xc7aM\x11?\x18;\xbd\x04c\x07\x85\x99\xa3\xea[\x00D']
opts = dict(self.master_config, ipc_mode='ipc',
pub_hwm=0, zmq_filtering=True, recon_randomize=False,
recon_default=1, recon_max=2, master_ip='127.0.0.1',
acceptance_wait_time=5, acceptance_wait_time_max=5)
opts['master_uri'] = 'tcp://{interface}:{publish_port}'.format(**opts)
server_channel = salt.transport.zeromq.AsyncZeroMQPubChannel(opts)
with patch('salt.crypt.AsyncAuth.crypticle',
MagicMock(return_value={'tgt_type': 'glob', 'tgt': '*',
'jid': 1})) as mock_test:
res = server_channel._decode_messages(message)
assert res.result()['enc'] == 'aes'
@skipIf(salt.utils.platform.is_windows(), 'Skip on Windows OS')
def test_zeromq_filtering(self):
'''
Test sending messags to publisher using UDP
with zeromq_filtering enabled
'''
opts = dict(self.master_config, ipc_mode='ipc',
pub_hwm=0, zmq_filtering=True, acceptance_wait_time=5)
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
server_channel.pre_fork(self.process_manager, kwargs={
'log_queue': salt.log.setup.get_multiprocessing_logging_queue()
})
pub_uri = 'tcp://{interface}:{publish_port}'.format(**server_channel.opts)
send_num = 1
expect = []
results = []
gather = threading.Thread(target=self._gather_results,
args=(self.minion_config, pub_uri, results,),
kwargs={'messages': 2})
gather.start()
# Allow time for server channel to start, especially on windows
time.sleep(2)
expect.append(send_num)
load = {'tgt_type': 'glob', 'tgt': '*', 'jid': send_num}
with patch('salt.utils.minions.CkMinions.check_minions',
MagicMock(return_value={'minions': ['minion'], 'missing': [],
'ssh_minions': False})):
server_channel.publish(load)
server_channel.publish(
{'tgt_type': 'glob', 'tgt': '*', 'stop': True}
)
gather.join()
server_channel.pub_close()
assert len(results) == send_num, (len(results), set(expect).difference(results))
def test_publish_to_pubserv_tcp(self):
'''
Test sending 10K messags to ZeroMQPubServerChannel using TCP transport
'''
opts = dict(self.master_config, ipc_mode='tcp', pub_hwm=0)
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
server_channel.pre_fork(self.process_manager, kwargs={
'log_queue': salt.log.setup.get_multiprocessing_logging_queue()
})
pub_uri = 'tcp://{interface}:{publish_port}'.format(**server_channel.opts)
send_num = 10000
expect = []
results = []
gather = threading.Thread(target=self._gather_results, args=(self.minion_config, pub_uri, results,))
gather.start()
# Allow time for server channel to start, especially on windows
time.sleep(2)
for i in range(send_num):
expect.append(i)
load = {'tgt_type': 'glob', 'tgt': '*', 'jid': i}
server_channel.publish(load)
gather.join()
server_channel.pub_close()
assert len(results) == send_num, (len(results), set(expect).difference(results))
@staticmethod
def _send_small(opts, sid, num=10):
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
for i in range(num):
load = {'tgt_type': 'glob', 'tgt': '*', 'jid': '{}-{}'.format(sid, i)}
server_channel.publish(load)
server_channel.close()
@staticmethod
def _send_large(opts, sid, num=10, size=250000 * 3):
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
for i in range(num):
load = {'tgt_type': 'glob', 'tgt': '*', 'jid': '{}-{}'.format(sid, i), 'xdata': '0' * size}
server_channel.publish(load)
server_channel.close()
def test_issue_36469_tcp(self):
'''
Test sending both large and small messags to publisher using TCP
https://github.com/saltstack/salt/issues/36469
'''
opts = dict(self.master_config, ipc_mode='tcp', pub_hwm=0)
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
server_channel.pre_fork(self.process_manager, kwargs={
'log_queue': salt.log.setup.get_multiprocessing_logging_queue()
})
send_num = 10 * 4
expect = []
results = []
pub_uri = 'tcp://{interface}:{publish_port}'.format(**opts)
# Allow time for server channel to start, especially on windows
time.sleep(2)
gather = threading.Thread(target=self._gather_results, args=(self.minion_config, pub_uri, results,))
gather.start()
with ThreadPoolExecutor(max_workers=4) as executor:
executor.submit(self._send_small, opts, 1)
executor.submit(self._send_small, opts, 2)
executor.submit(self._send_small, opts, 3)
executor.submit(self._send_large, opts, 4)
expect = ['{}-{}'.format(a, b) for a in range(10) for b in (1, 2, 3, 4)]
time.sleep(0.1)
server_channel.publish({'tgt_type': 'glob', 'tgt': '*', 'stop': True})
gather.join()
server_channel.pub_close()
assert len(results) == send_num, (len(results), set(expect).difference(results))
|
helperapi.py | import os
import imp
from bson import json_util
import json
import restapi
import netifaces as ni
import confparser as cp
import pathdumpapi as pdapi
from threading import Thread
import time
import socket
import zmq
cwd = os.getcwd()
subsURL = "tcp://localhost:5556"
subsFilter = "TIB "
subsQueries={}
subsSocket=None
instQueries = {}
def buildFlowidFilter (flowID):
    """Build per-field query filters from a flow-ID dict.

    Returns a 5-tuple (sip, sport, dip, dport, proto) where each element is
    {'field': value} when the field is present and not the wildcard "*",
    and the empty string '' otherwise.
    """
    def _field (key):
        # '' signals "no constraint" for this field
        if key in flowID and flowID[key] != "*":
            return {key: flowID[key]}
        return ''
    return (_field ('sip'), _field ('sport'), _field ('dip'),
            _field ('dport'), _field ('proto'))
def buildLinkFilter (linkID):
    """Build a 'path' filter for a link (src, dst) pair.

    Exact match when both ends are given, '' when both are wildcards, and a
    $regex filter with '[0-9]+' standing in for the wildcard end otherwise.
    """
    src, dst = linkID[0], linkID[1]
    if src == "*" and dst == "*":
        return ''  # no constraint at all
    if src != "*" and dst != "*":
        return {'path': src + "-" + dst}
    if src == "*":
        return {'path': {"$regex": '[0-9]+' + "-" + dst}}
    return {'path': {"$regex": src + "-" + '[0-9]+'}}
def buildTimeFilter (timeRange):
    """Build (start, end) range filters from a (stime, etime) pair.

    Each element of the returned tuple is '' when the corresponding bound
    is the wildcard "*".
    """
    start, end = timeRange[0], timeRange[1]
    stime_filter = {'start': {'$gte': start}} if start != "*" else ''
    etime_filter = {'end': {'$lte': end}} if end != "*" else ''
    return (stime_filter, etime_filter)
def buildPathFilter (path):
    """Build an $all filter matching every hop in *path*; '' when empty."""
    if not path:
        return ''
    return {'path': {'$all': path}}
def doAndFilters (filters):
    """Combine the non-empty filters in *filters* under one '$and' clause.

    Returns '' when nothing remains, the lone filter itself when exactly
    one remains, and {'$and': [...]} otherwise.
    """
    # format: {'$and' : [{}, {}]
    active = [fltr for fltr in filters if fltr != '']
    if not active:
        return ''
    if len (active) == 1:
        return active[0]
    return {'$and': active}
# Node identifier helper
def getCurNodeID ():
    """Return this node's identifier (currently the machine hostname).

    NOTE(review): the legacy comment said "IP address", but the active code
    returns the hostname; the interface-IP variant is kept below for reference:
        ni.ifaddresses('eth0')[2][0]['addr']
    """
    return socket.gethostname()
def wrapper (func, args, results):
    """Call *func* with *args* unpacked and append its return value to *results*."""
    outcome = func (*args)
    results.append (outcome)
def initializeSubscription():
    # Lazily create the global ZMQ SUB socket used to receive exported flow
    # records and subscribe it to the configured topic prefix (subsFilter).
    # Idempotent: does nothing when the socket already exists.
    global subsSocket,subsURL,subsFilter
    if subsSocket == None:
        # Socket to subscribe for flow records
        context = zmq.Context()
        subsSocket = context.socket(zmq.SUB)
        subsSocket.connect(subsURL)
        fltr=subsFilter
        # Python 2 - ascii bytes to unicode str
        if isinstance(fltr, bytes):
            fltr = fltr.decode('ascii')
        print("Collecting flow stats host agent",subsURL,fltr)
        # setsockopt_string requires a unicode filter, hence the decode above
        subsSocket.setsockopt_string(zmq.SUBSCRIBE, fltr)
def execSubsQueries(argv):
    """Run every still-installed subscribed query against one flow record.

    *argv* is the decoded flow record.  Queries whose instQueries flag was
    cleared (uninstalled) are dropped from both tables; when no subscribed
    queries remain the global SUB socket is closed.
    """
    global subsQueries,subsSocket,instQueries
    # Iterate over a snapshot: entries are deleted inside the loop, and
    # mutating a dict while iterating its keys is an error on Python 3.
    for qname in list(subsQueries.keys()):
        if qname in instQueries and instQueries[qname]:
            runQuery(subsQueries[qname], argv)
        else:
            # Query was uninstalled: drop it from both tables.  pop() avoids
            # the KeyError the previous `del instQueries[qname]` raised when
            # qname had already been removed from instQueries.
            del subsQueries[qname]
            instQueries.pop(qname, None)
    if len(subsQueries)==0:
        print("NO queries subscribed for flows. Closing socket")
        subsSocket.close()
        subsSocket=None
def listenRecords(filter_str):
    # Blocking receive loop: while any subscribed query remains, read one
    # topic-prefixed JSON flow record from the global SUB socket and feed
    # it to every subscribed query.
    while True and len(subsQueries) > 0:
        print "listening for records"
        msg = subsSocket.recv_string()
        # Strip the topic prefix (filter_str) before parsing the JSON body
        flow_record = json.loads(msg[len(filter_str):])
        execSubsQueries(flow_record)
def runQuery(source, argv):
    # Load the query module named by source['name'] from the local
    # repository directory and execute it against *argv*.
    filepath = cp.options['repository'] + '/' + source['name']
    module = imp.load_source ('', filepath)
    # module must implement 'run' function
    return module.run (argv)
def processTIB (source, collection):
    # Load the query module from the repository and run it with the query's
    # own argv against the given TIB *collection*.
    filepath = cp.options['repository'] + '/' + source['name']
    module = imp.load_source ('', filepath)
    # module must implement 'run' function
    return module.run (source['argv'], collection)
def processCollectedData (source, data):
    # Same loading scheme as processTIB, but runs the query module over an
    # in-memory *data* object instead of a collection.
    filepath = cp.options['repository'] + '/' + source['name']
    module = imp.load_source ('', filepath)
    # module must implement 'run' function
    return module.run (source['argv'], data)
def httpcmd (node, req):
    # Serialize *req* (bson-aware via json_util) and POST it to the peer
    # node's "pathdump" REST endpoint.
    return restapi.post (node, json.dumps (req, default=json_util.default), "pathdump")
def checkSource (name, checksum):
    """Report whether *name* exists locally with a matching checksum.

    Returns [{node_id: True}] when the repository copy exists and its
    stored .md5 equals *checksum*, otherwise [{node_id: False}] (including
    when the .md5 file cannot be read).
    """
    filepath = cwd + '/' + cp.options['repository'] + '/' + name
    md5fpath = filepath + '.md5'
    node = getCurNodeID()
    try:
        with open (md5fpath, 'r') as f:
            stored = f.read()
    except IOError:
        return [{node: False}]
    matches = os.path.exists (filepath) and stored == checksum
    return [{node: matches}]
def saveSource (name, checksum, filedata):
    """Persist a query source file and its checksum into the repository.

    Writes *filedata* to the repository path for *name* and *checksum* to
    the companion .md5 file; returns [{node_id: True}] on success and
    [{node_id: False}] when either write fails with IOError.
    """
    filepath = cwd + '/' + cp.options['repository'] + '/' + name
    md5fpath = filepath + '.md5'
    node = getCurNodeID()
    try:
        with open (filepath, 'w') as f:
            f.write (filedata)
        with open (md5fpath, 'w') as f:
            f.write (checksum)
    except IOError:
        return [{node: False}]
    return [{node: True}]
def schedQuery (qname, interval, func, args):
    # Periodic executor thread body: run func(*args) every *interval*
    # seconds until the query's instQueries flag is cleared by
    # uninstallQuery, then remove the query's entry.
    global instQueries
    while qname in instQueries and instQueries[qname]:
        time.sleep (interval)
        # Re-check after sleeping: uninstallQuery may have flipped the flag
        if not instQueries[qname]:
            break
        result = func (*args)
        print result
    # remove query because uninstallQuery was executed
    if qname in instQueries:
        del instQueries[qname]
def installQuery (query, interval):
    # Install *query* on this node.  interval > 0 starts a periodic
    # schedQuery thread; interval == 0 subscribes the query to the live
    # flow-record stream (starting the listener on the first subscription).
    # Returns [{node_id: bool}] indicating success.
    global instQueries
    qname = query['name']
    # the query is already installed
    if qname in instQueries:
        return [{getCurNodeID(): False}]
    instQueries[qname] = True
    print "Installing query ",qname
    if interval > 0.0:
        t = Thread (target = schedQuery, args = (qname, interval, processTIB,
                                                 (query, pdapi.collection)))
        t.start()
    elif interval==0.0:
        # data should be a stream of TIB records being exported from memory
        subsQueries[qname]=query
        if len(subsQueries)==1:
            # First stream subscriber: open the SUB socket and start the
            # listener thread.
            initializeSubscription()
            t = Thread (target=listenRecords, args =(subsFilter,))
            t.start()
    return [{getCurNodeID(): True}]
def uninstallQuery (qname):
    # Mark *qname* as uninstalled; the worker threads observe the cleared
    # flag and clean up their own entries.
    global instQueries
    # no need for tight synchronization, so no locking mechanism is implemented
    if qname in instQueries:
        instQueries[qname] = False
    print "Uninstalling query ",qname
    print "Current installed queries",instQueries
    return [{getCurNodeID(): True}]
def handleLeafNode (req):
    """Dispatch a pathdump API request on a leaf node.

    Supported APIs: execQuery, check_source, send_source, installQuery and
    uninstallQuery.  An unknown API falls through and returns None,
    matching the original if/elif chain.
    """
    api = req['api']
    if api == 'execQuery':
        return processTIB (req['query'], pdapi.collection)
    if api == 'check_source':
        return checkSource (req['name'], req['checksum'])
    if api == 'send_source':
        return saveSource (req['name'], req['checksum'], req['file'])
    if api == 'installQuery':
        return installQuery (req['query'], req['interval'])
    if api == 'uninstallQuery':
        return uninstallQuery (req['query']['name'])
def getThreadArgument (local, req, node=None):
    """Return a (callable, args) pair for servicing *req* in a thread.

    When *local* is true, the request is mapped to the matching local
    handler; otherwise it is forwarded to *node* over HTTP via httpcmd.
    An unknown local API returns None (unchanged behavior).
    """
    if not local:
        return (httpcmd, (node, req))
    # Consistency fix: the original mixed `api == ...` and `req['api'] == ...`
    # in the same chain; use the extracted variable throughout.
    api = req['api']
    if api == 'execQuery':
        return (processTIB, (req['query'], pdapi.collection))
    if api == 'check_source':
        return (checkSource, (req['name'], req['checksum']))
    if api == 'send_source':
        return (saveSource, (req['name'], req['checksum'], req['file']))
    if api == 'installQuery':
        return (installQuery, (req['query'], req['interval']))
    if api == 'uninstallQuery':
        return (uninstallQuery, (req['query']['name'],))
|
server.py | import socket
import threading
# Listening endpoint for the chat server
IPADDR = "127.0.0.1"
PORT = 49152
# TCP server socket (socket type defaults to SOCK_STREAM for AF_INET)
sock_sv = socket.socket(socket.AF_INET)
sock_sv.bind((IPADDR, PORT))
sock_sv.listen()
# List of connected clients as (socket, address) tuples
client_list = []
def recv_client(sock, addr):
    """Per-client receive loop.

    Broadcasts every received chunk to all connected clients (including the
    sender), then removes the client and closes its socket when the peer
    disconnects cleanly (empty recv) or resets the connection.
    """
    while True:
        try:
            data = sock.recv(1024)
            if data == b"":
                # Orderly shutdown by the peer
                break
            print("$ say client:{}".format(addr))
            # Broadcast the received data to every connected client
            for client in client_list:
                client[0].send(data)
        except ConnectionResetError:
            break
    # Drop this client from the broadcast list
    client_list.remove((sock, addr))
    print("- close client:{}".format(addr))
    try:
        sock.shutdown(socket.SHUT_RDWR)
    except OSError:
        # Fix: shutdown() raises ENOTCONN when the peer already reset the
        # connection; the socket still has to be closed below.
        pass
    sock.close()
# Accept loop: register each new client and start a receive thread for it
while True:
    sock_cl, addr = sock_sv.accept()
    # Add the client to the broadcast list
    client_list.append((sock_cl, addr))
    print("+ join client:{}".format(addr))
    thread = threading.Thread(target=recv_client, args=(sock_cl, addr))
    thread.start()
main.py | # < Импорт модулей >
from PyQt5 import QtCore, QtGui, QtWidgets
from threading import Thread
from win32gui import GetWindowText, GetForegroundWindow
import tkinter as tk
import pymem, pymem.process, ctypes, requests, re, time, keyboard, pyautogui
# <...>
# < Variables >
_translate = QtCore.QCoreApplication.translate
# Placeholders initialized elsewhere after attaching to the game process:
pm = ''      # pymem handle (read_int/write_int are called on it later)
client = ''  # client.dll base address — TODO confirm, set outside this view
engine = ''  # engine.dll base address — TODO confirm, set outside this view
# Kill-flags: each feature loop below runs while its flag is False
Glow_E = False
Cham_s = False
TrigerB = False
BHo_p = False
Radar_H = False
NOFlas_h = False
Auto = False
Triger_T = 0   # trigger-bot delay, used as time.sleep() argument (seconds)
fov = 0        # FOV value written by FOVP()
# Currently selected T / CT color names (set by Color())
t = ''
ct = ''
# Color components written by ESP() (t_a/t_b/t_c, ct_a/ct_b/ct_c as floats)
# and Chams_f() (t_a1/t_b1/t_c1 as render-color ints)
t_a = 0
t_b = 0
t_c = 0
ct_a = 0
ct_b = 0
ct_c = 0
t_a1 = 0
t_b1 = 0
t_c1 = 0
# <...>
# < Offsets >
# Game memory offsets; zero placeholders presumably filled at runtime
# (e.g. via get_sig) — TODO confirm where they are assigned.
dwLocalPlayer = 0
dwGlowObjectManager = 0
dwEntityList = 0
dwForceAttack = 0
dwForceJump = 0
m_iCrosshairId = 0
m_iTeamNum = 0
m_iGlowIndex = 0
m_fFlags = 0
m_bSpotted = 0
m_flFlashMaxAlpha = 0
m_clrRender = 0
m_iDefaultFOV = 0x332C
def get_sig(modname, pattern, extra = 0, offset = 0, relative = True):
    """Scan *modname*'s memory for *pattern* and derive an address string.

    Reads the module image, finds the first regex match of *pattern*, reads
    the int at (match + offset) and adds *extra*.  Returns the result as a
    "0x..." hex string, relative to the module base when *relative* is true.
    Raises AttributeError if the pattern is not found (re.search returns None).
    """
    module = pymem.process.module_from_name(pm.process_handle, modname)
    # Renamed from `bytes` to avoid shadowing the builtin
    data = pm.read_bytes(module.lpBaseOfDll, module.SizeOfImage)
    match = re.search(pattern, data).start()
    non_relative = pm.read_int(module.lpBaseOfDll + match + offset) + extra
    yes_relative = pm.read_int(module.lpBaseOfDll + match + offset) + extra - module.lpBaseOfDll
    return "0x{:X}".format(yes_relative) if relative else "0x{:X}".format(non_relative)
# <...>
# < Функции >
# < Glow_ESP >
def Color():
    """Read the selected T/CT highlight colors from the UI combo boxes and
    store the matching component values in the module-level globals."""
    global t, ct, t_a, t_b, t_c, ct_a, ct_b, ct_c, t_a1, t_b1, t_c1
    t = ui.comboBox_1.currentText()
    ct = ui.comboBox_2.currentText()
    # Terrorist palette: name -> (t_a, t_a1, t_b, t_b1, t_c, t_c1)
    t_palette = {
        'Без подсветки': (0, 255, 0, 255, 0, 255),
        'Красный': (1, 250, 0, 17, 0, 5),
        'Зеленый': (0, 6, 1, 251, 0, 6),
        'Синий': (0, 50, 0, 6, 1, 245),
        'Желтый': (1, 214, 1, 240, 0, 32),
        'Белый': (1, 255, 1, 255, 1, 255),
        'Голубой': (0, 32, 1, 238, 1, 240),
        'Розовый': (1, 237, 0, 44, 1, 231),
    }
    # Counter-terrorist palette: name -> (ct_a, ct_b, ct_c)
    ct_palette = {
        'Без подсветки': (0, 0, 0),
        'Красный': (1, 0, 0),
        'Зеленый': (0, 1, 0),
        'Синий': (0, 0, 1),
        'Желтый': (1, 1, 0),
        'Белый': (1, 1, 1),
        'Голубой': (0, 1, 1),
        'Розовый': (1, 0, 1),
    }
    # An unrecognized selection leaves the previous values untouched,
    # exactly like the original if-chain where no branch matched.
    if t in t_palette:
        t_a, t_a1, t_b, t_b1, t_c, t_c1 = t_palette[t]
    if ct in ct_palette:
        ct_a, ct_b, ct_c = ct_palette[ct]
def ESP():
    # Glow-ESP loop: while the Glow_E kill-flag is clear, write the selected
    # team color into each entity's glow-object record every ~9 ms.
    while not Glow_E:
        time.sleep (0.009)
        glow_manager = pm.read_int(client + dwGlowObjectManager)
        for i in range(1, 32):
            entity = pm.read_int(client + dwEntityList + i * 0x10)
            if entity:
                entity_team_id = pm.read_int(entity + m_iTeamNum)
                entity_glow = pm.read_int(entity + m_iGlowIndex)
                # Each glow record is 0x38 bytes; floats are written at
                # +0x4/+0x8/+0xC/+0x10 and an int flag at +0x24
                if entity_team_id == 2: # Terrorist
                    pm.write_float(glow_manager + entity_glow * 0x38 + 0x4, float(t_a))
                    pm.write_float(glow_manager + entity_glow * 0x38 + 0x8, float(t_b))
                    pm.write_float(glow_manager + entity_glow * 0x38 + 0xC, float(t_c))
                    pm.write_float(glow_manager + entity_glow * 0x38 + 0x10, float(1))
                    pm.write_int(glow_manager + entity_glow * 0x38 + 0x24, 1)
                elif entity_team_id == 3: # Counter-terrorist
                    pm.write_float(glow_manager + entity_glow * 0x38 + 0x4, float(ct_a))
                    pm.write_float(glow_manager + entity_glow * 0x38 + 0x8, float(ct_b))
                    pm.write_float(glow_manager + entity_glow * 0x38 + 0xC, float(ct_c))
                    pm.write_float(glow_manager + entity_glow * 0x38 + 0x10, float(1))
                    pm.write_int(glow_manager + entity_glow * 0x38 + 0x24, 1)
# <...>
# < Chams >
def Chams_f():
    # One-shot chams: write the t_*1 components into every entity's render
    # color, then set the Cham_s kill-flag so the loop exits after the
    # first successful full pass.
    global Cham_s
    while not Cham_s:
        try:
            for ent_id in range(1, 32):
                ent = pm.read_int(client + dwEntityList + ent_id * 0x10)
                pm.write_int(ent + m_clrRender, t_a1) # Red
                pm.write_int(ent + m_clrRender + 1, t_b1) # Green
                pm.write_int(ent + m_clrRender + 2, t_c1) # Blue
                pm.write_int(ent + m_clrRender + 3, 0) # Alpha
            Cham_s = True
        except:
            # NOTE(review): bare except deliberately retries on any
            # read/write failure, but it also swallows KeyboardInterrupt —
            # consider narrowing.
            pass
# <...>
# < TrigerBot>
def TrigerBo_t():
    # Trigger-bot loop: when the crosshair entity is a valid enemy, wait
    # Triger_T seconds and fire once by writing the +attack force value.
    while not TrigerB:
        # Only act while the game window has focus
        # NOTE(review): this branch spins without sleeping while unfocused
        if not GetWindowText(GetForegroundWindow()) == "Counter-Strike: Global Offensive":
            continue
        player = pm.read_int(client + dwLocalPlayer)
        entity_id = pm.read_int(player + m_iCrosshairId)
        entity = pm.read_int(client + dwEntityList + (entity_id - 1) * 0x10)
        entity_team = pm.read_int(entity + m_iTeamNum)
        player_team = pm.read_int(player + m_iTeamNum)
        if entity_id > 0 and entity_id <= 64 and player_team != entity_team:
            time.sleep (Triger_T)
            pm.write_int(client + dwForceAttack, 6)
        time.sleep(0.01)
# <...>
# < BunnyHop >
def BunnyHo_p():
    # Bunny-hop loop: while space is held and the player flags read 257
    # (on ground — TODO confirm flag value meaning), pulse the +jump
    # force value.
    while not BHo_p:
        if pm.read_int(client + dwLocalPlayer):
            player = pm.read_int(client + dwLocalPlayer)
            force_jump = client + dwForceJump
            on_ground = pm.read_int(player + m_fFlags)
            if keyboard.is_pressed("space"):
                if on_ground == 257:
                    pm.write_int(force_jump, 5)
                    time.sleep(0.17)
                    pm.write_int(force_jump, 4)
# <...>
# < RadarHack >
def RadarHac_k():
    # Radar-hack loop: flag every entity on the opposing team as "spotted"
    # by writing 1 into its m_bSpotted field.
    while not Radar_H:
        if pm.read_int(client + dwLocalPlayer):
            localplayer = pm.read_int(client + dwLocalPlayer)
            localplayer_team = pm.read_int(localplayer + m_iTeamNum)
            for i in range(64):
                if pm.read_int(client + dwEntityList + i * 0x10):
                    entity = pm.read_int(client + dwEntityList + i * 0x10)
                    entity_team = pm.read_int(entity + m_iTeamNum)
                    if entity_team != localplayer_team:
                        pm.write_int(entity + m_bSpotted, 1)
# <...>
# < NOFlash >
def NF1ash():
    # No-flash loop: once per second clamp the local player's maximum
    # flashbang alpha to 0.
    while not NOFlas_h:
        player = pm.read_int(client + dwLocalPlayer)
        if player:
            flash_value = player + m_flFlashMaxAlpha
            if flash_value:
                # NOTE(review): this tests the computed *address*, not the
                # value stored at it, so it is effectively always true.
                pm.write_float(flash_value, float(0))
        time.sleep(1)
# <...>
# < AutoAccept >
root = tk.Tk()
# NOTE(review): the names look swapped — h holds the screen *width* and
# w the *height*; the click coordinates below depend on this pairing.
h = root.winfo_screenwidth() # screen width
w = root.winfo_screenheight() # screen height
h2 = h/2          # horizontal screen center
w2 = w/2 + 30     # 30 px below vertical center (first click target)
w3 = w/2 + 60     # 60 px below vertical center (second click target)
def AutoAccep_t():
    # Auto-accept loop: while the game window has focus, click the two
    # screen positions (match-accept buttons, presumably) every 3 seconds.
    while not Auto:
        # NOTE(review): this branch spins without sleeping while unfocused
        if not GetWindowText(GetForegroundWindow()) == "Counter-Strike: Global Offensive":
            continue
        time.sleep (3)
        pyautogui.click(h2,w2)
        pyautogui.click(h2,w3)
# <...>
# < FOV_P >
def FOVP():
    # One-shot FOV changer: read the current m_iDefaultFOV, print it, then
    # overwrite it with the global `fov`.
    print ('fov')
    # NOTE(review): this dereferences dwEntityList as the player base while
    # every other function uses dwLocalPlayer — verify this is intended.
    player = pm.read_int(client + dwEntityList)
    iFOV = pm.read_int(player + m_iDefaultFOV)
    print(iFOV)
    pm.write_int(player + m_iDefaultFOV, fov)
# <...>
# <...>
class Ui_Main_Menu(object):
def setupUi(self, Main_Menu):
Main_Menu.setObjectName("Main_Menu")
Main_Menu.resize(365, 545)
Main_Menu.setMaximumSize(QtCore.QSize(365, 545))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("res/mini.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Main_Menu.setWindowIcon(icon)
self.backgr = QtWidgets.QLabel(Main_Menu)
self.backgr.setGeometry(QtCore.QRect(0, 0, 761, 561))
self.backgr.setStyleSheet("background-color: rgb(15,15,15)")
self.backgr.setText("")
self.backgr.setObjectName("backgr")
self.label = QtWidgets.QLabel(Main_Menu)
self.label.setGeometry(QtCore.QRect(50, 10, 85, 36))
self.label.setStyleSheet("font: 24pt \"mr_Franklin GothicG\";")
self.label.setObjectName("label")
self.label_4 = QtWidgets.QLabel(Main_Menu)
self.label_4.setGeometry(QtCore.QRect(40, 50, 8, 29))
self.label_4.setStyleSheet("font: 20pt \"mr_FranklinGothicG\";")
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(Main_Menu)
self.label_5.setGeometry(QtCore.QRect(30, 80, 18, 29))
self.label_5.setStyleSheet("font: 20pt \"mr_FranklinGothicG\";")
self.label_5.setObjectName("label_5")
self.comboBox_1 = QtWidgets.QComboBox(Main_Menu)
self.comboBox_1.setGeometry(QtCore.QRect(60, 50, 111, 21))
self.comboBox_1.setStyleSheet("")
self.comboBox_1.setObjectName("comboBox_1")
self.comboBox_1.addItem("")
self.comboBox_1.addItem("")
self.comboBox_1.addItem("")
self.comboBox_1.addItem("")
self.comboBox_1.addItem("")
self.comboBox_1.addItem("")
self.comboBox_1.addItem("")
self.comboBox_1.addItem("")
self.comboBox_2 = QtWidgets.QComboBox(Main_Menu)
self.comboBox_2.setGeometry(QtCore.QRect(60, 80, 111, 21))
self.comboBox_2.setStyleSheet("")
self.comboBox_2.setObjectName("comboBox_2")
self.comboBox_2.addItem("")
self.comboBox_2.addItem("")
self.comboBox_2.addItem("")
self.comboBox_2.addItem("")
self.comboBox_2.addItem("")
self.comboBox_2.addItem("")
self.comboBox_2.addItem("")
self.comboBox_2.addItem("")
self.label_6 = QtWidgets.QLabel(Main_Menu)
self.label_6.setGeometry(QtCore.QRect(20, 110, 32, 29))
self.label_6.setStyleSheet("font: 20pt \"mr_FranklinGothicG\";")
self.label_6.setObjectName("label_6")
self.comboBox_3 = QtWidgets.QComboBox(Main_Menu)
self.comboBox_3.setGeometry(QtCore.QRect(60, 110, 111, 22))
self.comboBox_3.setObjectName("comboBox_3")
self.comboBox_3.addItem("")
self.comboBox_3.addItem("")
self.pushButton_1 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_1.setGeometry(QtCore.QRect(20, 140, 71, 31))
self.pushButton_1.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_1.setObjectName("pushButton_1")
self.pushButton_2 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_2.setGeometry(QtCore.QRect(100, 140, 71, 31))
self.pushButton_2.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_2.setObjectName("pushButton_2")
self.line = QtWidgets.QFrame(Main_Menu)
self.line.setGeometry(QtCore.QRect(10, 170, 171, 20))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.line_2 = QtWidgets.QFrame(Main_Menu)
self.line_2.setGeometry(QtCore.QRect(170, 30, 20, 151))
self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.line_3 = QtWidgets.QFrame(Main_Menu)
self.line_3.setGeometry(QtCore.QRect(0, 30, 20, 151))
self.line_3.setFrameShape(QtWidgets.QFrame.VLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.line_4 = QtWidgets.QFrame(Main_Menu)
self.line_4.setGeometry(QtCore.QRect(10, 20, 31, 21))
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.line_5 = QtWidgets.QFrame(Main_Menu)
self.line_5.setGeometry(QtCore.QRect(150, 20, 31, 21))
self.line_5.setFrameShape(QtWidgets.QFrame.HLine)
self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_5.setObjectName("line_5")
self.label_2 = QtWidgets.QLabel(Main_Menu)
self.label_2.setGeometry(QtCore.QRect(50, 190, 85, 38))
self.label_2.setStyleSheet("font: 24pt \"mr_Franklin GothicG\";")
self.label_2.setObjectName("label_2")
self.label_7 = QtWidgets.QLabel(Main_Menu)
self.label_7.setGeometry(QtCore.QRect(20, 230, 48, 29))
self.label_7.setStyleSheet("font: 20pt \"mr_FranklinGothicG\";")
self.label_7.setObjectName("label_7")
self.lineEdit = QtWidgets.QLineEdit(Main_Menu)
self.lineEdit.setGeometry(QtCore.QRect(70, 230, 41, 31))
self.lineEdit.setStyleSheet("font: 18pt \"mr_Franklin GothicG\";\n"
"color: white;\n"
"background-color: rgb(25,25,25);")
self.lineEdit.setObjectName("lineEdit")
self.label_8 = QtWidgets.QLabel(Main_Menu)
self.label_8.setGeometry(QtCore.QRect(20, 270, 41, 29))
self.label_8.setStyleSheet("font: 20pt \"mr_FranklinGothicG\";")
self.label_8.setObjectName("label_8")
self.pushButton_3 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_3.setGeometry(QtCore.QRect(60, 270, 75, 31))
self.pushButton_3.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(15,15,15);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 16px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"\n"
"QPushButton:pressed {\n"
" color: rgb(161,161,161);\n"
"}")
self.pushButton_3.setObjectName("pushButton_3")
self.pushButton_4 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_4.setGeometry(QtCore.QRect(20, 300, 71, 31))
self.pushButton_4.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_4.setObjectName("pushButton_4")
self.pushButton_5 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_5.setGeometry(QtCore.QRect(100, 300, 71, 31))
self.pushButton_5.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_5.setObjectName("pushButton_5")
self.line_6 = QtWidgets.QFrame(Main_Menu)
self.line_6.setGeometry(QtCore.QRect(10, 330, 171, 20))
self.line_6.setFrameShape(QtWidgets.QFrame.HLine)
self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_6.setObjectName("line_6")
self.line_7 = QtWidgets.QFrame(Main_Menu)
self.line_7.setGeometry(QtCore.QRect(170, 210, 20, 131))
self.line_7.setFrameShape(QtWidgets.QFrame.VLine)
self.line_7.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_7.setObjectName("line_7")
self.line_8 = QtWidgets.QFrame(Main_Menu)
self.line_8.setGeometry(QtCore.QRect(0, 210, 20, 131))
self.line_8.setFrameShape(QtWidgets.QFrame.VLine)
self.line_8.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_8.setObjectName("line_8")
self.line_9 = QtWidgets.QFrame(Main_Menu)
self.line_9.setGeometry(QtCore.QRect(10, 200, 31, 21))
self.line_9.setFrameShape(QtWidgets.QFrame.HLine)
self.line_9.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_9.setObjectName("line_9")
self.line_10 = QtWidgets.QFrame(Main_Menu)
self.line_10.setGeometry(QtCore.QRect(150, 200, 31, 21))
self.line_10.setFrameShape(QtWidgets.QFrame.HLine)
self.line_10.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_10.setObjectName("line_10")
self.label_3 = QtWidgets.QLabel(Main_Menu)
self.label_3.setGeometry(QtCore.QRect(70, 350, 43, 38))
self.label_3.setStyleSheet("font: 24pt \"mr_Franklin GothicG\";")
self.label_3.setObjectName("label_3")
self.line_11 = QtWidgets.QFrame(Main_Menu)
self.line_11.setGeometry(QtCore.QRect(10, 530, 171, 20))
self.line_11.setFrameShape(QtWidgets.QFrame.HLine)
self.line_11.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_11.setObjectName("line_11")
self.line_12 = QtWidgets.QFrame(Main_Menu)
self.line_12.setGeometry(QtCore.QRect(0, 370, 20, 171))
self.line_12.setFrameShape(QtWidgets.QFrame.VLine)
self.line_12.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_12.setObjectName("line_12")
self.label_9 = QtWidgets.QLabel(Main_Menu)
self.label_9.setGeometry(QtCore.QRect(20, 410, 41, 29))
self.label_9.setStyleSheet("font: 20pt \"mr_FranklinGothicG\";")
self.label_9.setObjectName("label_9")
self.label_10 = QtWidgets.QLabel(Main_Menu)
self.label_10.setGeometry(QtCore.QRect(20, 440, 41, 29))
self.label_10.setStyleSheet("font: 20pt \"mr_FranklinGothicG\";")
self.label_10.setObjectName("label_10")
self.label_11 = QtWidgets.QLabel(Main_Menu)
self.label_11.setGeometry(QtCore.QRect(20, 470, 46, 29))
self.label_11.setStyleSheet("font: 20pt \"mr_FranklinGothicG\";")
self.label_11.setObjectName("label_11")
self.label_12 = QtWidgets.QLabel(Main_Menu)
self.label_12.setGeometry(QtCore.QRect(20, 500, 48, 29))
self.label_12.setStyleSheet("font: 20pt \"mr_FranklinGothicG\";")
self.label_12.setObjectName("label_12")
self.pushButton_6 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_6.setGeometry(QtCore.QRect(80, 410, 41, 21))
self.pushButton_6.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_6.setObjectName("pushButton_6")
self.pushButton_7 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_7.setGeometry(QtCore.QRect(130, 410, 41, 21))
self.pushButton_7.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_7.setObjectName("pushButton_7")
self.pushButton_8 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_8.setGeometry(QtCore.QRect(80, 500, 41, 21))
self.pushButton_8.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_8.setObjectName("pushButton_8")
self.pushButton_9 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_9.setGeometry(QtCore.QRect(130, 500, 41, 21))
self.pushButton_9.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_9.setObjectName("pushButton_9")
self.pushButton_10 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_10.setGeometry(QtCore.QRect(80, 470, 41, 21))
self.pushButton_10.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_10.setObjectName("pushButton_10")
self.pushButton_11 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_11.setGeometry(QtCore.QRect(130, 470, 41, 21))
self.pushButton_11.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_11.setObjectName("pushButton_11")
self.pushButton_12 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_12.setGeometry(QtCore.QRect(80, 440, 41, 21))
self.pushButton_12.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_12.setObjectName("pushButton_12")
self.pushButton_13 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_13.setGeometry(QtCore.QRect(130, 440, 41, 21))
self.pushButton_13.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_13.setObjectName("pushButton_13")
self.line_13 = QtWidgets.QFrame(Main_Menu)
self.line_13.setGeometry(QtCore.QRect(170, 370, 20, 171))
self.line_13.setFrameShape(QtWidgets.QFrame.VLine)
self.line_13.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_13.setObjectName("line_13")
self.line_14 = QtWidgets.QFrame(Main_Menu)
self.line_14.setGeometry(QtCore.QRect(130, 360, 51, 21))
self.line_14.setFrameShape(QtWidgets.QFrame.HLine)
self.line_14.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_14.setObjectName("line_14")
self.line_15 = QtWidgets.QFrame(Main_Menu)
self.line_15.setGeometry(QtCore.QRect(10, 360, 51, 21))
self.line_15.setFrameShape(QtWidgets.QFrame.HLine)
self.line_15.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_15.setObjectName("line_15")
self.label_13 = QtWidgets.QLabel(Main_Menu)
self.label_13.setGeometry(QtCore.QRect(250, 350, 52, 38))
self.label_13.setStyleSheet("font: 24pt \"mr_Franklin GothicG\";")
self.label_13.setObjectName("label_13")
self.pushButton_15 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_15.setGeometry(QtCore.QRect(200, 460, 151, 31))
self.pushButton_15.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_15.setObjectName("pushButton_15")
self.pushButton_16 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_16.setGeometry(QtCore.QRect(200, 500, 151, 31))
self.pushButton_16.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 12px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_16.setObjectName("pushButton_16")
self.line_16 = QtWidgets.QFrame(Main_Menu)
self.line_16.setGeometry(QtCore.QRect(180, 370, 20, 171))
self.line_16.setFrameShape(QtWidgets.QFrame.VLine)
self.line_16.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_16.setObjectName("line_16")
self.line_17 = QtWidgets.QFrame(Main_Menu)
self.line_17.setGeometry(QtCore.QRect(190, 530, 171, 20))
self.line_17.setFrameShape(QtWidgets.QFrame.HLine)
self.line_17.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_17.setObjectName("line_17")
self.line_18 = QtWidgets.QFrame(Main_Menu)
self.line_18.setGeometry(QtCore.QRect(350, 370, 20, 171))
self.line_18.setFrameShape(QtWidgets.QFrame.VLine)
self.line_18.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_18.setObjectName("line_18")
self.line_19 = QtWidgets.QFrame(Main_Menu)
self.line_19.setGeometry(QtCore.QRect(190, 360, 51, 21))
self.line_19.setFrameShape(QtWidgets.QFrame.HLine)
self.line_19.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_19.setObjectName("line_19")
self.line_20 = QtWidgets.QFrame(Main_Menu)
self.line_20.setGeometry(QtCore.QRect(310, 360, 51, 21))
self.line_20.setFrameShape(QtWidgets.QFrame.HLine)
self.line_20.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_20.setObjectName("line_20")
self.label_14 = QtWidgets.QLabel(Main_Menu)
self.label_14.setGeometry(QtCore.QRect(230, 190, 89, 38))
self.label_14.setStyleSheet("font: 24pt \"mr_Franklin GothicG\";")
self.label_14.setObjectName("label_14")
self.line_21 = QtWidgets.QFrame(Main_Menu)
self.line_21.setGeometry(QtCore.QRect(190, 330, 171, 20))
self.line_21.setFrameShape(QtWidgets.QFrame.HLine)
self.line_21.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_21.setObjectName("line_21")
self.line_22 = QtWidgets.QFrame(Main_Menu)
self.line_22.setGeometry(QtCore.QRect(180, 210, 20, 131))
self.line_22.setFrameShape(QtWidgets.QFrame.VLine)
self.line_22.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_22.setObjectName("line_22")
self.line_23 = QtWidgets.QFrame(Main_Menu)
self.line_23.setGeometry(QtCore.QRect(190, 200, 31, 21))
self.line_23.setFrameShape(QtWidgets.QFrame.HLine)
self.line_23.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_23.setObjectName("line_23")
self.label_15 = QtWidgets.QLabel(Main_Menu)
self.label_15.setGeometry(QtCore.QRect(200, 230, 58, 29))
self.label_15.setStyleSheet("font: 20pt \"mr_FranklinGothicG\";")
self.label_15.setObjectName("label_15")
self.lineEdit_2 = QtWidgets.QLineEdit(Main_Menu)
self.lineEdit_2.setGeometry(QtCore.QRect(260, 230, 61, 31))
self.lineEdit_2.setStyleSheet("font: 18pt \"mr_Franklin GothicG\";\n"
"color: white;\n"
"background-color: rgb(25,25,25);")
self.lineEdit_2.setObjectName("lineEdit_2")
self.pushButton_17 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_17.setGeometry(QtCore.QRect(200, 270, 151, 21))
self.pushButton_17.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_17.setObjectName("pushButton_17")
self.pushButton_18 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_18.setGeometry(QtCore.QRect(200, 300, 151, 31))
self.pushButton_18.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_18.setObjectName("pushButton_18")
self.line_24 = QtWidgets.QFrame(Main_Menu)
self.line_24.setGeometry(QtCore.QRect(350, 210, 20, 131))
self.line_24.setFrameShape(QtWidgets.QFrame.VLine)
self.line_24.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_24.setObjectName("line_24")
self.line_25 = QtWidgets.QFrame(Main_Menu)
self.line_25.setGeometry(QtCore.QRect(330, 200, 31, 21))
self.line_25.setFrameShape(QtWidgets.QFrame.HLine)
self.line_25.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_25.setObjectName("line_25")
self.pushButton_19 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_19.setGeometry(QtCore.QRect(200, 420, 151, 31))
self.pushButton_19.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_19.setObjectName("pushButton_19")
self.pushButton_20 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_20.setGeometry(QtCore.QRect(200, 390, 151, 21))
self.pushButton_20.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_20.setObjectName("pushButton_20")
self.pushButton_14 = QtWidgets.QPushButton(Main_Menu)
self.pushButton_14.setGeometry(QtCore.QRect(330, 230, 21, 31))
self.pushButton_14.setStyleSheet("QPushButton{\n"
" color: white;\n"
" background-color: rgb(30,30,30);\n"
" width: 75px;\n"
" height: 50px;\n"
" font-size: 14px;\n"
" font-weight: bold;\n"
" border: none;\n"
" text-align: center;\n"
"}\n"
"\n"
"QPushButton:hover {\n"
" background-color: rgb(35,35,35);\n"
"}\n"
"\n"
"QPushButton:pressed {\n"
" background-color: rgb(45,45,45);\n"
"}")
self.pushButton_14.setObjectName("pushButton_14")
self.label_16 = QtWidgets.QLabel(Main_Menu)
self.label_16.setGeometry(QtCore.QRect(190, -10, 141, 200))
self.label_16.setText("")
self.label_16.setPixmap(QtGui.QPixmap("res/Fon.png"))
self.label_16.setObjectName("label_16")
self.backgr.raise_()
self.label.raise_()
self.label_4.raise_()
self.label_5.raise_()
self.comboBox_1.raise_()
self.comboBox_2.raise_()
self.label_6.raise_()
self.comboBox_3.raise_()
self.line.raise_()
self.line_2.raise_()
self.line_3.raise_()
self.line_4.raise_()
self.line_5.raise_()
self.label_2.raise_()
self.label_7.raise_()
self.lineEdit.raise_()
self.label_8.raise_()
self.pushButton_3.raise_()
self.line_6.raise_()
self.line_7.raise_()
self.line_8.raise_()
self.line_9.raise_()
self.line_10.raise_()
self.label_3.raise_()
self.line_11.raise_()
self.line_12.raise_()
self.label_9.raise_()
self.label_10.raise_()
self.label_11.raise_()
self.label_12.raise_()
self.line_13.raise_()
self.line_14.raise_()
self.line_15.raise_()
self.label_13.raise_()
self.line_16.raise_()
self.line_17.raise_()
self.line_18.raise_()
self.line_19.raise_()
self.line_20.raise_()
self.label_14.raise_()
self.line_21.raise_()
self.line_22.raise_()
self.line_23.raise_()
self.label_15.raise_()
self.lineEdit_2.raise_()
self.line_24.raise_()
self.line_25.raise_()
self.label_16.raise_()
self.pushButton_1.raise_()
self.pushButton_2.raise_()
self.pushButton_4.raise_()
self.pushButton_5.raise_()
self.pushButton_6.raise_()
self.pushButton_7.raise_()
self.pushButton_12.raise_()
self.pushButton_13.raise_()
self.pushButton_10.raise_()
self.pushButton_11.raise_()
self.pushButton_8.raise_()
self.pushButton_9.raise_()
self.pushButton_16.raise_()
self.pushButton_15.raise_()
self.pushButton_19.raise_()
self.pushButton_20.raise_()
self.pushButton_18.raise_()
self.pushButton_17.raise_()
self.pushButton_14.raise_()
self.retranslateUi(Main_Menu)
QtCore.QMetaObject.connectSlotsByName(Main_Menu)
def retranslateUi(self, Main_Menu):
    """Install all user-visible texts on the main-menu widgets.

    Standard pyuic-generated method: every literal is routed through Qt's
    translation helper (``_translate``) so the strings can be localized
    without touching the layout code in ``setupUi``.
    """
    Main_Menu.setWindowTitle(_translate("Main_Menu", "DupliDup | Создатель: L1mPeX"))
    # --- WallHack section: header, team labels and color combo boxes ---
    self.label.setText(_translate("Main_Menu", "<html><head/><body><p><span style=\" color:#0055ff;\">WallHack</span></p></body></html>"))
    self.label_4.setText(_translate("Main_Menu", "<html><head/><body><p><span style=\" font-size:18pt; color:#00ff00;\">T</span></p></body></html>"))
    self.label_5.setText(_translate("Main_Menu", "<html><head/><body><p><span style=\" font-size:18pt; color:#00ff00;\">CT</span></p></body></html>"))
    # Glow colors for the T side (item texts are Russian color names).
    self.comboBox_1.setItemText(0, _translate("Main_Menu", "Красный"))
    self.comboBox_1.setItemText(1, _translate("Main_Menu", "Синий"))
    self.comboBox_1.setItemText(2, _translate("Main_Menu", "Зеленый"))
    self.comboBox_1.setItemText(3, _translate("Main_Menu", "Желтый"))
    self.comboBox_1.setItemText(4, _translate("Main_Menu", "Белый"))
    self.comboBox_1.setItemText(5, _translate("Main_Menu", "Голубой"))
    self.comboBox_1.setItemText(6, _translate("Main_Menu", "Розовый"))
    self.comboBox_1.setItemText(7, _translate("Main_Menu", "Без подсветки"))
    # Glow colors for the CT side.
    self.comboBox_2.setItemText(0, _translate("Main_Menu", "Синий"))
    self.comboBox_2.setItemText(1, _translate("Main_Menu", "Красный"))
    self.comboBox_2.setItemText(2, _translate("Main_Menu", "Зеленый"))
    self.comboBox_2.setItemText(3, _translate("Main_Menu", "Желтый"))
    self.comboBox_2.setItemText(4, _translate("Main_Menu", "Белый"))
    self.comboBox_2.setItemText(5, _translate("Main_Menu", "Голубой"))
    self.comboBox_2.setItemText(6, _translate("Main_Menu", "Розовый"))
    self.comboBox_2.setItemText(7, _translate("Main_Menu", "Без подсветки"))
    self.label_6.setText(_translate("Main_Menu", "<html><head/><body><p><span style=\" font-size:18pt; color:#ffff00;\">Type</span></p></body></html>"))
    # Wallhack implementation selector.
    self.comboBox_3.setItemText(0, _translate("Main_Menu", "Glow_ESP"))
    self.comboBox_3.setItemText(1, _translate("Main_Menu", "Chams"))
    self.pushButton_1.setText(_translate("Main_Menu", "ON"))
    self.pushButton_2.setText(_translate("Main_Menu", "OFF"))
    # --- TrigerBot section ---
    self.label_2.setText(_translate("Main_Menu", "<html><head/><body><p><span style=\" color:#0055ff;\">TrigerBot</span></p></body></html>"))
    self.label_7.setText(_translate("Main_Menu", "<html><head/><body><p><span style=\" font-size:18pt; color:#ffff00;\">Delay: </span></p></body></html>"))
    self.lineEdit.setText(_translate("Main_Menu", "0.1"))
    self.pushButton_4.setText(_translate("Main_Menu", "ON"))
    self.pushButton_5.setText(_translate("Main_Menu", "OFF"))
    # --- Misc section: Bhop / Radar / NFlash / Accept toggles ---
    self.label_3.setText(_translate("Main_Menu", "<html><head/><body><p><span style=\" color:#0055ff;\">Misc</span></p></body></html>"))
    self.label_9.setText(_translate("Main_Menu", "<html><head/><body><p><span style=\" font-size:18pt; color:#ffff00;\">Bhop </span></p></body></html>"))
    self.label_10.setText(_translate("Main_Menu", "<html><head/><body><p><span style=\" font-size:18pt; color:#ffff00;\">Radar</span></p></body></html>"))
    self.label_11.setText(_translate("Main_Menu", "<html><head/><body><p><span style=\" font-size:18pt; color:#ffff00;\">NFlash</span></p></body></html>"))
    self.label_12.setText(_translate("Main_Menu", "<html><head/><body><p><span style=\" font-size:18pt; color:#ffff00;\">Accept</span></p></body></html>"))
    self.pushButton_6.setText(_translate("Main_Menu", "ON"))
    self.pushButton_7.setText(_translate("Main_Menu", "OFF"))
    self.pushButton_8.setText(_translate("Main_Menu", "ON"))
    self.pushButton_9.setText(_translate("Main_Menu", "OFF"))
    self.pushButton_10.setText(_translate("Main_Menu", "ON"))
    self.pushButton_11.setText(_translate("Main_Menu", "OFF"))
    self.pushButton_12.setText(_translate("Main_Menu", "ON"))
    self.pushButton_13.setText(_translate("Main_Menu", "OFF"))
    # --- Game section: offsets and misc actions ---
    self.label_13.setText(_translate("Main_Menu", "<html><head/><body><p><span style=\" color:#0055ff;\">Game</span></p></body></html>"))
    self.pushButton_15.setText(_translate("Main_Menu", "Скачать оффсеты"))
    self.pushButton_16.setText(_translate("Main_Menu", "Автономные оффсеты"))
    self.label_14.setText(_translate("Main_Menu", "<html><head/><body><p><span style=\" color:#0055ff;\">Perilously</span></p></body></html>"))
    self.label_15.setText(_translate("Main_Menu", "<html><head/><body><p><span style=\" font-size:18pt; color:#ffff00;\">FOV_P: </span></p></body></html>"))
    self.lineEdit_2.setText(_translate("Main_Menu", "90"))
    self.pushButton_17.setText(_translate("Main_Menu", "Показать деньги"))
    self.pushButton_18.setText(_translate("Main_Menu", "Консольное ВХ"))
    self.pushButton_19.setText(_translate("Main_Menu", "> Подключиться <"))
    self.pushButton_20.setText(_translate("Main_Menu", "Отключить все"))
    self.pushButton_14.setText(_translate("Main_Menu", "<"))
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Main_Menu = QtWidgets.QWidget()
    ui = Ui_Main_Menu()
    ui.setupUi(Main_Menu)
    Main_Menu.show()

    def Connect_Game():
        """Attach to the running csgo.exe process and cache module base addresses."""
        global pm, client, engine
        try:
            pm = pymem.Pymem("csgo.exe")
            client = pymem.process.module_from_name(pm.process_handle, "client.dll").lpBaseOfDll
            engine = pymem.process.module_from_name(pm.process_handle, "engine.dll").lpBaseOfDll
            ui.pushButton_19.setText(_translate("Main_Menu", "> Подключено <"))
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
            ctypes.windll.user32.MessageBoxW(None, 'Не удалось получить доступ к процессу - csgo.exe.', 'Ошибка', 0)
            ui.pushButton_19.setText(_translate("Main_Menu", "> Подключиться <"))

    def DownOffsets():
        """Download current offsets and netvars from the hazedumper csgo.json."""
        global dwLocalPlayer, dwGlowObjectManager, dwEntityList, dwForceAttack, dwForceJump, m_iCrosshairId, m_iTeamNum, m_iGlowIndex, m_fFlags, m_bSpotted, m_flFlashMaxAlpha, m_clrRender
        try:
            url = 'https://raw.githubusercontent.com/frk1/hazedumper/master/csgo.json'
            response = requests.get(url).json()
            dwLocalPlayer = int(response["signatures"]["dwLocalPlayer"])
            dwGlowObjectManager = int(response["signatures"]["dwGlowObjectManager"])
            dwEntityList = int(response["signatures"]["dwEntityList"])
            dwForceAttack = int(response["signatures"]["dwForceAttack"])
            dwForceJump = int(response["signatures"]["dwForceJump"])
            m_iCrosshairId = int(response["netvars"]["m_iCrosshairId"])
            m_iTeamNum = int(response["netvars"]["m_iTeamNum"])
            m_iGlowIndex = int(response["netvars"]["m_iGlowIndex"])
            m_fFlags = int(response["netvars"]["m_fFlags"])
            m_bSpotted = int(response["netvars"]["m_bSpotted"])
            # BUGFIX: m_flFlashMaxAlpha was declared global and printed below
            # but never assigned, raising NameError inside the try-block and
            # showing a misleading "network error" message box.
            m_flFlashMaxAlpha = int(response["netvars"]["m_flFlashMaxAlpha"])
            m_clrRender = int(response["netvars"]["m_clrRender"])
            print(dwLocalPlayer, dwGlowObjectManager, dwEntityList, dwForceAttack, dwForceJump, m_iCrosshairId, m_iTeamNum, m_iGlowIndex, m_fFlags, m_bSpotted, m_flFlashMaxAlpha, m_clrRender)
        except Exception:
            ctypes.windll.user32.MessageBoxW(None, 'Не удалось получить доступ к сетевому файлу - https://raw.githubusercontent.com/frk1/hazedumper/master/csgo.json', 'Ошибка', 0)

    def AutoOffsets():
        """Resolve offsets locally by signature-scanning client.dll."""
        # < Thanks minicx :D >
        global dwLocalPlayer, dwGlowObjectManager, dwEntityList, dwForceAttack, dwForceJump
        dwLocalPlayer = int(get_sig('client.dll', rb'\x8D\x34\x85....\x89\x15....\x8B\x41\x08\x8B\x48\x04\x83\xF9\xFF', 4, 3), 0)
        dwGlowObjectManager = int(get_sig('client.dll', rb'\xA1....\xA8\x01\x75\x4B', 4, 1), 0)
        dwEntityList = int(get_sig('client.dll', rb'\xBB....\x83\xFF\x01\x0F\x8C....\x3B\xF8', 0, 1), 0)
        dwForceAttack = int(get_sig('client.dll', rb'\x89\x0D....\x8B\x0D....\x8B\xF2\x8B\xC1\x83\xCE\x04', 0, 2), 0)
        dwForceJump = int(get_sig('client.dll', rb'\x8B\x0D....\x8B\xD6\x8B\xC1\x83\xCA\x02', 0, 2), 0)
        print(dwLocalPlayer, dwGlowObjectManager, dwEntityList, dwForceAttack, dwForceJump)

    def WallHack():
        """Start the selected wallhack variant (Glow ESP or Chams) in a worker thread."""
        global Glow_E, Cham_s
        # The *_stop flags are False while the worker loops should keep running.
        Glow_E = False
        Cham_s = False
        check = ui.comboBox_3.currentText()
        print(check)
        Color()
        if check == 'Glow_ESP':
            Thread(target=ESP).start()
        else:
            Thread(target=Chams_f).start()

    def WallHack_Stop():
        """Signal the active wallhack worker to stop."""
        global Glow_E, Cham_s
        check = ui.comboBox_3.currentText()
        print(check)
        if check == 'Glow_ESP':
            Glow_E = True
        else:
            Cham_s = True
            ctypes.windll.user32.MessageBoxW(None, 'Chams нельзя отключить :/', 'Преколяс', 0)

    def TrigerBot():
        """Start the trigger-bot worker with the delay read from the UI."""
        global TrigerB, Triger_T
        TrigerB = False
        Triger_T = float(ui.lineEdit.text())
        print(Triger_T)
        Thread(target=TrigerBo_t).start()

    def TrigerBot_Stop():
        """Signal the trigger-bot worker to stop."""
        global TrigerB
        TrigerB = True

    def BunnyHop():
        """Start the bunny-hop worker."""
        global BHo_p
        BHo_p = False
        Thread(target=BunnyHo_p).start()

    def BunnyHop_Stop():
        """Signal the bunny-hop worker to stop."""
        global BHo_p
        BHo_p = True

    def RadarHack():
        """Start the radar-hack worker."""
        global Radar_H
        Radar_H = False
        Thread(target=RadarHac_k).start()

    def RadarHack_Stop():
        """Signal the radar-hack worker to stop."""
        global Radar_H
        Radar_H = True

    def NOF1ash():
        """Placeholder: the no-flash feature is not implemented."""
        ctypes.windll.user32.MessageBoxW(None, 'NOFlash не работает :(', 'Преколяс№2', 0)

    def NOF1ash_Stop():
        """Placeholder: the no-flash feature is not implemented."""
        ctypes.windll.user32.MessageBoxW(None, 'NOFlash не работает :(', 'Преколяс№2', 0)

    def AutoAccept():
        """Start the auto-accept worker."""
        global Auto
        Auto = False
        Thread(target=AutoAccep_t).start()

    def AutoAccept_Stop():
        """Signal the auto-accept worker to stop."""
        global Auto
        Auto = True

    def FOV_P():
        """Apply the FOV value read from the UI via a worker thread."""
        global fov
        fov = int(ui.lineEdit_2.text())
        Thread(target=FOVP).start()

    def ShowM():
        """Toggle the 'show money' patch by flipping a conditional jump in client.dll."""
        pm = pymem.Pymem('csgo.exe')
        client = pymem.process.module_from_name(pm.process_handle, 'client.dll')
        clientModule = pm.read_bytes(client.lpBaseOfDll, client.SizeOfImage)
        address = client.lpBaseOfDll + re.search(rb'.\x0C\x5B\x5F\xB8\xFB\xFF\xFF\xFF', clientModule).start()
        # 0x75 (jnz) <-> 0xEB (jmp): patch and unpatch are the same operation.
        pm.write_uchar(address, 0xEB if pm.read_uchar(address) == 0x75 else 0x75)
        pm.close_process()

    def CMD_WH():
        """Toggle the console wallhack by flipping an immediate byte in client.dll."""
        pm = pymem.Pymem('csgo.exe')
        client = pymem.process.module_from_name(pm.process_handle, 'client.dll')
        clientModule = pm.read_bytes(client.lpBaseOfDll, client.SizeOfImage)
        address = client.lpBaseOfDll + re.search(rb'\x83\xF8.\x8B\x45\x08\x0F', clientModule).start() + 2
        pm.write_uchar(address, 2 if pm.read_uchar(address) == 1 else 1)
        pm.close_process()

    def gl_off():
        """Disable every toggleable feature at once."""
        WallHack_Stop()
        TrigerBot_Stop()
        AutoAccept_Stop()
        BunnyHop_Stop()
        RadarHack_Stop()

    # Wire UI buttons to their handlers.
    ui.pushButton_1.clicked.connect(WallHack)
    ui.pushButton_2.clicked.connect(WallHack_Stop)
    ui.pushButton_4.clicked.connect(TrigerBot)
    ui.pushButton_5.clicked.connect(TrigerBot_Stop)
    ui.pushButton_8.clicked.connect(AutoAccept)
    ui.pushButton_9.clicked.connect(AutoAccept_Stop)
    ui.pushButton_6.clicked.connect(BunnyHop)
    ui.pushButton_7.clicked.connect(BunnyHop_Stop)
    # BUGFIX: was `connect(NF1ash)` — NameError at startup; the handler is NOF1ash.
    ui.pushButton_10.clicked.connect(NOF1ash)
    ui.pushButton_11.clicked.connect(NOF1ash_Stop)
    ui.pushButton_12.clicked.connect(RadarHack)
    ui.pushButton_13.clicked.connect(RadarHack_Stop)
    ui.pushButton_14.clicked.connect(FOV_P)
    ui.pushButton_19.clicked.connect(Connect_Game)
    ui.pushButton_15.clicked.connect(DownOffsets)
    ui.pushButton_16.clicked.connect(AutoOffsets)
    ui.pushButton_17.clicked.connect(ShowM)
    ui.pushButton_18.clicked.connect(CMD_WH)
    # BUGFIX: the "Отключить все" (disable all) button was never wired to gl_off.
    ui.pushButton_20.clicked.connect(gl_off)
    sys.exit(app.exec_())
|
period_max.py | import time
import logging
import threading
import distutils
from src.domain.cache import Cache
from src.domain.exchanges import Exchange
from . import TradingStrategy
class PeriodMax(TradingStrategy):
    """Trading strategy comparing the current price with the period maximum.

    Green signal (enter a trade): the current price is strictly higher
    than the maximum "high" observed over the configured period, i.e.
    the symbol is breaking out upwards.

    Per-symbol maxima live in a hash-style cache; an optional background
    thread keeps them fresh.
    """

    def __init__(self, config, exchange: Exchange, cache: Cache, base_asset: str):
        """Read strategy settings and optionally start the cache updater.

        config: nested dict with a 'periodMax' section
            (secondsToUpdateCache, periodUsedInDays, cacheUpdater.enabled).
        exchange: market-data provider (klines and current prices).
        cache: hash-style cache exposing hget/hset.
        base_asset: only symbols quoted in this asset are considered.
        """
        self.cache = cache
        self.seconds_to_update_cache = config['periodMax']['secondsToUpdateCache']
        self.base_asset = base_asset
        self.exchange = exchange
        self.period_used_in_days = config['periodMax']['periodUsedInDays']
        self.cache_key_name = f'max-value-in-{self.period_used_in_days}-days'
        if str(config['periodMax']['cacheUpdater']['enabled']).lower() == 'true':
            # FIX: daemonize — the updater loops forever, and a non-daemon
            # thread would keep the interpreter alive on shutdown.
            threading.Thread(target=self.__cache_updater, daemon=True).start()

    def should_place_order(self, df, current_price: float, symbol: str) -> bool:
        """Return True when current_price breaks above the cached period max.

        df is unused here; kept for interface compatibility with other
        TradingStrategy implementations.
        """
        max_price = self.cache.hget(self.cache_key_name, symbol)
        if max_price is None:  # no cached maximum yet -> be conservative
            return False
        return current_price > float(max_price)

    def __cache_updater(self):
        """Periodically rebuild and publish the per-symbol period maxima."""
        logging.info('Start running PeriodMax cache updater')
        symbols_period_max = {}
        try:
            symbols_period_max = self.__build_symbols_period_max()
        except Exception as e:
            logging.error(f'Fail to update cache, error={e}')
        while True:
            try:
                # Publish first, then sleep, then rebuild for the next round.
                self.cache.hset(self.cache_key_name, symbols_period_max)
                logging.info('PeriodMax cache updated successfully')
                time.sleep(self.seconds_to_update_cache)
                symbols_period_max = self.__build_symbols_period_max()
            except Exception as e:
                logging.error(f'Fail to update cache, error={e}')
                time.sleep(self.seconds_to_update_cache)

    def __build_symbols_period_max(self):
        """Fetch daily klines for each tradable symbol and return
        {symbol: max high over the configured period}."""
        threads = []
        symbols_period_max = {}

        def update_max_for_symbol(symbol):
            # Index 2 of an exchange kline row is the candle "high".
            HIGH_POSITION = 2
            klines = self.exchange.get_klines(
                symbol, '1d', self.period_used_in_days)
            # Single dict item assignment is atomic in CPython, so the
            # worker threads can share this dict safely.
            symbols_period_max[symbol] = max(
                float(kline[HIGH_POSITION]) for kline in klines)

        for current_price in self.exchange.get_current_prices():
            symbol = current_price['symbol']
            if not symbol.endswith(self.base_asset):
                continue
            threads.append(threading.Thread(
                target=update_max_for_symbol, kwargs={'symbol': symbol}))

        # The binance client starts discarding connections above ~8
        # simultaneous requests, so join in batches of 8.
        MAX_SIMULTANEOUS_THREADS = 8
        for i, t in enumerate(threads):
            t.start()
            if (i + 1) % MAX_SIMULTANEOUS_THREADS == 0:
                for j in range(MAX_SIMULTANEOUS_THREADS):
                    threads[i - j].join()
        for t in threads:
            t.join()
        return symbols_period_max
|
event_source.py | # -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import time
from threading import Thread
from six.moves.queue import Queue, Empty
from rqalpha.interface import AbstractEventSource
from rqalpha.environment import Environment
from rqalpha.utils.logger import system_log
from rqalpha.events import Event, EVENT
from rqalpha.utils import rq_json
from rqalpha.utils.i18n import gettext as _
from .utils import get_realtime_quotes, is_holiday_today, is_tradetime_now
from . import data_board
class RealtimeEventSource(AbstractEventSource):
    """Event source that drives a strategy from real-time market data.

    Two daemon threads cooperate:
      * quotation_worker polls real-time quotes into data_board during
        trading hours (skipped when quotes come from redis instead);
      * clock_worker watches the wall clock and enqueues BEFORE_TRADING,
        BAR and AFTER_TRADING events, which events() then yields.
    """

    MARKET_DATA_EVENT = "RealtimeEventSource.MARKET_DATA_EVENT"

    def __init__(self, fps, mod_config):
        """fps: seconds between clock ticks; mod_config: mod settings."""
        self._env = Environment.get_instance()
        self.mod_config = mod_config
        self.fps = fps
        self.event_queue = Queue()

        # Dates each daily event last fired; persisted (set_state/get_state)
        # so a restart on the same day does not fire them twice.
        self.before_trading_fire_date = datetime.date(2000, 1, 1)
        self.after_trading_fire_date = datetime.date(2000, 1, 1)
        self.settlement_fire_date = datetime.date(2000, 1, 1)

        if not mod_config.redis_uri:
            # No redis quote source configured: poll quotes locally.
            self.quotation_engine_thread = Thread(target=self.quotation_worker)
            self.quotation_engine_thread.daemon = True

        self.clock_engine_thread = Thread(target=self.clock_worker)
        self.clock_engine_thread.daemon = True

    def set_state(self, state):
        """Restore the fire dates from a persisted JSON blob (bytes)."""
        persist_dict = rq_json.convert_json_to_dict(state.decode('utf-8'))
        self.before_trading_fire_date = persist_dict['before_trading_fire_date']
        self.after_trading_fire_date = persist_dict['after_trading_fire_date']
        self.settlement_fire_date = persist_dict['settlement_fire_date']

    def get_state(self):
        """Serialize the fire dates to a JSON blob (bytes)."""
        return rq_json.convert_dict_to_json({
            "before_trading_fire_date": self.before_trading_fire_date,
            "after_trading_fire_date": self.after_trading_fire_date,
            "settlement_fire_date": self.settlement_fire_date,
        }).encode('utf-8')

    def quotation_worker(self):
        """Poll real-time quotes for all CS instruments, roughly once a second."""
        while True:
            if not is_holiday_today() and is_tradetime_now():
                order_book_id_list = sorted(
                    instrument.order_book_id
                    for instrument in self._env.data_proxy.all_instruments("CS", self._env.trading_dt))
                try:
                    data_board.realtime_quotes_df = get_realtime_quotes(order_book_id_list)
                except Exception:
                    system_log.exception(_("get_realtime_quotes fail"))
                    # BUGFIX: the original `continue` here skipped the sleep
                    # below, busy-looping while the quote source kept failing.
            time.sleep(1)

    def clock_worker(self):
        """Fire BEFORE_TRADING / BAR / AFTER_TRADING from the wall clock."""
        data_proxy = self._env.data_proxy
        # Block until the snapshot source serves today's data.
        while True:
            if data_proxy.current_snapshot("000001.XSHG", None, None).datetime.date() == datetime.date.today():
                system_log.info(_("Market data is ready, start to work now!"))
                break
            time.sleep(0.1)

        while True:
            time.sleep(self.fps)
            if is_holiday_today():
                time.sleep(60)
                continue
            dt = datetime.datetime.now()
            # Each daily event fires at most once per calendar date.
            if dt.strftime("%H:%M:%S") >= "08:30:00" and dt.date() > self.before_trading_fire_date:
                self.event_queue.put((dt, EVENT.BEFORE_TRADING))
                self.before_trading_fire_date = dt.date()
            elif dt.strftime("%H:%M:%S") >= "15:10:00" and dt.date() > self.after_trading_fire_date:
                self.event_queue.put((dt, EVENT.AFTER_TRADING))
                self.after_trading_fire_date = dt.date()
            if is_tradetime_now():
                self.event_queue.put((dt, EVENT.BAR))

    def events(self, start_date, end_date, frequency):
        """Yield queued events forever; dates/frequency are ignored in realtime."""
        self.clock_engine_thread.start()
        if not self.mod_config.redis_uri:
            self.quotation_engine_thread.start()
        while True:
            real_dt = datetime.datetime.now()
            # Poll the queue with a timeout so the thread stays responsive.
            while True:
                try:
                    dt, event_type = self.event_queue.get(timeout=1)
                    break
                except Empty:
                    continue
            system_log.debug("real_dt {}, dt {}, event {}", real_dt, dt, event_type)
            yield Event(event_type, calendar_dt=real_dt, trading_dt=dt)
|
utility.py | import os
import math
import time
import datetime
from multiprocessing import Process
from multiprocessing import Queue
import config
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import imageio
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
class timer():
    """Stopwatch with a resettable accumulator.

    tic() marks a start instant, toc() returns seconds elapsed since the
    mark, hold() folds the current lap into the accumulator, release()
    returns the accumulated total and clears it, reset() just clears it.
    """

    def __init__(self):
        self.acc = 0
        self.tic()

    def tic(self):
        # (Re)mark the reference instant.
        self.t0 = time.time()

    def toc(self, restart=False):
        elapsed = time.time() - self.t0
        if restart:
            self.t0 = time.time()
        return elapsed

    def hold(self):
        self.acc += self.toc()

    def release(self):
        total, self.acc = self.acc, 0
        return total

    def reset(self):
        self.acc = 0
class checkpoint():
    """Experiment bookkeeping for training runs.

    Owns the output directory layout (model/, results-<dataset>/), the PSNR
    history tensor, the text log file, PSNR plots, and a small process pool
    used to write result images in the background.
    """
    def __init__(self, args):
        self.args = args
        self.ok = True
        self.log = torch.Tensor()  # PSNR history; grows one row per epoch via add_log
        now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
        if not args.load:
            if not args.save:
                args.save = now  # unnamed runs default to a timestamp directory
            self.dir = os.path.join(config.OUTPUT_DIR, args.save)
        else:
            self.dir = os.path.join(config.OUTPUT_DIR, args.load)
            if os.path.exists(self.dir):
                # resuming: restore the PSNR history written by save()
                self.log = torch.load(self.get_path('psnr_log.pt'))
                print('Continue from epoch {}...'.format(len(self.log)))
            else:
                args.load = ''
        if args.reset:
            os.system('rm -rf ' + self.dir)  # wipe the previous run on --reset
            args.load = ''
        os.makedirs(self.dir, exist_ok=True)
        os.makedirs(self.get_path('model'), exist_ok=True)
        for d in args.data_test:
            os.makedirs(self.get_path('results-{}'.format(d)), exist_ok=True)
        open_type = 'a' if os.path.exists(self.get_path('log.txt')) else 'w'
        self.log_file = open(self.get_path('log.txt'), open_type)
        with open(self.get_path('config.txt'), open_type) as f:
            # record the full argument set for reproducibility
            f.write(now + '\n\n')
            for arg in vars(args):
                f.write('{}: {}\n'.format(arg, getattr(args, arg)))
            f.write('\n')
        self.n_processes = 8  # background image-writer processes

    def get_path(self, *subdir):
        """Join *subdir* under this run's output directory."""
        return os.path.join(self.dir, *subdir)

    def save(self, trainer, epoch, is_best=False):
        """Persist model, loss, optimizer state and the PSNR log."""
        trainer.model.save(self.get_path('model'), epoch, is_best=is_best)
        trainer.loss.save(self.dir)
        #trainer.loss.plot_loss(self.dir, epoch)
        #self.plot_psnr(epoch)
        trainer.optimizer.save(self.dir)
        torch.save(self.log, self.get_path('psnr_log.pt'))

    def add_log(self, log):
        """Append a new row of PSNR values to the history tensor."""
        self.log = torch.cat([self.log, log])

    def write_log(self, log, refresh=False):
        """Echo *log* to stdout and the log file; cycle the file handle on refresh."""
        print(log)
        self.log_file.write(log + '\n')
        if refresh:
            # flush to disk by closing and reopening in append mode
            self.log_file.close()
            self.log_file = open(self.get_path('log.txt'), 'a')

    def done(self):
        """Close the log file at the end of the run."""
        self.log_file.close()

    def plot_psnr(self, epoch):
        """Plot PSNR-vs-epoch curves: one figure per test dataset, one line per scale."""
        axis = np.linspace(1, epoch, epoch)
        for idx_data, d in enumerate(self.args.data_test):
            label = 'SR on {}'.format(d)
            fig = plt.figure()
            plt.title(label)
            for idx_scale, scale in enumerate(self.args.scale):
                plt.plot(
                    axis,
                    self.log[:, idx_data, idx_scale].numpy(),
                    label='Scale {}'.format(scale)
                )
            plt.legend()
            plt.xlabel('Epochs')
            plt.ylabel('PSNR')
            plt.grid(True)
            plt.savefig(self.get_path('test_{}.pdf'.format(d)))
            plt.close(fig)

    def begin_background(self):
        """Start n_processes workers that write queued (filename, tensor) images."""
        self.queue = Queue()

        def bg_target(queue):
            while True:
                if not queue.empty():
                    filename, tensor = queue.get()
                    if filename is None: break  # sentinel posted by end_background
                    imageio.imwrite(filename, tensor.numpy())

        self.process = [
            Process(target=bg_target, args=(self.queue,)) \
            for _ in range(self.n_processes)
        ]
        for p in self.process: p.start()

    def end_background(self):
        """Post one sentinel per worker, drain the queue, then join the workers."""
        for _ in range(self.n_processes): self.queue.put((None, None))
        while not self.queue.empty(): time.sleep(1)
        for p in self.process: p.join()

    def save_results(self, dataset, filename, save_list, scale):
        """Queue SR/LR/HR tensors for background saving as 8-bit PNGs."""
        if self.args.save_results:
            filename = self.get_path(
                'results-{}'.format(dataset.dataset.name),
                '{}_x{}_'.format(filename, scale)
            )
            postfix = ('SR', 'LR', 'HR')
            for v, p in zip(save_list, postfix):
                # map [0, rgb_range] to [0, 255] and drop the batch dimension
                normalized = v[0].mul(255 / self.args.rgb_range)
                tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
                self.queue.put(('{}{}.png'.format(filename, p), tensor_cpu))
def quantize(img, rgb_range):
    """Snap *img* (valued in [0, rgb_range]) onto a 255-level grid, clamping overflow."""
    scale = 255 / rgb_range
    quantized = img.mul(scale).clamp(0, 255).round()
    return quantized.div(scale)
def calc_psnr(sr, hr, scale, rgb_range, dataset=None):
    """Compute PSNR (dB) between super-resolved *sr* and ground-truth *hr*.

    A border of `scale` pixels (benchmark datasets) or `scale + 6` pixels
    (otherwise) is shaved before comparison; benchmark RGB images are first
    reduced to luma. Returns 0 for the placeholder single-element *hr*, and
    float('inf') for identical images.

    Bug fix: when sr == hr the original called math.log10(0) and raised a
    ValueError (math domain error); infinite PSNR is the conventional result.
    """
    if hr.nelement() == 1: return 0
    diff = (sr - hr) / rgb_range
    if dataset and dataset.dataset.benchmark:
        shave = scale
        if diff.size(1) > 1:
            # BT.601 luma weights scaled by 256
            gray_coeffs = [65.738, 129.057, 25.064]
            convert = diff.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
            diff = diff.mul(convert).sum(dim=1)
    else:
        shave = scale + 6
    valid = diff[..., shave:-shave, shave:-shave]
    mse = valid.pow(2).mean()
    if mse == 0:
        return float('inf')
    return -10 * math.log10(mse)
def make_optimizer(args, target):
    '''
    make optimizer and scheduler together

    Builds an optimizer (SGD / ADAM / RMSprop, per args.optimizer) over the
    trainable parameters of *target*, subclassed on the fly so a MultiStepLR
    scheduler plus save/load helpers travel with the optimizer object.
    '''
    # optimizer: only parameters with requires_grad are handed over
    trainable = filter(lambda x: x.requires_grad, target.parameters())
    kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay}
    if args.optimizer == 'SGD':
        optimizer_class = optim.SGD
        kwargs_optimizer['momentum'] = args.momentum
    elif args.optimizer == 'ADAM':
        optimizer_class = optim.Adam
        kwargs_optimizer['betas'] = args.betas
        kwargs_optimizer['eps'] = args.epsilon
    elif args.optimizer == 'RMSprop':
        optimizer_class = optim.RMSprop
        kwargs_optimizer['eps'] = args.epsilon
    # scheduler: decay LR by args.gamma at each milestone ("200-400"-style string)
    milestones = list(map(lambda x: int(x), args.decay.split('-')))
    kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma}
    scheduler_class = lrs.MultiStepLR

    class CustomOptimizer(optimizer_class):
        # Optimizer with an attached scheduler and (de)serialization helpers.
        def __init__(self, *args, **kwargs):
            super(CustomOptimizer, self).__init__(*args, **kwargs)

        def _register_scheduler(self, scheduler_class, **kwargs):
            self.scheduler = scheduler_class(self, **kwargs)

        def save(self, save_dir):
            torch.save(self.state_dict(), self.get_dir(save_dir))

        def load(self, load_dir, epoch=1):
            self.load_state_dict(torch.load(self.get_dir(load_dir)))
            if epoch > 1:
                # NOTE(review): steps the scheduler `epoch` times, not epoch-1 --
                # confirm there is no off-by-one when resuming
                for _ in range(epoch): self.scheduler.step()

        def get_dir(self, dir_path):
            return os.path.join(dir_path, 'optimizer.pt')

        def schedule(self):
            self.scheduler.step()

        def get_lr(self):
            return self.scheduler.get_lr()[0]

        def get_last_epoch(self):
            return self.scheduler.last_epoch

    optimizer = CustomOptimizer(trainable, **kwargs_optimizer)
    optimizer._register_scheduler(scheduler_class, **kwargs_scheduler)
    return optimizer
|
background.py | import threading
class BackgroundThread(object):
    """Wrap a target callable in a daemon thread with a start/liveness API."""

    def __init__(self, target_method):
        # Daemon thread: it will not block interpreter shutdown.
        self.thread = threading.Thread(target=target_method, args=())
        self.thread.daemon = True

    def run(self):
        """Start the background thread (a thread can only be started once)."""
        self.thread.start()

    def is_alive(self):
        """Return True while the wrapped thread is running.

        Bug fix: the original called Thread.isAlive(), an alias that was
        removed in Python 3.9; Thread.is_alive() is the supported name.
        The result is returned directly instead of the redundant
        if/True/else/False dance.
        """
        return self.thread.is_alive()
|
do_psiblast.py | # This script takes a fasta file as input and runs PSIBLAST
# on each one individually. The results are saved in their
# own files, with the names equal to their fasta IDs
import os
from multiprocessing import Process, Queue
from pathlib import Path
from Bio import SeqIO
from Bio.Blast import NCBIXML
from Bio.Blast.Applications import NcbipsiblastCommandline
def do_psiblast(dirpath, rec):
    """
    Run a PSIBLAST query on the given sequence.

    Writes *rec* to <dirpath>/<rec.id>.fasta, runs psiblast against the
    cpdb2_db database and stores XML output next to the query file.
    Returns 1 on success, -1 on failure.
    """
    # save the query to a fasta file
    query_basepath = Path(dirpath, rec.id)
    SeqIO.write(rec, str(query_basepath)+".fasta", "fasta")
    # build the query
    query = str(query_basepath)+".fasta"
    db = "cpdb2_db"
    evalue = 0.001
    outfmt = 5  # XML output
    out = str(query_basepath)+"_blast.xml"
    num_threads = 6
    num_iterations = 3
    try:
        psib_cline = NcbipsiblastCommandline(query=query, db=db, evalue=evalue, outfmt=outfmt, out=out, num_threads=num_threads, num_iterations=num_iterations)
        stdout, stderr = psib_cline()
    except Exception as exc:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; report the underlying error to aid diagnosis.
        print("Failed to run PSIBLAST on record %s" % rec.id)
        print(exc)
        return -1
    return 1
def create_directory(rec):
    """Create ./<rec.id>_pb for this record.

    Returns the directory Path, or None when it already exists (the caller
    treats that as "already processed, skip"). Cleanup: removed the unused
    `count` local and the dead commented-out renaming logic.
    """
    dirname = rec.id + "_pb"
    dirpath = Path(Path.cwd(), dirname)
    if dirpath.exists():
        # Results for this record are already present: signal caller to skip.
        return None
    os.makedirs(str(dirpath))
    return dirpath
def worker(rec_queue, done_queue):
    """Consume SeqRecords from rec_queue until a None sentinel arrives.

    For each record, creates its output directory and runs PSIBLAST; records
    whose directory already exists or whose BLAST run fails are counted as
    skipped. On the sentinel, reports (processed, skipped) via done_queue.
    """
    count = 0
    skipped = 0
    while True:
        rec = rec_queue.get()
        if rec is None:
            # sentinel: report totals and exit
            done_queue.put((count, skipped))
            return
        count += 1
        # create directory for this record; None means it already exists
        dirpath = create_directory(rec)
        if dirpath is None:
            skipped += 1
            continue
        # execute psiblast
        if do_psiblast(dirpath, rec) == -1:
            skipped += 1
if __name__ == "__main__":
    # Fan PSIBLAST runs out over a small pool of worker processes.
    num_workers = 3
    records = SeqIO.parse("/home/dillonbbailly/main/uniref50/uniref50_filt.fasta", "fasta")
    rec_queue = Queue(1000)  # bounded: applies back-pressure on the parser
    done_queue = Queue()
    workers = []
    for i in range(num_workers):
        p = Process(target=worker, args=(rec_queue, done_queue))
        workers.append(p)
        p.start()
    for i, rec in enumerate(records):
        # only sequences of reasonable length are worth a BLAST run
        if len(rec.seq) < 25 or len(rec.seq) > 2000:
            continue
        rec_queue.put(rec)
        if i % 5000 == 0:
            print("Handled %d records" % (i))
    # one None sentinel per worker signals end of input
    for i in range(num_workers):
        rec_queue.put(None)
    total = 0
    skipped = 0
    for i in range(num_workers):
        (count, skip) = done_queue.get()
        total += count
        skipped += skip
    print("DONE: %d sequences processed, of which %d were skipped." % (total, skipped))
    for p in workers:
        p.join()
|
plant_node.py | #!/usr/bin/env python
from sys import version_info
if version_info[0] <= 2:
from Queue import Queue
else:
from queue import Queue
import time
import threading
import rospy
import actionlib
import ros_opcua_srvs.srv as ros_opcua
from robonomics_game_plant.msg import OrderAction, OrderFeedback, OrderResult
from robonomics_game_plant.srv import Unload, UnloadResponse
from robonomics_game_common.opcua_client import OpcuaClient
class Plant:
    """ROS node bridging an OPC UA-controlled plant (PLC) to an actionlib
    order interface: queues orders, enables the plant, tracks its state."""
    # NOTE(review): these are class-level attributes shared by every Plant
    # instance; with one plant per node that is harmless, but orders_queue
    # being a class-level Queue would be shared across instances -- confirm.
    name = ''
    state = -1  # -1 means undefined; 0 disabled, 1..11 PLC cycle, 100 fault
    orders_queue = Queue()
    _last_enable_time = 0  # ms
def __init__(self, name, opcua_client_node, opcua_endpoint, opcua_server_namespace,
             unload_time,  # ms, time conveyor will move after detail gone from sensor
             handle_time,  # ms, detail processing time
             timeout  # ms, proximity sensors and communication response timeout
             ):
    """Connect to OPC UA, push timing settings to the PLC, start the order
    processor and state updater threads, and expose the ~unload service."""
    # connect to OPC-UA Server using ros opcua client
    rospy.logdebug('Connecting to OPC UA endpoint: ' + opcua_endpoint + ', '
                   'ros_opcua client node: ' + opcua_client_node + ', '
                   'OPC UA namespace: ' + opcua_server_namespace + ', ')
    self.opcua = OpcuaClient(opcua_client_node, opcua_endpoint)  # ros opcua client read/write interface
    self.opcua_ns = opcua_server_namespace
    # prepare plant action server
    rospy.logdebug('Starting ActionServer with name: ' + name)
    self.name = name
    self._server = actionlib.ActionServer(name, OrderAction, self.plan_job, auto_start=False)
    self._server.start()
    # queue goals from action server and proc them in separate thread
    rospy.logdebug('Starting orders proc thread')
    self._orders_proc_thread = threading.Thread(target=self._orders_proc)
    self._orders_proc_thread.daemon = True
    self._orders_proc_thread.start()
    # load plant parameters to opcua server
    rospy.logdebug('Loading parameters to OPC UA server: '
                   'unload_time %d, handle_time %d, timeout %d'
                   % (unload_time, handle_time, timeout) )
    self._unload_time = unload_time
    self._handle_time = handle_time
    self._timeout = timeout  # ms, proximity sensors and communication timeout
    try:
        self.opcua.write(self.opcua_ns + '/Settings/UnloadTime', 'uint16', unload_time)
        self.opcua.write(self.opcua_ns + '/Settings/HandleTime', 'uint16', handle_time)
        self.opcua.write(self.opcua_ns + '/Settings/Timeout', 'uint16', timeout)
    except rospy.ServiceException as e:
        # without these settings the PLC cannot run orders -- shut the node down
        rospy.logerr('Exception raised while OPC-UA request: %s' % e)
        rospy.logerr('Check OPC-UA client, server connection or server model')
        rospy.signal_shutdown(e)
    # update plant state in separate thread
    rospy.logdebug('Starting state updater thread')
    self._state_updater_thread = threading.Thread(target=self._state_updater)
    self._state_updater_thread.daemon = True
    self._state_updater_thread.start()
    # unload signal service
    rospy.logdebug('Creating Unload service')
    rospy.Service('~unload', Unload, self.unload)
    rospy.logdebug('Reseting OPC UA signals')
    self.reset()
    rospy.logdebug('Plant %s ready' % name)
def reset(self):
    """Drive both OPC UA command flags low so the PLC starts from a known state."""
    rospy.logdebug('Plant.reset')
    for signal in ('/Enable', '/Unload'):
        self.opcua.write(self.opcua_ns + signal, 'bool', False)
def spin(self):
    """Block servicing ROS callbacks until shutdown (thin wrapper over rospy.spin)."""
    rospy.spin()
def plan_job(self, order):
    """ActionServer goal callback: accept every incoming order and queue it.

    Orders are queued regardless of the current plant state; _orders_proc
    dequeues and starts them once the plant reports idle (state 0).
    Raises ValueError on a state code outside the known set.
    """
    rospy.logdebug('Got a new order')
    if self.state == -1:
        rospy.logwarn(
            'Plant state undefined. Check ROS node to PLC connection.'
            'Queueing orders until connection establish')
    elif self.state == 0:  # disabled
        pass
    elif self.state in range(1, 12):  # 1..11: PLC is working a cycle
        rospy.loginfo('Plant busy. Queuing new order')
    elif self.state == 100:
        rospy.logwarn('Plant FAULT. Queueing orders until fault reset')
    else:
        raise ValueError('Plant state value: %s deprecated' % str(self.state))
    order.set_accepted()
    self.orders_queue.put(order)
def _orders_proc(self):
    """Worker loop: start the next queued order whenever the plant is idle.

    Bug fix: the original logged the 'Orders queue' debug line twice in a
    row (an accidental duplication); one copy removed.
    """
    while not rospy.is_shutdown():
        if not self.orders_queue.empty() and self.state == 0:  # order in plan and plant off
            self.start_job(self.orders_queue.get_nowait())
            rospy.logdebug('Orders queue: ' + str(self.orders_queue.queue))
        rospy.sleep(1)
def start_job(self, order):
    """Run one accepted order to completion, publishing state changes as
    action feedback; aborts on shutdown, lost connection, or PLC fault."""
    rospy.logdebug('Self.start_job')
    result = OrderResult()
    enabled = False
    while not enabled:
        enabled = self.enable()  # enable plant to start the job, returns with (state != 0)
    # if not enabled:
    #     result.act = 'Order %s %s aborted' % ( str(order.get_goal_id()), str(order.get_goal()) )
    #     order.set_aborted(result)
    #     self.opcua.write(self.opcua_ns + '/Enable', 'bool', False)
    #     return
    state_prev = 0
    rospy.sleep(5)  # let plant update actual state
    while self.state not in [0, 9, 10, 11]:  # while order in proc (state not DISABLE or after UNLOAD)
        rospy.logdebug('Job in progress, state: %d' % self.state)
        if self.state != state_prev:  # [1..9] means work
            feedback = OrderFeedback()
            feedback.status = str(self.state)
            order.publish_feedback(feedback)  # publish state as feedback
            state_prev = self.state
        if rospy.is_shutdown() or self.state == -1 or self.state > 99:  # abort order if something go wrong
            result.act = 'Order %s %s aborted' % ( str(order.get_goal_id()), str(order.get_goal()) )
            order.set_aborted(result)
            self.opcua.write(self.opcua_ns + '/Enable', 'bool', False)
            return
        rospy.sleep(1)
    rospy.logdebug('Job complete, state: %d' % self.state)
    result.act = 'Order %s %s complete' % (str(order.get_goal_id()), str(order.get_goal()))
    order.set_succeeded(result)
def enable(self):
    """Raise the /Enable flag to let the PLC start a queued order.

    Minimum call period is (HandleTime + 2*UnloadTime)
    Each call period timeout will be ignored

    Returns True once the PLC confirms the enable; False on rate-limit,
    timeout, or OPC UA service failure.
    """
    rospy.logdebug('Plant.enable')
    now = int(round(time.time() * 1000))  # ms
    last = self._last_enable_time
    # period = self._handle_time + 2 * self._unload_time
    period = 1000
    if now > last + period:
        self._last_enable_time = now
        try:
            # Rising edge enables plant, check LOW state first
            response = self.opcua.read(self.opcua_ns + '/Enable')
            # if not response.success:
            #     rospy.logerr('Enable request not successful')
            #     return False
            if response.data.bool_d is True:
                rospy.logerr('Plant enabled without node call. Reseting enable signal')
                self.opcua.write(self.opcua_ns + '/Enable', 'bool', False)
                rospy.sleep(2)
            response.success = False
            while not response.success:  # try hard until it finally enables
                response = self.opcua.write(self.opcua_ns + '/Enable', 'bool', True)
                rospy.sleep(1)
            # ensure PLC starts the order checking state is not undefined or off
            t = time.time() * 1000  # ms
            while self.state <= 0:  # undefined or disabled
                if time.time() * 1000 - t > self._timeout:
                    rospy.logerr('Plant enable timeout.')
                    return False
                time.sleep(1)
            response.success = False
            while not response.success:  # try hard until it finally read
                response = self.opcua.read(self.opcua_ns + '/Enable')
                rospy.sleep(1)
            return response.data.bool_d  # True if enabled, False it not enabled
        except rospy.ServiceException as e:
            # fall through to the final return False below
            rospy.logerr('Exception raised while OPC-UA request: %s' % e)
    else:
        rospy.logwarn('Order run attempt after %d sec from the last run ingnored.'
                      'Minium run period: %d sec.' % (now - last, period))
    return False
def unload(self, request):
    """~unload service handler: pulse /Unload and wait until the PLC reports
    unloaded (state 11) or turned off (state 0), then disable for the next order."""
    rospy.logdebug('Unloading...')
    self.opcua.write(self.opcua_ns + '/Unload', 'bool', False)  # guarantee a rising edge
    rospy.sleep(2)
    self.opcua.write(self.opcua_ns + '/Unload', 'bool', True)
    while not rospy.is_shutdown():  # wait for unloaded state
        rospy.logdebug('Waiting for unload...')
        if self.state == 11:  # Unloaded
            rospy.logdebug('Unloaded')
            break
        if self.state == 0:
            rospy.logdebug('Turned off while unloading')
            break
        rospy.sleep(1)
    self.opcua.write(self.opcua_ns + '/Unload', 'bool', False)
    self.opcua.write(self.opcua_ns + '/Enable', 'bool', False)  # state "0", start next job
    rospy.logdebug('Unloading complete. Disabled for next order')
    return UnloadResponse()
def _state_updater(self):
    """Worker loop: poll <ns>/State once per second and mirror it into self.state.

    Unknown codes (outside 0..11) and failed reads map to -1 (undefined).
    Bug fix: on a failed read the original `continue`d immediately, skipping
    the sleep and busy-looping against an unreachable OPC UA server.
    """
    rospy.logdebug('State updater run')
    while not rospy.is_shutdown():
        response = self.opcua.read(self.opcua_ns + '/State')
        if not response.success:
            rospy.logwarn('State update unsuccessful')
            self.state = -1
            time.sleep(1)  # back off before retrying the read
            continue
        self.state = getattr(response.data, '%s_d' % response.data.type)
        rospy.logdebug('Plant state: ' + str(self.state))
        if self.state not in range(0, 12):  # PLC state codes can be from 0 to 11
            rospy.logwarn(
                'Deprecated state code: %d, set state to undefined (-1)' % self.state)
            self.state = -1
        rospy.logdebug( 'New plant state: ' + str(self.state) )
        time.sleep(1)
if __name__ == '__main__':
    # Node entry point: read parameters, validate the OPC UA namespace, run the plant.
    rospy.init_node('plant', log_level=rospy.DEBUG)
    node_name = rospy.get_name()
    opcua_endpoint= rospy.get_param('~opcua_endpoint')  # required, no default
    if not rospy.has_param('~opcua_server_namespace'):
        raise rospy.ROSInitException(
            'Parameter "opcua_server_namespace" must be specified in accordance with OPCU-UA'
            'Model. Example: /Airalab/Plant1')
    opcua_server_namespace = rospy.get_param('~opcua_server_namespace')
    if 'ns=' not in opcua_server_namespace:  # use only string type nodeId
        raise rospy.ROSInitException(
            'Parameter "opcua_server_namespace" template: "ns=<int>;s=/<VendorName>/<PlantName>"')
    if not rospy.has_param('opcua_client_node'):
        rospy.logwarn('Using default ROS OPC-UA Client node path: /opcua/opcua_client')
        rospy.logwarn('You can specify it in parameter \'opcua_client_node\'')
    opcua_client_node = rospy.get_param('opcua_client_node', '/opcua/opcua_client')
    # timing parameters in milliseconds
    unload_time = rospy.get_param('~unload_time', 2000)
    handle_time = rospy.get_param('~handle_time', 2000)
    timeout = rospy.get_param('~timeout', 10000)
    Plant(node_name, opcua_client_node, opcua_endpoint, opcua_server_namespace, unload_time, handle_time, timeout).spin()
|
trainingsrecorder.py | """
This program listens on OSC_IP_ADDRESS:OSC_PORT for incoming
sound vectors (array of floats) via OSC protocol address '/record_sound_vector'
and saves it to a buffer. After hitting ctrl-C in the terminal or receiving
any data on address '/stop' the recording process stops and the sound vectors
will be saved along neural net prediction output vectors based camera frames
that were saved during recording.
HowTo:
- Check IP address in Vezer and SuperCollider
- Vezer(Trainings Notebook) sends sound data to SuperCollider
- Vezer sends '/sendTrainData' command to SuperCollider. By this command,
SuperCollider will send all sound messages stored in one vector
'/record_sound_vector' to this Python Dispatcher
- send '/stop' message from Vezer to this patch, to stop recording
"""
import os
import threading
import signal
import sys
import platform
import csv
from pythonosc import osc_server, dispatcher
from conversation.vision_camera import Camera
from conversation import neuralnet_vision_inference, configuration, vision_camera
OSC_IP_ADDRESS = "0.0.0.0"
OSC_PORT = 8005
TRAININGS_SET_PATH = "./data/trainingsset_dodeca.csv"
SHOW_FRAMES = True # show window frames
ZOOM_AREA_WIDTH = 380
ZOOME_AREA_HEIGHT = 380
CAMERA = Camera(224, 224, ZOOM_AREA_WIDTH, ZOOME_AREA_HEIGHT)
MODEL = neuralnet_vision_inference.InferenceModel()
trainingsset = []
trainingsset_final = []
stop_event = threading.Event()
def get_frame():
    """
    Grab one frame from the global CAMERA.

    Returns ([PIL image], ["test"], cv2 frame). The for-loop returns on its
    first iteration, so exactly one frame is consumed per call; optionally
    shows the frame in a window when SHOW_FRAMES is set.
    """
    for frames in CAMERA:
        cv2_img, pil_img = frames
        if SHOW_FRAMES:
            vision_camera.cv2.imshow('frame', cv2_img)
            key = vision_camera.cv2.waitKey(20)  # keep the window responsive
        img_collection = [pil_img]
        names_of_file = ["test"]
        return img_collection, names_of_file, cv2_img
def process_trainingsset():
    """
    Transform every recorded frame in `trainingsset` into a 512-dim
    activation vector via the neural net and store (activations, sound
    vector) pairs in `trainingsset_final`.
    """
    for soundvector, img_collection, names_of_file, _cv2_img in trainingsset:
        activation_vectors, _header, _img_coll_bn = MODEL.get_activations(
            MODEL, img_collection, names_of_file)
        trainingsset_final.append((activation_vectors, soundvector))
    print("Finished processing trainings set")
def save_to_disk():
    """
    saves the trainings set from trainingsset_final to disk

    Each row is the 512-dim image activation vector followed by the 5-dim
    sound vector, space-separated, overwriting TRAININGS_SET_PATH.
    """
    if len(trainingsset_final) == 0:
        print("No trainings data received. Nothing written to disk.\n")
        return
    with open(TRAININGS_SET_PATH, mode="w") as csv_file:
        # column names are built but the header row is currently disabled
        fieldnames = ["image vector" + str(i) for i in range(512)]
        fieldnames.extend(["sound vector" + str(i) for i in range(5)])
        writer = csv.writer(csv_file, delimiter=" ")
        # writer.writerow(fieldnames)
        for image_vector, sound_vector in trainingsset_final:
            row = list(image_vector[0])
            row.extend(sound_vector)
            writer.writerow(row)
        abspath = os.path.realpath(csv_file.name)
        print("\n\nWritten trainings set to {}".format(abspath))
def record(address, *args):
    """ blocking
    OSC handler: pair the incoming 5-dim sound vector (*args) with a freshly
    captured camera frame and append both to the global trainings buffer.
    """
    img_collection, names_of_file, cv2_img = get_frame()
    trainingsset.append([args, img_collection, names_of_file, cv2_img])
def osc_stop(address, *args):
    """
    Callback for the OSC dispatcher's '/stop' address: ends the recording.
    """
    print("received /stop")
    stop_recording()
def stop_recording():
    """
    Stops the recording server and signals stop_event.

    shutdown() must run on a different thread than serve_forever(), so the
    teardown is dispatched to a short-lived daemon thread.
    """
    def stop():
        server.shutdown()
        server.server_close()
        stop_event.set()
    threading.Thread(target=stop, daemon=True).start()
def start_recording():
    """
    Build and return the blocking OSC server (stored in the module-global
    `server` so stop_recording can reach it); the caller must invoke
    serve_forever() itself.
    """
    global server
    dispatcher_server = dispatcher.Dispatcher()
    dispatcher_server.map("/record_sound_vector", record)
    dispatcher_server.map("/stop", osc_stop)
    server = osc_server.BlockingOSCUDPServer(
        (OSC_IP_ADDRESS, OSC_PORT), dispatcher_server)
    print("Serving on {}".format(server.server_address))
    #threading.Thread(target=server.serve_forever, daemon=True).start()
    return server
if __name__ == "__main__":
    server = start_recording()
    server.serve_forever()  # blocks until the '/stop' handler shuts the server down
    process_trainingsset()
    save_to_disk()
    sys.exit(0)
|
Query.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Ivar Vargas Belizario
# Copyright (c) 2021
# E-mail: ivar@usp.br
import tornado.ioloop
import tornado.web
import tornado.httpserver
import ujson
import glob
import os
import time
import sys
import pandas as pd
import numpy as np
import os.path
import math
import uuid
import zipfile
from io import BytesIO
from datetime import datetime
import threading
import SimpleITK as sitk
from bson.objectid import ObjectId
from vx.com.py.database.MongoDB import *
from vx.radpleura.Settings import *
from vx.radpleura.BaseHandler import *
from vx.radpleura.ROI import *
from vx.radpleura.Features import *
from vx.radpleura.VSI import *
from vx.radpleura.Classification import *
from vx.radpleura.SplitImage import *
class Query(BaseHandler):
#Get RequestHandler
def get(self):
    """HTTP GET dispatcher: decode the JSON 'data' argument and route on
    app['argms']['type'] (1 list images, 3 VSI->image, 5 regions,
    7 classification, 8 list projects, 9 open project)."""
    dat = self.get_argument('data')
    app = ujson.loads(dat)
    #app = DataTransfer()
    #app.load(dat)
    obj = ""
    if app["argms"]["type"]==0:
        pass;
    elif app["argms"]["type"]==1:
        obj = self.listimages();
    #elif app["argms"]["type"]==2:
    #    obj = self.listfilesdirs(app["argms"]);
    elif app["argms"]["type"]==3:
        obj = self.makeimgfromvsi(app["argms"]);
    elif app["argms"]["type"]==4:
        obj = None  # NOTE(review): self.write(None) raises in Tornado -- confirm type 4 is unused
    elif app["argms"]["type"]==5:
        obj = self.getregions(app["argms"]);
    elif app["argms"]["type"]==7:
        obj = self.makeclassification(self.current_user, app["argms"]);
    elif app["argms"]["type"]==8:
        obj = self.listprojects(self.current_user, app["argms"]);
    elif app["argms"]["type"]==9:
        obj = self.openproject(self.current_user, app["argms"]);
    self.write(obj)
    self.finish()
#Post RequestHandler
def post(self):
dat = self.get_argument('data')
app = ujson.loads(dat)
rs = ""
if self.current_user:
#print("app.argms", app, self.request.files['fileu'][0])
if app["argms"]["type"]==6:
rs = Query.uploadfiledata(self.current_user, self.request.files['fileu'][0]);
self.write(rs)
#pass
# static query methods
"""
def listimages():
fileso = []
for name in os.listdir(Settings.DATA_PATH):
# print("name", name)
if name.endswith(".png") or name.endswith(".jpg") or name.endswith(".jpeg"):
# fileso.append(str(os.path.join(outdir, str(name))))
# fileso.append({"name":Settings.IMAGE_PATH+str(name)})
fileso.append({"name":str(name)})
return {"response":fileso}
"""
@staticmethod
def openFile(pathf):
    """Load and return the JSON document stored at *pathf*."""
    with open(pathf, 'r') as fp:
        return ujson.load(fp)
@staticmethod
def writeFile(pathf, rdata):
    """Serialize *rdata* as JSON to *pathf*, overwriting any existing file."""
    with open(pathf,'w') as fp:
        ujson.dump(rdata, fp)
@staticmethod
def listimages():
    """Scan DATA_PATH/<year>/<month>/<id>/db.obj project records and return
    them as {"response": [...]} sorted newest-first by their 'date' field.

    Bug fix: the scan horizon was hard-coded to `now = 2021`, so projects
    created in any later year were silently never listed; the upper bound
    now tracks the current year.
    """
    fileso = []
    ini = 2021  # first year the application stored data
    months = ["01","02","03","04","05","06","07","08","09","10","11","12"]
    now = datetime.now().year
    for y in range(ini, now+1):
        for m in months:
            folder = os.path.join(Settings.DATA_PATH, str(y), str(m))
            if not os.path.exists(folder):
                continue
            for ide in os.listdir(folder):
                if os.path.isdir(os.path.join(folder, ide)):
                    fileobj = os.path.join(folder, ide, "db.obj")
                    if os.path.exists(fileobj):
                        fileso.append(Query.openFile(fileobj))
    fileso = sorted(fileso, key=lambda i: (i['date']), reverse=True)
    return {"response": fileso}
# static query methods
@staticmethod
def listfilesdirs(argms):
    """List the entries of argms['path']/argms['directory'].

    Returns {"response": {"path": ..., "files": [...]}, "error": 0} with the
    files sorted directories-first then by name (each entry carrying its
    mtime), or {"response": <error name>, "error": 1} on failure.
    """
    path = argms["path"]
    direc = argms["directory"]
    pathi = path
    if direc != "":
        pathi += "/" + direc
    result = []
    pathi = os.path.join(path, direc)
    try:
        for fil in os.listdir(pathi):
            cc = os.path.join(pathi, fil)
            modTimesinceEpoc = os.path.getmtime(cc)
            modificationTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(modTimesinceEpoc))
            if os.path.isfile(cc):
                result.append({"name": fil, "type": 1, "date": modificationTime})
            else:
                result.append({"name": fil, "type": 0, "date": modificationTime})
        result = sorted(result, key=lambda k: (k['type'], k['name']))
        result = {"response": {"path": pathi, "files": result}, "error": 0}
    except FileNotFoundError:
        result = {"response": "FileNotFoundError", "error": 1}
    except PermissionError:
        result = {"response": "PermissionError", "error": 1}
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; the dead `finally: pass` was removed.
        result = {"response": "UndefinedError", "error": 1}
    return result
# static query methods
@staticmethod
def openproject(iduser, argms):
    """Fetch one project document by argms['idpj'] and attach its pieces data.

    ObjectIds are stringified for JSON transport; when pieces.json exists the
    parsed pieces plus the relative image path are added to the record.
    """
    idpj = ObjectId(argms["idpj"])
    print("idpj", idpj)
    res = list(MongoDB.find(DBS.DBMEDIA, "app_lung", {"_id": idpj}))
    for rs in res:
        rs["_id"] = str(rs["_id"])
        rs["_id_user"] = str(rs["_id_user"])
        pf = os.path.join(Settings.DATA_PATH, rs["y"]+"/"+rs["m"]+"/"+rs["_id"]+"/pieces.json");
        if os.path.exists(pf):
            rs["pieces"] = Query.openFile(pf)
        # client-relative location of the piece images
        rs["pathpieces"] = "data/"+rs["y"]+"/"+rs["m"]+"/"+rs["_id"]+"/pieces/"
    print("rs", res)
    return {"response":res, "error":0}
# static query methods
@staticmethod
def listprojects(iduser, argms):
    """List projects visible to *iduser*: their own plus any marked shared.

    Joins the owning user via $lookup, projects a summary field subset,
    sorts newest-first, and stringifies ObjectIds for JSON transport.
    """
    iduser = ObjectId(iduser.decode("utf-8"))
    rs = list(MongoDB.aggregate(DBS.DBMEDIA, "app_lung",
        [
            # attach the owner's user document as usersUnits
            {"$lookup":
                {
                    "from": "user",
                    "localField": "_id_user",
                    "foreignField" : "_id",
                    "as": "usersUnits",
                }
            },
            # own projects or explicitly shared ones
            {"$match": {
                "$or": [
                    {"_id_user": iduser},
                    {"shared": 1}
                ]
              }
            },
            # summary fields only
            {"$project":
                {
                    "_id" : 1,
                    "_id_user": 1 ,
                    "name": 1,
                    "date_update" : 1,
                    "factor" : 1,
                    "m" : 1,
                    "y" : 1,
                    "shared" : 1,
                    "status" : 1,
                    "statusmsg" : 1,
                    "usersUnits._id" : 1 ,
                    "usersUnits.name" : 1 ,
                }
            },
            {
                "$sort": {
                    "date_update": -1
                }
            }
        ]
    ))
    print("zzzzzzsrs", rs)
    for i in range(len(rs)):
        rs[i]["_id"] = str(rs[i]["_id"])
        rs[i]["_id_user"] = str(rs[i]["_id_user"])
        if len(rs[i]["usersUnits"])==1:
            rs[i]["usersUnits"][0]["_id"] = str(rs[i]["usersUnits"][0]["_id"])
    return {"response":rs, "error":0}
@staticmethod
def makedir(outdir):
    """Create *outdir* (including parents) if it does not exist.

    Uses exist_ok=True instead of the check-then-create pattern, which was
    race-prone when two requests created the same month folder concurrently.
    """
    os.makedirs(outdir, exist_ok=True)
@staticmethod
def getPathSave(mainpath):
    """Return (year, month, path) for the current month's storage folder,
    creating the folder on demand."""
    stamp = datetime.now()
    dt_year = stamp.strftime("%Y")
    dt_mont = stamp.strftime("%m")
    mpth = os.path.join(mainpath, dt_year, dt_mont)
    Query.makedir(mpth)
    return dt_year, dt_mont, mpth
# static query methods
@staticmethod
def makeimgfromvsi(argms):
    """Kick off a background VSI->image conversion and register the job in a
    db.obj metadata file; returns immediately with {"response": "ok"}.

    NOTE(review): Query.getPathSave returns a 3-tuple (year, month, path),
    but the call below unpacks FOUR values -- as written this raises
    ValueError, and `idf` has no obvious source. Needs fixing/confirmation.
    """
    name = argms["name"]
    path = argms["path"]
    file = argms["file"]
    factor = argms["factor"]
    vsifile = os.path.join(path,file)
    """ pathsave = getdiresave(Settings.DATA_PATH) """
    #convertvsi2img(vsifile, factor, Settings.DATA_PATH, "df3wfsd")
    y, m, idf, pathsave = Query.getPathSave(Settings.DATA_PATH)
    fileid = uuid.uuid4().hex
    # conversion is slow -- run it in a background thread
    t = threading.Thread(target=Query.convertvsi2img, args=(vsifile, factor, pathsave, fileid,))
    t.start()
    dt_string = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # status 0 / "working..." until convertvsi2img flips it to done
    dbdat = {
        "y":y,
        "m":m,
        "id":idf,
        "name":name,
        "date":dt_string,
        "image":fileid+".jpg",
        "tumbail":fileid+".jpg",
        "atributes":{"factor":factor,"status":0,"statusmsg":"working..."},
        "images":[]
    }
    """
    "images":[
        {
            "name":"original",
            "date":dt_string,
            "image":fileid+".jpg",
            "tumbail":fileid+"_tumbail.jpg",
            "atributes":{},
        }
    ]
    """
    Query.writeFile(os.path.join(pathsave,"db.obj"), dbdat)
    result = {"response":"ok", "error":0}
    return result
# get regions
@staticmethod
def getregions(argms):
    """Load <DATA_PATH>/<argms['path']>/contours.json and return its regions.

    Returns {"response": rois, "error": 0} on success or
    {"response": <error name>, "error": 1} on failure.
    """
    try:
        pahfile = os.path.join(Settings.DATA_PATH, argms["path"]+"/"+"contours.json")
        print("pahfile", pahfile)
        rois = Query.openFile(pahfile)
        print("roisx", rois)
        return {"response": rois, "error": 0}
    except FileNotFoundError:
        print("error file not")
        return {"response": "FileNotFoundError", "error": 1}
    except PermissionError:
        print("permission error")
        return {"response": "PermissionError", "error": 1}
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; dead `finally: pass` removed.
        print("error undefined")
        return {"response": "UndefinedError", "error": 1}
@staticmethod
def convertvsi2img(vsifile, factor, pathout, outfile):
    """Convert a VSI slide to <outfile>.tiff/.jpg under *pathout* and mark the
    project's db.obj as finished (status 1).

    The VSI object is hosted behind a multiprocessing BaseManager proxy so
    the heavy conversion runs in a separate process.
    NOTE(review): BaseManager and cv2 are not imported in this chunk --
    presumably provided by one of the wildcard `from vx...` imports; verify.
    """
    outfiletiff = os.path.join(pathout,outfile+".tiff")
    outfilejpg = os.path.join(pathout,outfile+".jpg")
    outtumbailjpg = os.path.join(pathout,outfile+"_tumbail.jpg")
    BaseManager.register('VSI', VSI, exposed=['getAux','getnTilesX','getnTilesY'])
    manager = BaseManager()
    manager.start()
    obj = manager.VSI(vsifile, float(factor))
    #obj = VSI(vsifile, float(factor))
    image = VSI.makeimage(obj)
    cv2.imwrite(outfiletiff, image)
    cv2.imwrite(outfilejpg, image)
    # flip the job status in the project's metadata file
    fileobj = os.path.join(pathout, "db.obj")
    dat = Query.openFile(fileobj)
    dat["atributes"]["status"] = 1
    dat["atributes"]["statusmsg"] = ""
    Query.writeFile(fileobj, dat)
@staticmethod
def uploadfiledata(iduser, file):
    """Ingest an uploaded slide: register it in Mongo, store the file, and run
    the ROI / feature-extraction / tiling pipeline.

    Returns an HTML <script> snippet for the client iframe: a refresh call on
    success, an error alert otherwise.
    """
    # default response; replaced on success inside the try block
    r = """<script>
parent.mwalert('','Error: upload file');
parent.openprojects();
</script>"""
    iduser = ObjectId(iduser.decode("utf-8"))
    path = Settings.DATA_PATH
    fname, ext = os.path.splitext(file['filename'])
    ext = ext.lower()
    ye, mo, path = Query.getPathSave(Settings.DATA_PATH)
    rowdata = {
        "_id_user":iduser,
        "name":fname,
        "y":ye,
        "m":mo,
        "date_create":Query.now(),
        "date_update":Query.now(),
        "factor":1.0,
        "rois":[],
        "shared":0,
        "status":1,
        "statusmsg":"new data lung...",
    }
    idin = None
    try:
        idin = MongoDB.insert(DBS.DBMEDIA, "app_lung", rowdata)
        idin = str(idin)
        idin = Query.converid(idin)
        rs = list(MongoDB.find(DBS.DBMEDIA, "app_lung", {"_id": idin}))
        # NOTE(review): `path` is re-joined inside the loop, so a second
        # iteration would nest paths -- presumably rs always has one row.
        for rr in rs:
            tilesize = 500
            tileperce = 0.01
            path = os.path.join(path, str(idin))
            Query.makedir(path)
            Query.savefile(path, file['body'])
            ROI.execute(path, tilesize, tileperce)
            Features.execute(path)
            SplitImage.execute(path, 500)
            r = "<script>parent.openprojects();</script>"
    except Exception as e:
        print("error upload file", e)
        if idin != None:
            # NOTE(review): dropdataset is handed a filesystem path here but
            # internally builds an ObjectId from its argument -- confirm.
            Query.dropdataset(os.path.join(Settings.DATA_PATH, ye, mo, str(idin)) )
    return r
@staticmethod
def dropdataset(idin):
    """Delete the ``app_lung`` record identified by *idin*.

    *idin* is the record id as a string.  Only the database row is
    removed; the on-disk files are intentionally left in place (the old
    ``rm -rf`` call was already disabled).
    """
    MongoDB.delete(DBS.DBMEDIA, "app_lung", {"_id": ObjectId(idin)})
    # Tell the browser to refresh the projects view.
    r = "<script>parent.openprojects();</script>"
    return {"response": r}
@staticmethod
def makeclassification(usid, argms):
    """Run model prediction for the request described by *argms*.

    Returns a status envelope whose ``response`` holds the predictions
    and their labels.
    """
    user_id = usid
    project_id = argms["idpj"]
    roi_ids = argms["idroi"]
    model_version = argms["idmodelversion"]
    model_id = argms["idmodel"]
    print("argms classs", argms)
    # Resolve the dataset directory relative to the configured data root.
    query_path = os.path.join(Settings.DATA_PATH, argms["path"])
    predictions, labels = Classification.predict(
        query_path, model_version, model_id, roi_ids)
    payload = {"yp": predictions, "labels": labels}
    print("rs", payload)
    return {"statusopt": 0, "statusval": "", "response": payload}
@staticmethod
def savefile(path, data):
    """Write upload bytes to ``original.tiff`` and derive ``original.jpg``.

    A context manager guarantees the TIFF handle is closed even if the
    write raises (the original code leaked the handle on error).
    """
    pfiletiff = os.path.join(path, "original.tiff")
    pfilejpg = os.path.join(path, "original.jpg")
    with open(pfiletiff, mode="wb") as output_file:
        output_file.write(data)
    # Re-encode the TIFF as JPG via ``sitk`` for web preview.
    image = sitk.ReadImage(pfiletiff)
    sitk.WriteImage(image, pfilejpg)
@staticmethod
def now():
return datetime.now().strftime('%Y-%m-%d %H:%M:%S')
@staticmethod
def converid(idin):
    """Convert a string id into a BSON ``ObjectId``.

    Historically this was conditional on ``Settings.MULIUSER``; it now
    always converts (the dead branch lived in the docstring and has been
    removed).
    """
    return ObjectId(idin)
|
oepoll.py | # -*- coding: utf-8 -*-
from .client import LINE
from types import *
import os, sys, threading, time
class OEPoll(object):
    """Long-poll dispatcher: fetches LINE operations and routes each one
    to the handler registered for its operation type."""

    def __init__(self, client):
        if type(client) is not LINE:
            raise Exception('You need to set LINE instance to initialize OEPoll')
        self.client = client
        # These used to be *class* attributes, so every OEPoll instance
        # shared (and clobbered) the same dicts; they are per-instance now.
        self.OpInterrupt = {}
        self.__squareSubId = {}
        self.__squareSyncToken = {}

    def __fetchOperation(self, revision, count=1):
        return self.client.poll.fetchOperations(revision, count)

    def __execute(self, op, threaded):
        """Invoke the handler for *op*, optionally on a worker thread.

        Fixes two bugs: the parameter used to be named ``threading``,
        shadowing the module (so ``threading.Thread`` was an attribute
        lookup on a bool), and the handler was *called* while building
        the Thread instead of being passed as its target.
        """
        try:
            if threaded:
                worker = threading.Thread(target=self.OpInterrupt[op.type],
                                          args=(op,))
                worker.daemon = False
                worker.start()
            else:
                self.OpInterrupt[op.type](op)
        except Exception as e:
            self.client.log(e)

    def addOpInterruptWithDict(self, OpInterruptDict):
        self.OpInterrupt.update(OpInterruptDict)

    def addOpInterrupt(self, OperationType, DisposeFunc):
        self.OpInterrupt[OperationType] = DisposeFunc

    def setRevision(self, revision):
        # Revisions only ever move forward.
        self.client.revision = max(revision, self.client.revision)

    def singleTrace(self, count=1):
        """Fetch up to *count* operations once; [] when the poll returned
        nothing, None when the fetch failed."""
        try:
            operations = self.__fetchOperation(self.client.revision, count=count)
        except KeyboardInterrupt:
            exit()
        except Exception:
            return
        if operations is None:
            return []
        return operations

    def trace(self, threading=False):
        # NOTE: the keyword stays spelled ``threading`` for caller
        # compatibility; here it is only a bool flag.
        try:
            operations = self.__fetchOperation(self.client.revision)
        except KeyboardInterrupt:
            exit()
        except Exception:
            return
        for op in operations:
            if op.type in self.OpInterrupt:
                self.__execute(op, threading)
            self.setRevision(op.revision)

    def singleFetchSquareChat(self, squareChatMid, limit=1):
        """Fetch up to *limit* events for one square chat, tracking the
        per-chat subscription id and sync token between calls."""
        self.__squareSubId.setdefault(squareChatMid, 0)
        self.__squareSyncToken.setdefault(squareChatMid, '')
        sqcEvents = self.client.fetchSquareChatEvents(
            squareChatMid,
            subscriptionId=self.__squareSubId[squareChatMid],
            syncToken=self.__squareSyncToken[squareChatMid],
            limit=limit,
            direction=1)
        self.__squareSubId[squareChatMid] = sqcEvents.subscription
        self.__squareSyncToken[squareChatMid] = sqcEvents.syncToken
        return sqcEvents.events
|
core.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import json
import unittest
import bleach
import doctest
import mock
import multiprocessing
import os
import re
import signal
import sqlalchemy
import subprocess
import tempfile
import warnings
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from numpy.testing import assert_array_almost_equal
from six.moves.urllib.parse import urlencode
from time import sleep
from airflow import configuration
from airflow.executors import SequentialExecutor
from airflow.models import Variable
from airflow import jobs, models, DAG, utils, macros, settings, exceptions
from airflow.models import BaseOperator
from airflow.models.connection import Connection
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from airflow.utils.state import State
from airflow.utils.dates import days_ago, infer_time_unit, round_time, scale_time_units
from lxml import html
from airflow.exceptions import AirflowException
from airflow.configuration import AirflowConfigException, run_command
from jinja2.sandbox import SecurityError
from jinja2 import UndefinedError
from pendulum import utcnow
import six
NUM_EXAMPLE_DAGS = 18
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
EXAMPLE_DAG_DEFAULT_DATE = days_ago(2)
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle
class OperatorSubclass(BaseOperator):
    """
    An operator to test template substitution
    """
    template_fields = ['some_templated_field']

    def __init__(self, some_templated_field, *args, **kwargs):
        super(OperatorSubclass, self).__init__(*args, **kwargs)
        self.some_templated_field = some_templated_field

    def execute(self, *args, **kwargs):
        # No-op: tests monkey-patch ``execute`` to inspect the rendered
        # field.  The original signature omitted ``self`` and only worked
        # because the instance landed in *args.
        pass
class CoreTest(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
    # Fresh test config and the bundled example DAGs for every test.
    configuration.conf.load_test_config()
    self.dagbag = models.DagBag(
        dag_folder=DEV_NULL, include_examples=True)
    self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
    self.dag = DAG(TEST_DAG_ID, default_args=self.args)
    # Handy references into the example_bash_operator DAG.
    self.dag_bash = self.dagbag.dags['example_bash_operator']
    self.runme_0 = self.dag_bash.get_task('runme_0')
    self.run_after_loop = self.dag_bash.get_task('run_after_loop')
    self.run_this_last = self.dag_bash.get_task('run_this_last')
def tearDown(self):
    # Purge this DAG's TaskInstance/TaskFail rows between tests.  Cleanup
    # is skipped when KUBERNETES_VERSION is set (reason not visible here).
    if os.environ.get('KUBERNETES_VERSION') is None:
        session = Session()
        session.query(models.TaskInstance).filter_by(
            dag_id=TEST_DAG_ID).delete()
        session.query(models.TaskFail).filter_by(
            dag_id=TEST_DAG_ID).delete()
        session.commit()
        session.close()
def test_schedule_dag_no_previous_runs(self):
    """
    Tests scheduling a dag with no previous runs
    """
    dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
    dag.add_task(models.BaseOperator(
        task_id="faketastic",
        owner='Also fake',
        start_date=datetime(2015, 1, 2, 0, 0)))
    dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
    self.assertIsNotNone(dag_run)
    self.assertEqual(dag.dag_id, dag_run.dag_id)
    self.assertIsNotNone(dag_run.run_id)
    self.assertNotEqual('', dag_run.run_id)
    # First run of a fresh DAG starts at the task's start_date.
    self.assertEqual(
        datetime(2015, 1, 2, 0, 0),
        dag_run.execution_date,
        msg='dag_run.execution_date did not match expectation: {0}'
        .format(dag_run.execution_date)
    )
    self.assertEqual(State.RUNNING, dag_run.state)
    self.assertFalse(dag_run.external_trigger)
    dag.clear()
def test_schedule_dag_relativedelta(self):
    """
    Tests scheduling a dag with a relativedelta schedule_interval
    """
    delta = relativedelta(hours=+1)
    dag = DAG(TEST_DAG_ID + 'test_schedule_dag_relativedelta',
              schedule_interval=delta)
    dag.add_task(models.BaseOperator(
        task_id="faketastic",
        owner='Also fake',
        start_date=datetime(2015, 1, 2, 0, 0)))
    dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
    self.assertIsNotNone(dag_run)
    self.assertEqual(dag.dag_id, dag_run.dag_id)
    self.assertIsNotNone(dag_run.run_id)
    self.assertNotEqual('', dag_run.run_id)
    self.assertEqual(
        datetime(2015, 1, 2, 0, 0),
        dag_run.execution_date,
        msg='dag_run.execution_date did not match expectation: {0}'
        .format(dag_run.execution_date)
    )
    self.assertEqual(State.RUNNING, dag_run.state)
    self.assertFalse(dag_run.external_trigger)
    # The second run must be exactly one relativedelta after the first.
    dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
    self.assertIsNotNone(dag_run2)
    self.assertEqual(dag.dag_id, dag_run2.dag_id)
    self.assertIsNotNone(dag_run2.run_id)
    self.assertNotEqual('', dag_run2.run_id)
    self.assertEqual(
        datetime(2015, 1, 2, 0, 0) + delta,
        dag_run2.execution_date,
        msg='dag_run2.execution_date did not match expectation: {0}'
        .format(dag_run2.execution_date)
    )
    self.assertEqual(State.RUNNING, dag_run2.state)
    self.assertFalse(dag_run2.external_trigger)
    dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
    """
    Test scheduling a dag where there is a prior DagRun
    which has the same run_id as the next run should have
    """
    delta = timedelta(hours=1)
    dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
              schedule_interval=delta,
              start_date=DEFAULT_DATE)
    dag.add_task(models.BaseOperator(
        task_id="faketastic",
        owner='Also fake',
        start_date=DEFAULT_DATE))
    scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
    # Pre-create an externally triggered run occupying the first slot.
    dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
                      execution_date=DEFAULT_DATE,
                      state=State.SUCCESS,
                      external_trigger=True)
    dag_run = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dag_run)
    self.assertEqual(dag.dag_id, dag_run.dag_id)
    self.assertIsNotNone(dag_run.run_id)
    self.assertNotEqual('', dag_run.run_id)
    # The scheduler must move on to the next interval, not reuse the slot.
    self.assertEqual(
        DEFAULT_DATE + delta,
        dag_run.execution_date,
        msg='dag_run.execution_date did not match expectation: {0}'
        .format(dag_run.execution_date)
    )
    self.assertEqual(State.RUNNING, dag_run.state)
    self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
    """
    Tests scheduling a dag scheduled for @once - should be scheduled the first time
    it is called, and not scheduled the second.
    """
    dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
    dag.schedule_interval = '@once'
    dag.add_task(models.BaseOperator(
        task_id="faketastic",
        owner='Also fake',
        start_date=datetime(2015, 1, 2, 0, 0)))
    dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
    dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
    self.assertIsNotNone(dag_run)
    # Second scheduling attempt must yield nothing for an @once DAG.
    self.assertIsNone(dag_run2)
    dag.clear()
def test_fractional_seconds(self):
    """
    Tests if fractional seconds are stored in the database
    """
    dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
    dag.schedule_interval = '@once'
    dag.add_task(models.BaseOperator(
        task_id="faketastic",
        owner='Also fake',
        start_date=datetime(2015, 1, 2, 0, 0)))
    # utcnow() carries microseconds; they must survive the DB round trip.
    start_date = timezone.utcnow()
    run = dag.create_dagrun(
        run_id='test_' + start_date.isoformat(),
        execution_date=start_date,
        start_date=start_date,
        state=State.RUNNING,
        external_trigger=False
    )
    run.refresh_from_db()
    self.assertEqual(start_date, run.execution_date,
                     "dag run execution_date loses precision")
    self.assertEqual(start_date, run.start_date,
                     "dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
    """
    Tests that an attempt to schedule a task after the Dag's end_date
    does not succeed.
    """
    delta = timedelta(hours=1)
    runs = 3
    start_date = DEFAULT_DATE
    # end_date admits exactly `runs` intervals.
    end_date = start_date + (runs - 1) * delta
    dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
              start_date=start_date,
              end_date=end_date,
              schedule_interval=delta)
    dag.add_task(models.BaseOperator(task_id='faketastic',
                                     owner='Also fake'))
    # Create and schedule the dag runs
    dag_runs = []
    scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
    for i in range(runs):
        dag_runs.append(scheduler.create_dag_run(dag))
    additional_dag_run = scheduler.create_dag_run(dag)
    for dag_run in dag_runs:
        self.assertIsNotNone(dag_run)
    # The run past end_date must not be created.
    self.assertIsNone(additional_dag_run)
def test_schedule_dag_no_end_date_up_to_today_only(self):
    """
    Tests that a Dag created without an end_date can only be scheduled up
    to and including the current datetime.

    For example, if today is 2016-01-01 and we are scheduling from a
    start_date of 2015-01-01, only jobs up to, but not including
    2016-01-01 should be scheduled.
    """
    session = settings.Session()
    delta = timedelta(days=1)
    now = utcnow()
    start_date = now.subtract(weeks=1)
    runs = (now - start_date).days
    dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
              start_date=start_date,
              schedule_interval=delta)
    dag.add_task(models.BaseOperator(task_id='faketastic',
                                     owner='Also fake'))
    dag_runs = []
    scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
    for i in range(runs):
        dag_run = scheduler.create_dag_run(dag)
        dag_runs.append(dag_run)
        # Mark the DagRun as complete
        dag_run.state = State.SUCCESS
        session.merge(dag_run)
        session.commit()
    # Attempt to schedule an additional dag run (for 2016-01-01)
    additional_dag_run = scheduler.create_dag_run(dag)
    for dag_run in dag_runs:
        self.assertIsNotNone(dag_run)
    # Nothing may be scheduled at/after "now".
    self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
    # Sanity check that load_test_config() actually took effect.
    self.assertTrue(configuration.conf.get('core', 'unit_test_mode'))
def test_pickling(self):
    # DAG.pickle() round-trips at least the dag_id.
    dp = self.dag.pickle()
    self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
    """DAG equality, ordering and hashing are based on _comps plus type."""
    class DAGsubclass(DAG):
        pass

    dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
    dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
    dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
    dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
    dag_subclass_diff_name = DAGsubclass(
        TEST_DAG_ID + '2', default_args=self.args)
    # Align last_loaded so only the intended fields differ.
    for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
        d.last_loaded = self.dag.last_loaded
    # test identity equality
    self.assertEqual(self.dag, self.dag)
    # test dag (in)equality based on _comps
    self.assertEqual(dag_eq, self.dag)
    self.assertNotEqual(dag_diff_name, self.dag)
    self.assertNotEqual(dag_diff_load_time, self.dag)
    # test dag inequality based on type even if _comps happen to match
    self.assertNotEqual(dag_subclass, self.dag)
    # a dag should equal an unpickled version of itself
    d = pickle.dumps(self.dag)
    self.assertEqual(pickle.loads(d), self.dag)
    # dags are ordered based on dag_id no matter what the type is
    self.assertLess(self.dag, dag_diff_name)
    self.assertGreater(self.dag, dag_diff_load_time)
    self.assertLess(self.dag, dag_subclass_diff_name)
    # greater than should have been created automatically by functools
    self.assertGreater(dag_diff_name, self.dag)
    # hashes are non-random and match equality
    self.assertEqual(hash(self.dag), hash(self.dag))
    self.assertEqual(hash(dag_eq), hash(self.dag))
    self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
    self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
    """CheckOperator and ValueCheckOperator run against a scratch table."""
    conn_id = "sqlite_default"
    captainHook = BaseHook.get_hook(conn_id=conn_id)
    captainHook.run("CREATE TABLE operator_test_table (a, b)")
    captainHook.run("insert into operator_test_table values (1,2)")
    t = CheckOperator(
        task_id='check',
        sql="select count(*) from operator_test_table",
        conn_id=conn_id,
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    # 100 is within the 10% tolerance band around pass_value 95.
    t = ValueCheckOperator(
        task_id='value_check',
        pass_value=95,
        tolerance=0.1,
        conn_id=conn_id,
        sql="SELECT 100",
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    captainHook.run("drop table operator_test_table")
def test_clear_api(self):
    # Smoke test: clear() and are_dependents_done() run without error.
    task = self.dag_bash.tasks[0]
    task.clear(
        start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
        upstream=True, downstream=True)
    ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
    ti.are_dependents_done()
def test_illegal_args(self):
    """
    Tests that Operators reject illegal arguments
    """
    # Unknown kwargs only warn (PendingDeprecationWarning), not raise.
    with warnings.catch_warnings(record=True) as w:
        BashOperator(
            task_id='test_illegal_args',
            bash_command='echo success',
            dag=self.dag,
            illegal_argument_1234='hello?')
        self.assertTrue(
            issubclass(w[0].category, PendingDeprecationWarning))
        self.assertIn(
            ('Invalid arguments were passed to BashOperator '
             '(task_id: test_illegal_args).'),
            w[0].message.args[0])
def test_bash_operator(self):
    # Minimal happy-path run of a BashOperator.
    t = BashOperator(
        task_id='test_bash_operator',
        bash_command="echo success",
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
    # Non-ASCII command output must survive the configured output_encoding.
    t = BashOperator(
        task_id='test_multi_byte_bash_operator',
        bash_command=u"echo \u2600",
        dag=self.dag,
        output_encoding='utf-8')
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
    """A timed-out bash task must not leave its subprocess running."""
    import psutil
    # Embed our pid in the sleep duration so the child is recognizable.
    sleep_time = "100%d" % os.getpid()
    t = BashOperator(
        task_id='test_bash_operator_kill',
        execution_timeout=timedelta(seconds=1),
        bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
        dag=self.dag)
    self.assertRaises(
        exceptions.AirflowTaskTimeout,
        t.run,
        start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
    sleep(2)
    pid = -1
    for proc in psutil.process_iter():
        if proc.cmdline() == ['sleep', sleep_time]:
            pid = proc.pid
    if pid != -1:
        # Clean up the leaked process before failing the test.
        os.kill(pid, signal.SIGTERM)
        self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_on_failure_callback(self):
    # Annoying workaround for nonlocal not existing in python 2
    data = {'called': False}

    def check_failure(context, test_case=self):
        # Must receive the raised exception in the task context.
        data['called'] = True
        error = context.get('exception')
        test_case.assertIsInstance(error, AirflowException)

    t = BashOperator(
        task_id='check_on_failure_callback',
        bash_command="exit 1",
        dag=self.dag,
        on_failure_callback=check_failure)
    self.assertRaises(
        exceptions.AirflowException,
        t.run,
        start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    self.assertTrue(data['called'])
def test_trigger_dagrun(self):
    """TriggerDagRunOperator runs with a callable returning the payload."""
    def trigga(context, obj):
        # The original wrapped this return in a pointless ``if True:``.
        return obj

    t = TriggerDagRunOperator(
        task_id='test_trigger_dagrun',
        trigger_dag_id='example_bash_operator',
        python_callable=trigga,
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
    # dry_run() renders templates without executing the command.
    t = BashOperator(
        task_id='test_dryrun',
        bash_command="echo success",
        dag=self.dag)
    t.dry_run()
def test_sqlite(self):
    # Import locally so the module is only required by this test.
    import airflow.operators.sqlite_operator
    t = airflow.operators.sqlite_operator.SqliteOperator(
        task_id='time_sqlite',
        sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
    # A 5s sleep against a 1s execution_timeout must raise AirflowTaskTimeout.
    t = PythonOperator(
        task_id='test_timeout',
        execution_timeout=timedelta(seconds=1),
        python_callable=lambda: sleep(5),
        dag=self.dag)
    self.assertRaises(
        exceptions.AirflowTaskTimeout,
        t.run,
        start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
    # templates_dict values are templated; {{ ds }} must equal the ds kwarg.
    def test_py_op(templates_dict, ds, **kwargs):
        if not templates_dict['ds'] == ds:
            raise Exception("failure")

    t = PythonOperator(
        task_id='test_py_op',
        provide_context=True,
        python_callable=test_py_op,
        templates_dict={'ds': "{{ ds }}"},
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
    # Templating must recurse into nested containers of template_fields.
    def verify_templated_field(context):
        self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
                         context['ds'])

    t = OperatorSubclass(
        task_id='test_complex_template',
        some_templated_field={
            'foo': '123',
            'bar': ['baz', '{{ ds }}']
        },
        dag=self.dag)
    # Patch execute so the assertion runs inside the task.
    t.execute = verify_templated_field
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
    """
    Test the availability of variables in templates
    """
    val = {
        'test_value': 'a test value'
    }
    Variable.set("a_variable", val['test_value'])

    def verify_templated_field(context):
        self.assertEqual(context['ti'].task.some_templated_field,
                         val['test_value'])

    # {{ var.value.<key> }} resolves to the stored Variable value.
    t = OperatorSubclass(
        task_id='test_complex_template',
        some_templated_field='{{ var.value.a_variable }}',
        dag=self.dag)
    t.execute = verify_templated_field
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
    """
    Test the availability of variables (serialized as JSON) in templates
    """
    val = {
        'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
    }
    Variable.set("a_variable", val['test_value'], serialize_json=True)

    def verify_templated_field(context):
        self.assertEqual(context['ti'].task.some_templated_field,
                         val['test_value']['obj']['v2'])

    # {{ var.json.<key> }} deserializes and allows attribute-style access.
    t = OperatorSubclass(
        task_id='test_complex_template',
        some_templated_field='{{ var.json.a_variable.obj.v2 }}',
        dag=self.dag)
    t.execute = verify_templated_field
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
    """
    Test the availability of variables (serialized as JSON) in templates, but
    accessed as a value
    """
    val = {
        'test_value': {'foo': 'bar'}
    }
    Variable.set("a_variable", val['test_value'], serialize_json=True)

    def verify_templated_field(context):
        # var.value yields the raw JSON string, not the parsed object.
        self.assertEqual(context['ti'].task.some_templated_field,
                         u'{"foo": "bar"}')

    t = OperatorSubclass(
        task_id='test_complex_template',
        some_templated_field='{{ var.value.a_variable }}',
        dag=self.dag)
    t.execute = verify_templated_field
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
    """
    Test templates can handle objects with no sense of truthiness
    """
    class NonBoolObject(object):
        def __len__(self):
            return NotImplemented

        def __bool__(self):
            return NotImplemented

    t = OperatorSubclass(
        task_id='test_bad_template_obj',
        some_templated_field=NonBoolObject(),
        dag=self.dag)
    # Must not crash while scanning template fields for file references.
    t.resolve_template_files()
def test_task_get_template(self):
    """Template context exposes the expected date-derived fields.

    Uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias.
    """
    TI = models.TaskInstance
    ti = TI(
        task=self.runme_0, execution_date=DEFAULT_DATE)
    ti.dag = self.dag_bash
    ti.run(ignore_ti_state=True)
    context = ti.get_template_context()

    # DEFAULT DATE is 2015-01-01
    self.assertEqual(context['ds'], '2015-01-01')
    self.assertEqual(context['ds_nodash'], '20150101')

    # next_ds is 2015-01-02 as the dag interval is daily
    self.assertEqual(context['next_ds'], '2015-01-02')
    self.assertEqual(context['next_ds_nodash'], '20150102')

    # prev_ds is 2014-12-31 as the dag interval is daily
    self.assertEqual(context['prev_ds'], '2014-12-31')
    self.assertEqual(context['prev_ds_nodash'], '20141231')

    self.assertEqual(context['ts'], '2015-01-01T00:00:00+00:00')
    self.assertEqual(context['ts_nodash'], '20150101T000000')
    self.assertEqual(context['ts_nodash_with_tz'], '20150101T000000+0000')

    self.assertEqual(context['yesterday_ds'], '2014-12-31')
    self.assertEqual(context['yesterday_ds_nodash'], '20141231')

    self.assertEqual(context['tomorrow_ds'], '2015-01-02')
    self.assertEqual(context['tomorrow_ds_nodash'], '20150102')
def test_import_examples(self):
    # All bundled example DAGs must import cleanly.
    self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
    # Smoke test: a LocalTaskJob runs a single TaskInstance to completion.
    TI = models.TaskInstance
    ti = TI(
        task=self.runme_0, execution_date=DEFAULT_DATE)
    job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
    job.run()
def test_raw_job(self):
    # Smoke test: run the TaskInstance directly, without a LocalTaskJob.
    TI = models.TaskInstance
    ti = TI(
        task=self.runme_0, execution_date=DEFAULT_DATE)
    ti.dag = self.dag_bash
    ti.run(ignore_ti_state=True)
def test_doctests(self):
    # Run the doctests embedded in the utils and macros modules.
    modules = [utils, macros]
    for mod in modules:
        failed, tests = doctest.testmod(mod)
        if failed:
            raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
    # A plain string Variable must round-trip unchanged.
    Variable.set("tested_var_set_id", "Monday morning breakfast")
    self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
    # A dict round-trips when both serialize and deserialize flags are set.
    value = {"a": 17, "b": 47}
    Variable.set("tested_var_set_id", value, serialize_json=True)
    self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
    # Missing key with default_var returns the default, no exception.
    default_value = "some default val"
    self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
                                                 default_var=default_value))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
    # The default is returned verbatim even when deserialize_json=True.
    default_value = "}{ this is a non JSON default }{"
    self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
                                                 default_var=default_value,
                                                 deserialize_json=True))
def test_variable_setdefault_round_trip(self):
    # setdefault on a missing key stores and returns the supplied value.
    key = "tested_var_setdefault_1_id"
    value = "Monday morning breakfast in Paris"
    Variable.setdefault(key, value)
    self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
    # setdefault with deserialize_json stores a dict that reads back equal.
    key = "tested_var_setdefault_2_id"
    value = {"city": 'Paris', "Hapiness": True}
    Variable.setdefault(key, value, deserialize_json=True)
    self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
    # When the key already exists, setdefault must not overwrite it.
    key = "tested_var_setdefault_2_id"
    value = {"city": 'Paris', "Hapiness": True}
    Variable.set(key, value, serialize_json=True)
    val = Variable.setdefault(key, value, deserialize_json=True)
    # Check the returned value, and the stored value are handled correctly.
    self.assertEqual(value, val)
    self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_parameterized_config_gen(self):
    # parameterized_config must expand placeholders in the default config.
    cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)

    # making sure some basic building blocks are present:
    self.assertIn("[core]", cfg)
    self.assertIn("dags_folder", cfg)
    self.assertIn("sql_alchemy_conn", cfg)
    self.assertIn("fernet_key", cfg)

    # making sure replacement actually happened
    self.assertNotIn("{AIRFLOW_HOME}", cfg)
    self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
    # When both FERNET_KEY and FERNET_KEY_CMD exist, the plain option wins.
    self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
    self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))

    FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')

    configuration.conf.set("core", "FERNET_KEY_CMD", "printf HELLO")

    FALLBACK_FERNET_KEY = configuration.conf.get(
        "core",
        "FERNET_KEY"
    )

    self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)

    # restore the conf back to the original state
    configuration.conf.remove_option("core", "FERNET_KEY_CMD")
def test_config_throw_error_when_original_and_fallback_is_absent(self):
    # With neither FERNET_KEY nor FERNET_KEY_CMD set, get() must raise.
    self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
    self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))

    FERNET_KEY = configuration.conf.get("core", "FERNET_KEY")
    configuration.conf.remove_option("core", "FERNET_KEY")

    with self.assertRaises(AirflowConfigException) as cm:
        configuration.conf.get("core", "FERNET_KEY")

    exception = str(cm.exception)
    message = "section/key [core/fernet_key] not found in config"
    self.assertEqual(message, exception)

    # restore the conf back to the original state
    configuration.conf.set("core", "FERNET_KEY", FERNET_KEY)
    self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
def test_config_override_original_when_non_empty_envvar_is_provided(self):
    """A non-empty AIRFLOW__CORE__FERNET_KEY env var overrides the config."""
    key = "AIRFLOW__CORE__FERNET_KEY"
    value = "some value"
    self.assertNotIn(key, os.environ)

    os.environ[key] = value
    try:
        FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
        self.assertEqual(value, FERNET_KEY)
    finally:
        # Restore the environment even if the assertion fails, so later
        # tests don't inherit the override (the original leaked it).
        del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
    """Even an empty AIRFLOW__CORE__FERNET_KEY env var overrides the config."""
    key = "AIRFLOW__CORE__FERNET_KEY"
    value = ""
    self.assertNotIn(key, os.environ)

    os.environ[key] = value
    try:
        FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
        self.assertEqual(value, FERNET_KEY)
    finally:
        # Restore the environment even if the assertion fails, so later
        # tests don't inherit the override (the original leaked it).
        del os.environ[key]
def test_round_time(self):
    """round_time snaps a datetime to the nearest interval boundary."""
    rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
    self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)

    rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
    self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)

    # The third argument anchors the interval grid.
    rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
        2015, 9, 14, 0, 0))
    self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)

    rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
        2015, 9, 14, 0, 0))
    self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)

    rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
        2015, 9, 14, 0, 0))
    self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)

    rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
        2015, 9, 14, 0, 0))
    self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
    # infer_time_unit picks a display unit from second-valued durations.
    self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))

    self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))

    self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))

    self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
    # use assert_almost_equal from numpy.testing since we are comparing
    # floating point arrays
    arr1 = scale_time_units([130, 5400, 10], 'minutes')
    assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)

    arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
    assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)

    arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
    assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
                              decimal=3)

    arr4 = scale_time_units([200000, 100000], 'days')
    assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_duplicate_dependencies(self):
    # Registering an existing dependency in either direction must raise.
    # NOTE(review): assertRaisesRegexp is the deprecated spelling; kept,
    # presumably for Python 2 compatibility (file also imports six).
    regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \
             "already registered"

    with self.assertRaisesRegexp(AirflowException, regexp):
        self.runme_0.set_downstream(self.run_after_loop)

    with self.assertRaisesRegexp(AirflowException, regexp):
        self.run_after_loop.set_upstream(self.runme_0)
def test_bad_trigger_rule(self):
    # An unknown trigger_rule is rejected at construction time.
    with self.assertRaises(AirflowException):
        DummyOperator(
            task_id='test_bad_trigger',
            trigger_rule="non_existent",
            dag=self.dag)
def test_terminate_task(self):
    """If a task instance's db state get deleted, it should fail"""
    TI = models.TaskInstance
    dag = self.dagbag.dags.get('test_utils')
    task = dag.task_dict.get('sleeps_forever')

    ti = TI(task=task, execution_date=DEFAULT_DATE)
    job = jobs.LocalTaskJob(
        task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())

    # Running task instance asynchronously
    p = multiprocessing.Process(target=job.run)
    p.start()
    sleep(5)
    # Dispose pooled connections before using the session from this process.
    settings.engine.dispose()
    session = settings.Session()
    ti.refresh_from_db(session=session)
    # making sure it's actually running
    self.assertEqual(State.RUNNING, ti.state)
    ti = session.query(TI).filter_by(
        dag_id=task.dag_id,
        task_id=task.task_id,
        execution_date=DEFAULT_DATE
    ).one()

    # deleting the instance should result in a failure
    session.delete(ti)
    session.commit()
    # waiting for the async task to finish
    p.join()

    # making sure that the task ended up as failed
    ti.refresh_from_db(session=session)
    self.assertEqual(State.FAILED, ti.state)
    session.close()
def test_task_fail_duration(self):
    """If a task fails, the duration should be recorded in TaskFail"""
    p = BashOperator(
        task_id='pass_sleepy',
        bash_command='sleep 3',
        dag=self.dag)
    # fail_sleepy exceeds its 3s execution_timeout, so it will fail.
    f = BashOperator(
        task_id='fail_sleepy',
        bash_command='sleep 5',
        execution_timeout=timedelta(seconds=3),
        retry_delay=timedelta(seconds=0),
        dag=self.dag)
    session = settings.Session()
    try:
        p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    except Exception:
        pass
    try:
        f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
    except Exception:
        pass
    p_fails = session.query(models.TaskFail).filter_by(
        task_id='pass_sleepy',
        dag_id=self.dag.dag_id,
        execution_date=DEFAULT_DATE).all()
    f_fails = session.query(models.TaskFail).filter_by(
        task_id='fail_sleepy',
        dag_id=self.dag.dag_id,
        execution_date=DEFAULT_DATE).all()
    # Only the failing task leaves a TaskFail row, and it ran >= 3s.
    self.assertEqual(0, len(p_fails))
    self.assertEqual(1, len(f_fails))
    self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_run_command(self):
if six.PY3:
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
else:
write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)),
u'\u1000foo' if six.PY3 else 'foo')
self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
def test_trigger_dagrun_with_execution_date(self):
utc_now = timezone.utcnow()
run_id = 'trig__' + utc_now.isoformat()
def payload_generator(context, object):
object.run_id = run_id
return object
task = TriggerDagRunOperator(task_id='test_trigger_dagrun_with_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = models.DagRun.find(dag_id='example_bash_operator',
run_id=run_id)
self.assertEquals(len(dag_runs), 1)
dag_run = dag_runs[0]
self.assertEquals(dag_run.execution_date, utc_now)
def test_externally_triggered_dagrun(self):
TI = models.TaskInstance
# Create the dagrun between two "scheduled" execution dates of the DAG
EXECUTION_DATE = DEFAULT_DATE + timedelta(days=2)
EXECUTION_DS = EXECUTION_DATE.strftime('%Y-%m-%d')
EXECUTION_DS_NODASH = EXECUTION_DS.replace('-', '')
dag = DAG(
TEST_DAG_ID,
default_args=self.args,
schedule_interval=timedelta(weeks=1),
start_date=DEFAULT_DATE)
task = DummyOperator(task_id='test_externally_triggered_dag_context',
dag=dag)
dag.create_dagrun(run_id=models.DagRun.id_for_date(EXECUTION_DATE),
execution_date=EXECUTION_DATE,
state=State.RUNNING,
external_trigger=True)
task.run(
start_date=EXECUTION_DATE, end_date=EXECUTION_DATE)
ti = TI(task=task, execution_date=EXECUTION_DATE)
context = ti.get_template_context()
# next_ds/prev_ds should be the execution date for manually triggered runs
self.assertEquals(context['next_ds'], EXECUTION_DS)
self.assertEquals(context['next_ds_nodash'], EXECUTION_DS_NODASH)
self.assertEquals(context['prev_ds'], EXECUTION_DS)
self.assertEquals(context['prev_ds_nodash'], EXECUTION_DS_NODASH)
class CliTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Start the suite from clean Pool/Variable tables."""
        super(CliTests, cls).setUpClass()
        cls._cleanup()
    def setUp(self):
        """Build a test Flask app, CLI parser and example DagBag for each test."""
        super(CliTests, self).setUp()
        from airflow.www_rbac import app as application
        configuration.load_test_config()
        self.app, self.appbuilder = application.create_app(session=Session, testing=True)
        self.app.config['TESTING'] = True
        self.parser = cli.CLIFactory.get_parser()
        self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
        settings.configure_orm()
        self.session = Session
    def tearDown(self):
        """Purge Pool/Variable rows created by the test."""
        self._cleanup(session=self.session)
        super(CliTests, self).tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_list_dag_runs(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator', ]))
args = self.parser.parse_args(['list_dag_runs',
'example_bash_operator',
'--no_backfill'])
cli.list_dag_runs(args)
def test_cli_create_user_random_password(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test1', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@foo.com', '--role', 'Viewer', '--use_random_password'
])
cli.users(args)
def test_cli_create_user_supplied_password(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test2', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@apache.org', '--role', 'Viewer', '--password', 'test'
])
cli.users(args)
def test_cli_delete_user(self):
args = self.parser.parse_args([
'users', '-c', '--username', 'test3', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@example.com', '--role', 'Viewer', '--use_random_password'
])
cli.users(args)
args = self.parser.parse_args([
'users', '-d', '--username', 'test3',
])
cli.users(args)
def test_cli_list_users(self):
for i in range(0, 3):
args = self.parser.parse_args([
'users', '-c', '--username', 'user{}'.format(i), '--lastname',
'doe', '--firstname', 'jon',
'--email', 'jdoe+{}@gmail.com'.format(i), '--role', 'Viewer',
'--use_random_password'
])
cli.users(args)
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.users(self.parser.parse_args(['users', '-l']))
stdout = mock_stdout.getvalue()
for i in range(0, 3):
self.assertIn('user{}'.format(i), stdout)
def test_cli_sync_perm(self):
# test whether sync_perm cli will throw exceptions or not
args = self.parser.parse_args([
'sync_perm'
])
cli.sync_perm(args)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
@mock.patch("airflow.bin.cli.db_utils.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with(False)
@mock.patch("airflow.bin.cli.db_utils.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with(False)
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
conns = [[x.strip("'") for x in re.findall("'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['beeline_default', 'beeline'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
self.assertIn(['segment_default', 'segment'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', " +
"'conn_type', 'conn_host', 'conn_login', " +
"'conn_password', 'conn_schema', 'conn_port']"),
])
def test_cli_connections_list_redirect(self):
cmd = ['airflow', 'connections', '--list']
with tempfile.TemporaryFile() as fp:
p = subprocess.Popen(cmd, stdout=fp)
p.wait()
self.assertEqual(0, p.returncode)
    def test_cli_connections_add_delete(self):
        """End-to-end coverage of the `connections` CLI: add (both flag
        spellings), duplicate/invalid additions, DB verification, and delete."""
        # Add connections:
        uri = 'postgresql://airflow:airflow@host:5432/airflow'
        with mock.patch('sys.stdout',
                        new_callable=six.StringIO) as mock_stdout:
            cli.connections(self.parser.parse_args(
                ['connections', '--add', '--conn_id=new1',
                 '--conn_uri=%s' % uri]))
            cli.connections(self.parser.parse_args(
                ['connections', '-a', '--conn_id=new2',
                 '--conn_uri=%s' % uri]))
            cli.connections(self.parser.parse_args(
                ['connections', '--add', '--conn_id=new3',
                 '--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
            cli.connections(self.parser.parse_args(
                ['connections', '-a', '--conn_id=new4',
                 '--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
            cli.connections(self.parser.parse_args(
                ['connections', '--add', '--conn_id=new5',
                 '--conn_type=hive_metastore', '--conn_login=airflow',
                 '--conn_password=airflow', '--conn_host=host',
                 '--conn_port=9083', '--conn_schema=airflow']))
            cli.connections(self.parser.parse_args(
                ['connections', '-a', '--conn_id=new6',
                 '--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
            stdout = mock_stdout.getvalue()

        # Check addition stdout
        lines = [l for l in stdout.split('\n') if len(l) > 0]
        self.assertListEqual(lines, [
            ("\tSuccessfully added `conn_id`=new1 : " +
             "postgresql://airflow:airflow@host:5432/airflow"),
            ("\tSuccessfully added `conn_id`=new2 : " +
             "postgresql://airflow:airflow@host:5432/airflow"),
            ("\tSuccessfully added `conn_id`=new3 : " +
             "postgresql://airflow:airflow@host:5432/airflow"),
            ("\tSuccessfully added `conn_id`=new4 : " +
             "postgresql://airflow:airflow@host:5432/airflow"),
            ("\tSuccessfully added `conn_id`=new5 : " +
             "hive_metastore://airflow:airflow@host:9083/airflow"),
            ("\tSuccessfully added `conn_id`=new6 : " +
             "google_cloud_platform://:@:")
        ])

        # Attempt to add duplicate
        with mock.patch('sys.stdout',
                        new_callable=six.StringIO) as mock_stdout:
            cli.connections(self.parser.parse_args(
                ['connections', '--add', '--conn_id=new1',
                 '--conn_uri=%s' % uri]))
            stdout = mock_stdout.getvalue()

        # Check stdout for addition attempt
        lines = [l for l in stdout.split('\n') if len(l) > 0]
        self.assertListEqual(lines, [
            "\tA connection with `conn_id`=new1 already exists",
        ])

        # Attempt to add without providing conn_id
        with mock.patch('sys.stdout',
                        new_callable=six.StringIO) as mock_stdout:
            cli.connections(self.parser.parse_args(
                ['connections', '--add', '--conn_uri=%s' % uri]))
            stdout = mock_stdout.getvalue()

        # Check stdout for addition attempt
        lines = [l for l in stdout.split('\n') if len(l) > 0]
        self.assertListEqual(lines, [
            ("\tThe following args are required to add a connection:" +
             " ['conn_id']"),
        ])

        # Attempt to add without providing conn_uri
        with mock.patch('sys.stdout',
                        new_callable=six.StringIO) as mock_stdout:
            cli.connections(self.parser.parse_args(
                ['connections', '--add', '--conn_id=new']))
            stdout = mock_stdout.getvalue()

        # Check stdout for addition attempt
        lines = [l for l in stdout.split('\n') if len(l) > 0]
        self.assertListEqual(lines, [
            ("\tThe following args are required to add a connection:" +
             " ['conn_uri or conn_type']"),
        ])

        # Verify the rows written by the additions above against the DB.
        session = settings.Session()
        extra = {'new1': None,
                 'new2': None,
                 'new3': "{'extra': 'yes'}",
                 'new4': "{'extra': 'yes'}"}
        # NOTE(review): range(1, 6) only checks new1-new5, so the `new6`
        # branch below is dead code — consider range(1, 7) after confirming
        # the expected host/port values for new6. TODO confirm.
        for index in range(1, 6):
            conn_id = 'new%s' % index
            result = (session
                      .query(Connection)
                      .filter(Connection.conn_id == conn_id)
                      .first())
            result = (result.conn_id, result.conn_type, result.host,
                      result.port, result.get_extra())
            if conn_id in ['new1', 'new2', 'new3', 'new4']:
                self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
                                          extra[conn_id]))
            elif conn_id == 'new5':
                self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
                                          9083, None))
            elif conn_id == 'new6':
                self.assertEqual(result, (conn_id, 'google_cloud_platform',
                                          None, None, "{'extra': 'yes'}"))

        # Delete connections
        with mock.patch('sys.stdout',
                        new_callable=six.StringIO) as mock_stdout:
            cli.connections(self.parser.parse_args(
                ['connections', '--delete', '--conn_id=new1']))
            cli.connections(self.parser.parse_args(
                ['connections', '--delete', '--conn_id=new2']))
            cli.connections(self.parser.parse_args(
                ['connections', '--delete', '--conn_id=new3']))
            cli.connections(self.parser.parse_args(
                ['connections', '--delete', '--conn_id=new4']))
            cli.connections(self.parser.parse_args(
                ['connections', '--delete', '--conn_id=new5']))
            cli.connections(self.parser.parse_args(
                ['connections', '--delete', '--conn_id=new6']))
            stdout = mock_stdout.getvalue()

        # Check deletion stdout
        lines = [l for l in stdout.split('\n') if len(l) > 0]
        self.assertListEqual(lines, [
            "\tSuccessfully deleted `conn_id`=new1",
            "\tSuccessfully deleted `conn_id`=new2",
            "\tSuccessfully deleted `conn_id`=new3",
            "\tSuccessfully deleted `conn_id`=new4",
            "\tSuccessfully deleted `conn_id`=new5",
            "\tSuccessfully deleted `conn_id`=new6"
        ])

        # Check deletions
        for index in range(1, 7):
            conn_id = 'new%s' % index
            result = (session.query(Connection)
                      .filter(Connection.conn_id == conn_id)
                      .first())

            self.assertTrue(result is None)

        # Attempt to delete a non-existing connnection
        with mock.patch('sys.stdout',
                        new_callable=six.StringIO) as mock_stdout:
            cli.connections(self.parser.parse_args(
                ['connections', '--delete', '--conn_id=fake']))
            stdout = mock_stdout.getvalue()

        # Check deletion attempt stdout
        lines = [l for l in stdout.split('\n') if len(l) > 0]
        self.assertListEqual(lines, [
            "\tDid not find a connection with `conn_id`=fake",
        ])

        # Attempt to delete with invalid cli args
        with mock.patch('sys.stdout',
                        new_callable=six.StringIO) as mock_stdout:
            cli.connections(self.parser.parse_args(
                ['connections', '--delete', '--conn_id=fake',
                 '--conn_uri=%s' % uri, '--conn_type=fake-type']))
            stdout = mock_stdout.getvalue()

        # Check deletion attempt stdout
        lines = [l for l in stdout.split('\n') if len(l) > 0]
        self.assertListEqual(lines, [
            ("\tThe following args are not compatible with the " +
             "--delete flag: ['conn_uri', 'conn_type']"),
        ])

        session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_parentdag_downstream_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator.section-1', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator.section-1', '--no_confirm',
'--exclude_parentdag'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator',
'-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_backfill(self):
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = models.DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_pool_import_export(self):
# Create two pools first
pool_config_input = {
"foo": {
"description": "foo_test",
"slots": 1
},
"baz": {
"description": "baz_test",
"slots": 2
}
}
with open('pools_import.json', mode='w') as f:
json.dump(pool_config_input, f)
# Import json
try:
cli.pool(self.parser.parse_args(['pool', '-i', 'pools_import.json']))
except Exception as e:
self.fail("The 'pool -i pools_import.json' failed: %s" % e)
# Export json
try:
cli.pool(self.parser.parse_args(['pool', '-e', 'pools_export.json']))
except Exception as e:
self.fail("The 'pool -e pools_export.json' failed: %s" % e)
with open('pools_export.json', mode='r') as f:
pool_config_output = json.load(f)
self.assertEqual(
pool_config_input,
pool_config_output,
"Input and output pool files are not same")
os.remove('pools_import.json')
os.remove('pools_export.json')
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
os.remove('variables1.json')
os.remove('variables2.json')
    def _wait_pidfile(self, pidfile):
        """Block until *pidfile* exists and holds an integer PID; return that PID.

        NOTE(review): loops forever if the file never appears or never parses
        as an int — a timeout would fail faster on webserver startup problems.
        """
        while True:
            try:
                with open(pidfile) as f:
                    return int(f.read())
            except Exception:
                sleep(1)
    def test_cli_webserver_foreground(self):
        """Foreground webserver starts and terminates cleanly."""
        # Confirm that webserver hasn't been launched.
        # pgrep returns exit status 1 if no process matched.
        self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
        self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())

        # Run webserver in foreground and terminate it.
        p = subprocess.Popen(["airflow", "webserver"])
        p.terminate()
        p.wait()

        # Assert that no process remains.
        self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
        self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
    # Patch for causing webserver timeout
    @mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
    def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
        """With zero workers reported, the webserver exits with status 1."""
        # Shorten timeout so that this test doesn't take too long time
        configuration.conf.set("webserver", "web_server_master_timeout", "10")
        args = self.parser.parse_args(['webserver'])
        with self.assertRaises(SystemExit) as e:
            cli.webserver(args)
        self.assertEqual(e.exception.code, 1)
class SecurityTests(unittest.TestCase):
    def setUp(self):
        """Create an unauthenticated Flask test client with expose_config enabled."""
        configuration.load_test_config()
        configuration.conf.set("webserver", "authenticate", "False")
        configuration.conf.set("webserver", "expose_config", "True")
        app = application.create_app()
        app.config['TESTING'] = True
        self.app = app.test_client()

        self.dagbag = models.DagBag(
            dag_folder=DEV_NULL, include_examples=True)
        self.dag_bash = self.dagbag.dags['example_bash_operator']
        self.runme_0 = self.dag_bash.get_task('runme_0')
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def test_csrf_rejection(self):
endpoints = ([
"/admin/queryview/",
"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false",
])
for endpoint in endpoints:
response = self.app.post(endpoint)
self.assertIn('CSRF token is missing', response.data.decode('utf-8'))
def test_csrf_acceptance(self):
response = self.app.get("/admin/queryview/")
csrf = self.get_csrf(response)
response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf))
self.assertEqual(200, response.status_code)
def test_xss(self):
try:
self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>")
except Exception:
# exception is expected here since dag doesnt exist
pass
response = self.app.get("/admin/log", follow_redirects=True)
self.assertIn(bleach.clean("<script>alert(123456)</script>"), response.data.decode('UTF-8'))
    def test_chart_data_template(self):
        """Protect chart_data from being able to do RCE."""
        session = settings.Session()
        Chart = models.Chart
        # Template injection in the SQL field.
        chart1 = Chart(
            label='insecure_chart',
            conn_id='airflow_db',
            chart_type='bar',
            sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}"
        )
        # Template injection in the label field.
        chart2 = Chart(
            label="{{ ''.__class__.__mro__[1].__subclasses__() }}",
            conn_id='airflow_db',
            chart_type='bar',
            sql="SELECT 1"
        )
        # Label referencing a name (subprocess) that must be undefined in the
        # sandboxed template environment.
        chart3 = Chart(
            label="{{ subprocess.check_output('ls') }}",
            conn_id='airflow_db',
            chart_type='bar',
            sql="SELECT 1"
        )
        session.add(chart1)
        session.add(chart2)
        session.add(chart3)
        session.commit()
        chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()
        with self.assertRaises(SecurityError):
            self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1.id))
        chart2 = session.query(Chart).filter(
            Chart.label == "{{ ''.__class__.__mro__[1].__subclasses__() }}"
        ).first()
        with self.assertRaises(SecurityError):
            self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2.id))
        chart3 = session.query(Chart).filter(
            Chart.label == "{{ subprocess.check_output('ls') }}"
        ).first()
        with self.assertRaises(UndefinedError):
            self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3.id))
    def tearDown(self):
        """Hide the config view again and clear bash example task state."""
        configuration.conf.set("webserver", "expose_config", "False")
        self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
class WebUiTests(unittest.TestCase):
    def setUp(self):
        """Create a CSRF-disabled test client and a RUNNING dagrun for each
        example DAG used by these tests."""
        configuration.load_test_config()
        configuration.conf.set("webserver", "authenticate", "False")
        configuration.conf.set("webserver", "expose_config", "True")
        app = application.create_app()
        app.config['TESTING'] = True
        app.config['WTF_CSRF_METHODS'] = []
        self.app = app.test_client()

        self.dagbag = models.DagBag(include_examples=True)
        self.dag_bash = self.dagbag.dags['example_bash_operator']
        self.dag_python = self.dagbag.dags['example_python_operator']
        self.sub_dag = self.dagbag.dags['example_subdag_operator']
        self.runme_0 = self.dag_bash.get_task('runme_0')
        self.example_xcom = self.dagbag.dags['example_xcom']

        self.dagrun_python = self.dag_python.create_dagrun(
            run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
            execution_date=EXAMPLE_DAG_DEFAULT_DATE,
            start_date=timezone.utcnow(),
            state=State.RUNNING
        )

        self.sub_dag.create_dagrun(
            run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
            execution_date=EXAMPLE_DAG_DEFAULT_DATE,
            start_date=timezone.utcnow(),
            state=State.RUNNING
        )

        self.example_xcom.create_dagrun(
            run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
            execution_date=EXAMPLE_DAG_DEFAULT_DATE,
            start_date=timezone.utcnow(),
            state=State.RUNNING
        )
def test_index(self):
response = self.app.get('/', follow_redirects=True)
resp_html = response.data.decode('utf-8')
self.assertIn("DAGs", resp_html)
self.assertIn("example_bash_operator", resp_html)
# The HTML should contain data for the last-run. A link to the specific run,
# and the text of the date.
url = "/admin/airflow/graph?" + urlencode({
"dag_id": self.dag_python.dag_id,
"execution_date": self.dagrun_python.execution_date,
}).replace("&", "&")
self.assertIn(url, resp_html)
self.assertIn(
self.dagrun_python.execution_date.strftime("%Y-%m-%d %H:%M"),
resp_html)
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertIn("Ad Hoc Query", response.data.decode('utf-8'))
response = self.app.post(
"/admin/queryview/", data=dict(
conn_id="airflow_db",
sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance"))
self.assertIn("TEST", response.data.decode('utf-8'))
def test_health(self):
response = self.app.get('/health')
self.assertIn('The server is healthy!', response.data.decode('utf-8'))
def test_noaccess(self):
response = self.app.get('/admin/airflow/noaccess')
self.assertIn("You don't seem to have access.", response.data.decode('utf-8'))
def test_pickle_info(self):
response = self.app.get('/admin/airflow/pickle_info')
self.assertIn('{', response.data.decode('utf-8'))
    def test_dag_views(self):
        """Smoke-test the main admin views (graph, tree, duration, tries,
        landing_times, gantt, code, config, rendered, log, task, stats) and
        the success/clear/run/paused action endpoints."""
        response = self.app.get(
            '/admin/airflow/graph?dag_id=example_bash_operator')
        self.assertIn("runme_0", response.data.decode('utf-8'))
        # confirm that the graph page loads when execution_date is blank
        response = self.app.get(
            '/admin/airflow/graph?dag_id=example_bash_operator&execution_date=')
        self.assertIn("runme_0", response.data.decode('utf-8'))
        response = self.app.get(
            '/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
        self.assertIn("runme_0", response.data.decode('utf-8'))
        response = self.app.get(
            '/admin/airflow/duration?days=30&dag_id=example_bash_operator')
        self.assertIn("example_bash_operator", response.data.decode('utf-8'))
        # An unknown dag_id redirects back with an error flash.
        response = self.app.get(
            '/admin/airflow/duration?days=30&dag_id=missing_dag',
            follow_redirects=True)
        self.assertIn("seems to be missing", response.data.decode('utf-8'))
        response = self.app.get(
            '/admin/airflow/tries?days=30&dag_id=example_bash_operator')
        self.assertIn("example_bash_operator", response.data.decode('utf-8'))
        response = self.app.get(
            '/admin/airflow/landing_times?'
            'days=30&dag_id=example_python_operator')
        self.assertIn("example_python_operator", response.data.decode('utf-8'))
        response = self.app.get(
            '/admin/airflow/landing_times?'
            'days=30&dag_id=example_xcom')
        self.assertIn("example_xcom", response.data.decode('utf-8'))
        response = self.app.get(
            '/admin/airflow/gantt?dag_id=example_bash_operator')
        self.assertIn("example_bash_operator", response.data.decode('utf-8'))
        response = self.app.get(
            '/admin/airflow/code?dag_id=example_bash_operator')
        self.assertIn("example_bash_operator", response.data.decode('utf-8'))
        response = self.app.get(
            '/admin/airflow/blocked')
        response = self.app.get(
            '/admin/configurationview/')
        self.assertIn("Airflow Configuration", response.data.decode('utf-8'))
        self.assertIn("Running Configuration", response.data.decode('utf-8'))
        response = self.app.get(
            '/admin/airflow/rendered?'
            'task_id=runme_1&dag_id=example_bash_operator&'
            'execution_date={}'.format(DEFAULT_DATE_ISO))
        self.assertIn("example_bash_operator", response.data.decode('utf-8'))
        response = self.app.get(
            '/admin/airflow/log?task_id=run_this_last&'
            'dag_id=example_bash_operator&execution_date={}'
            ''.format(DEFAULT_DATE_ISO))
        self.assertIn("run_this_last", response.data.decode('utf-8'))
        response = self.app.get(
            '/admin/airflow/task?'
            'task_id=runme_0&dag_id=example_bash_operator&'
            'execution_date={}'.format(EXAMPLE_DAG_DEFAULT_DATE))
        self.assertIn("Attributes", response.data.decode('utf-8'))
        response = self.app.get(
            '/admin/airflow/dag_stats')
        self.assertIn("example_bash_operator", response.data.decode('utf-8'))
        response = self.app.get(
            '/admin/airflow/task_stats')
        self.assertIn("example_bash_operator", response.data.decode('utf-8'))
        # Action endpoints without confirmed=true show a confirmation page.
        url = (
            "/admin/airflow/success?task_id=print_the_context&"
            "dag_id=example_python_operator&upstream=false&downstream=false&"
            "future=false&past=false&execution_date={}&"
            "origin=/admin".format(EXAMPLE_DAG_DEFAULT_DATE))
        response = self.app.get(url)
        self.assertIn("Wait a minute", response.data.decode('utf-8'))
        response = self.app.get(
            '/admin/airflow/clear?task_id=print_the_context&'
            'dag_id=example_python_operator&future=true&past=false&'
            'upstream=true&downstream=false&'
            'execution_date={}&'
            'origin=/admin'.format(EXAMPLE_DAG_DEFAULT_DATE))
        self.assertIn("Wait a minute", response.data.decode('utf-8'))
        url = (
            "/admin/airflow/success?task_id=section-1&"
            "dag_id=example_subdag_operator&upstream=true&downstream=true&"
            "future=false&past=false&execution_date={}&"
            "origin=/admin".format(EXAMPLE_DAG_DEFAULT_DATE))
        response = self.app.get(url)
        self.assertIn("Wait a minute", response.data.decode('utf-8'))
        self.assertIn("section-1-task-1", response.data.decode('utf-8'))
        self.assertIn("section-1-task-2", response.data.decode('utf-8'))
        self.assertIn("section-1-task-3", response.data.decode('utf-8'))
        self.assertIn("section-1-task-4", response.data.decode('utf-8'))
        self.assertIn("section-1-task-5", response.data.decode('utf-8'))
        response = self.app.get(url + "&confirmed=true")
        url = (
            "/admin/airflow/clear?task_id=print_the_context&"
            "dag_id=example_python_operator&future=false&past=false&"
            "upstream=false&downstream=true&"
            "execution_date={}&"
            "origin=/admin".format(EXAMPLE_DAG_DEFAULT_DATE))
        response = self.app.get(url)
        self.assertIn("Wait a minute", response.data.decode('utf-8'))
        response = self.app.get(url + "&confirmed=true")
        # Recursive clear on a subdag lists tasks of parent, both sections
        # and the subdag itself.
        url = (
            "/admin/airflow/clear?task_id=section-1-task-1&"
            "dag_id=example_subdag_operator.section-1&future=false&past=false&"
            "upstream=false&downstream=true&recursive=true&"
            "execution_date={}&"
            "origin=/admin".format(EXAMPLE_DAG_DEFAULT_DATE))
        response = self.app.get(url)
        self.assertIn("Wait a minute", response.data.decode('utf-8'))
        self.assertIn("example_subdag_operator.end",
                      response.data.decode('utf-8'))
        self.assertIn("example_subdag_operator.section-1.section-1-task-1",
                      response.data.decode('utf-8'))
        self.assertIn("example_subdag_operator.section-1",
                      response.data.decode('utf-8'))
        self.assertIn("example_subdag_operator.section-2",
                      response.data.decode('utf-8'))
        self.assertIn("example_subdag_operator.section-2.section-2-task-1",
                      response.data.decode('utf-8'))
        self.assertIn("example_subdag_operator.section-2.section-2-task-2",
                      response.data.decode('utf-8'))
        self.assertIn("example_subdag_operator.section-2.section-2-task-3",
                      response.data.decode('utf-8'))
        self.assertIn("example_subdag_operator.section-2.section-2-task-4",
                      response.data.decode('utf-8'))
        self.assertIn("example_subdag_operator.section-2.section-2-task-5",
                      response.data.decode('utf-8'))
        self.assertIn("example_subdag_operator.some-other-task",
                      response.data.decode('utf-8'))
        url = (
            "/admin/airflow/run?task_id=runme_0&"
            "dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&"
            "ignore_task_deps=true&execution_date={}&"
            "origin=/admin".format(EXAMPLE_DAG_DEFAULT_DATE))
        response = self.app.get(url)
        response = self.app.get(
            "/admin/airflow/refresh?dag_id=example_bash_operator")
        response = self.app.get("/admin/airflow/refresh_all")
        response = self.app.post(
            "/admin/airflow/paused?"
            "dag_id=example_python_operator&is_paused=false")
        self.assertIn("OK", response.data.decode('utf-8'))
        response = self.app.get("/admin/xcom", follow_redirects=True)
        self.assertIn("Xcoms", response.data.decode('utf-8'))
def test_charts(self):
session = Session()
chart_label = "Airflow task instance by type"
chart = session.query(
models.Chart).filter(models.Chart.label == chart_label).first()
chart_id = chart.id
session.close()
response = self.app.get(
'/admin/airflow/chart'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("Airflow task instance by type", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/chart_data'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("example", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_details?dag_id=example_branch_operator')
self.assertIn("run_this_first", response.data.decode('utf-8'))
def test_fetch_task_instance(self):
url = (
"/admin/airflow/object/task_instances?"
"dag_id=example_python_operator&"
"execution_date={}".format(EXAMPLE_DAG_DEFAULT_DATE))
response = self.app.get(url)
self.assertIn("print_the_context", response.data.decode('utf-8'))
def test_dag_view_task_with_python_operator_using_partial(self):
response = self.app.get(
'/admin/airflow/task?'
'task_id=test_dagrun_functool_partial&dag_id=test_task_view_type_check&'
'execution_date={}'.format(EXAMPLE_DAG_DEFAULT_DATE))
self.assertIn("A function with two args", response.data.decode('utf-8'))
def test_dag_view_task_with_python_operator_using_instance(self):
response = self.app.get(
'/admin/airflow/task?'
'task_id=test_dagrun_instance&dag_id=test_task_view_type_check&'
'execution_date={}'.format(EXAMPLE_DAG_DEFAULT_DATE))
self.assertIn("A __call__ method", response.data.decode('utf-8'))
    def tearDown(self):
        # Restore the config option flipped by tests in this class.
        configuration.conf.set("webserver", "expose_config", "False")
        # self.dag_bash is created in setUp (outside this view); clear its
        # task-instance state over the whole example date range.
        self.dag_bash.clear(start_date=EXAMPLE_DAG_DEFAULT_DATE,
                            end_date=timezone.utcnow())
        # Wipe all DagRun/TaskInstance rows so later test classes start clean.
        session = Session()
        session.query(models.DagRun).delete()
        session.query(models.TaskInstance).delete()
        session.commit()
        session.close()
class SecureModeWebUiTests(unittest.TestCase):
    """Verify that secure_mode removes the ad-hoc query and chart views."""

    def setUp(self):
        configuration.load_test_config()
        configuration.conf.set("webserver", "authenticate", "False")
        configuration.conf.set("core", "secure_mode", "True")
        flask_app = application.create_app()
        flask_app.config['TESTING'] = True
        self.app = flask_app.test_client()

    def _assert_not_registered(self, url):
        # In secure mode these admin views are not registered at all,
        # so the server answers 404 rather than 403.
        self.assertEqual(404, self.app.get(url).status_code)

    def test_query(self):
        self._assert_not_registered('/admin/queryview/')

    def test_charts(self):
        self._assert_not_registered('/admin/chart/')

    def tearDown(self):
        configuration.conf.remove_option("core", "SECURE_MODE")
class PasswordUserTest(unittest.TestCase):
    """Unit tests for the PasswordUser wrapper of the password auth backend."""

    def setUp(self):
        user = models.User()
        from airflow.contrib.auth.backends.password_auth import PasswordUser
        self.password_user = PasswordUser(user)
        self.password_user.username = "password_test"

    @mock.patch('airflow.contrib.auth.backends.password_auth.generate_password_hash')
    def test_password_setter(self, mock_gen_pass_hash):
        # Setting .password must delegate to generate_password_hash with
        # complexity argument 12.
        # The real hash function returns bytes on py3 and str on py2.
        mock_gen_pass_hash.return_value = b"hashed_pass" if six.PY3 else "hashed_pass"
        self.password_user.password = "secure_password"
        mock_gen_pass_hash.assert_called_with("secure_password", 12)

    def test_password_unicode(self):
        # In python2.7 no conversion is required back to str
        # In python >= 3 the method must convert from bytes to str
        self.password_user.password = "secure_password"
        self.assertIsInstance(self.password_user.password, str)

    def test_password_user_authenticate(self):
        self.password_user.password = "secure_password"
        self.assertTrue(self.password_user.authenticate("secure_password"))

    def test_password_unicode_user_authenticate(self):
        # Non-ASCII usernames must not break hashing/authentication.
        self.password_user.username = u"🐼"  # This is a panda
        self.password_user.password = "secure_password"
        self.assertTrue(self.password_user.authenticate("secure_password"))

    def test_password_authenticate_session(self):
        """Round-trip a PasswordUser through the DB and authenticate it."""
        from airflow.contrib.auth.backends.password_auth import PasswordUser
        self.password_user.password = 'test_password'
        session = Session()
        session.add(self.password_user)
        session.commit()
        query_user = session.query(PasswordUser).filter_by(
            username=self.password_user.username).first()
        self.assertTrue(query_user.authenticate('test_password'))
        # Clean up the row so other auth tests start from an empty table.
        session.query(models.User).delete()
        session.commit()
        session.close()
class WebPasswordAuthTest(unittest.TestCase):
    """End-to-end login/logout flow tests for the password auth backend."""

    def setUp(self):
        configuration.conf.set("webserver", "authenticate", "True")
        configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth")

        app = application.create_app()
        app.config['TESTING'] = True
        self.app = app.test_client()

        # Seed one known user that the login tests can authenticate as.
        from airflow.contrib.auth.backends.password_auth import PasswordUser
        session = Session()
        user = models.User()
        password_user = PasswordUser(user)
        password_user.username = 'airflow_passwordauth'
        password_user.password = 'password'
        # NOTE: a leftover debug print of password_user._password was
        # removed here; it leaked the stored password hash into test logs.
        session.add(password_user)
        session.commit()
        session.close()

    def get_csrf(self, response):
        """Extract the CSRF token from the rendered login form."""
        tree = html.fromstring(response.data)
        form = tree.find('.//form')
        return form.find('.//input[@name="_csrf_token"]').value

    def login(self, username, password):
        """POST the login form (with a valid CSRF token), following redirects."""
        response = self.app.get('/admin/airflow/login')
        csrf_token = self.get_csrf(response)
        return self.app.post('/admin/airflow/login', data=dict(
            username=username,
            password=password,
            csrf_token=csrf_token
        ), follow_redirects=True)

    def logout(self):
        return self.app.get('/admin/airflow/logout', follow_redirects=True)

    def test_login_logout_password_auth(self):
        """Unknown user, wrong password, successful login, then logout."""
        self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
        response = self.login('user1', 'whatever')
        self.assertIn('Incorrect login details', response.data.decode('utf-8'))
        response = self.login('airflow_passwordauth', 'wrongpassword')
        self.assertIn('Incorrect login details', response.data.decode('utf-8'))
        response = self.login('airflow_passwordauth', 'password')
        self.assertIn('Data Profiling', response.data.decode('utf-8'))
        response = self.logout()
        self.assertIn('form-signin', response.data.decode('utf-8'))

    def test_unauthorized_password_auth(self):
        # Unauthenticated requests are redirected to the login page.
        response = self.app.get("/admin/airflow/landing_times")
        self.assertEqual(response.status_code, 302)

    def tearDown(self):
        configuration.load_test_config()
        session = Session()
        session.query(models.User).delete()
        session.commit()
        session.close()
        configuration.conf.set("webserver", "authenticate", "False")
class WebLdapAuthTest(unittest.TestCase):
    """Login/logout and role-filter tests against the test OpenLDAP server.

    Requires the fixture LDAP server at ldap://openldap:389 with the
    users referenced below (user1, dataprofiler, superuser, ...).
    """

    def setUp(self):
        configuration.conf.set("webserver", "authenticate", "True")
        configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
        try:
            configuration.conf.add_section("ldap")
        except Exception:
            # Section already exists (e.g. another LDAP test ran first).
            pass
        configuration.conf.set("ldap", "uri", "ldap://openldap:389")
        configuration.conf.set("ldap", "user_filter", "objectClass=*")
        configuration.conf.set("ldap", "user_name_attr", "uid")
        configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
        configuration.conf.set("ldap", "bind_password", "insecure")
        configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
        configuration.conf.set("ldap", "cacert", "")

        app = application.create_app()
        app.config['TESTING'] = True
        self.app = app.test_client()

    def get_csrf(self, response):
        """Extract the CSRF token from the rendered login form."""
        tree = html.fromstring(response.data)
        form = tree.find('.//form')
        return form.find('.//input[@name="_csrf_token"]').value

    def login(self, username, password):
        """POST the login form (with a valid CSRF token), following redirects."""
        response = self.app.get('/admin/airflow/login')
        csrf_token = self.get_csrf(response)
        return self.app.post('/admin/airflow/login', data=dict(
            username=username,
            password=password,
            csrf_token=csrf_token
        ), follow_redirects=True)

    def logout(self):
        return self.app.get('/admin/airflow/logout', follow_redirects=True)

    def test_login_logout_ldap(self):
        """Bad password, unknown user, successful login, then logout."""
        self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
        response = self.login('user1', 'userx')
        self.assertIn('Incorrect login details', response.data.decode('utf-8'))
        response = self.login('userz', 'user1')
        self.assertIn('Incorrect login details', response.data.decode('utf-8'))
        response = self.login('user1', 'user1')
        self.assertIn('Data Profiling', response.data.decode('utf-8'))
        response = self.logout()
        self.assertIn('form-signin', response.data.decode('utf-8'))

    def test_unauthorized(self):
        # Unauthenticated requests are redirected to the login page.
        response = self.app.get("/admin/airflow/landing_times")
        self.assertEqual(response.status_code, 302)

    def test_no_filter(self):
        # Without LDAP role filters every user sees everything.
        response = self.login('user1', 'user1')
        self.assertIn('Data Profiling', response.data.decode('utf-8'))
        self.assertIn('Connections', response.data.decode('utf-8'))

    def test_with_filters(self):
        # Role filters map LDAP attributes to data-profiler/superuser roles.
        configuration.conf.set('ldap', 'superuser_filter',
                               'description=superuser')
        configuration.conf.set('ldap', 'data_profiler_filter',
                               'description=dataprofiler')
        response = self.login('dataprofiler', 'dataprofiler')
        self.assertIn('Data Profiling', response.data.decode('utf-8'))
        response = self.logout()
        self.assertIn('form-signin', response.data.decode('utf-8'))
        response = self.login('superuser', 'superuser')
        self.assertIn('Connections', response.data.decode('utf-8'))

    def tearDown(self):
        configuration.load_test_config()
        session = Session()
        session.query(models.User).delete()
        session.commit()
        session.close()
        configuration.conf.set("webserver", "authenticate", "False")
class LdapGroupTest(unittest.TestCase):
    """Group-membership resolution against the test OpenLDAP server."""

    def setUp(self):
        configuration.conf.set("webserver", "authenticate", "True")
        configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
        try:
            configuration.conf.add_section("ldap")
        except Exception:
            # Section already exists (e.g. another LDAP test ran first).
            pass
        configuration.conf.set("ldap", "uri", "ldap://openldap:389")
        configuration.conf.set("ldap", "user_filter", "objectClass=*")
        configuration.conf.set("ldap", "user_name_attr", "uid")
        configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
        configuration.conf.set("ldap", "bind_password", "insecure")
        configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
        configuration.conf.set("ldap", "cacert", "")

    def test_group_belonging(self):
        """Each fixture user must resolve to the expected set of LDAP groups."""
        from airflow.contrib.auth.backends.ldap_auth import LdapUser
        users = {"user1": ["group1", "group3"],
                 "user2": ["group2"]
                 }
        for user in users:
            mu = models.User(username=user,
                             is_superuser=False)
            auth = LdapUser(mu)
            # Compare as sets: group order from LDAP is not significant.
            self.assertEqual(set(users[user]), set(auth.ldap_groups))

    def tearDown(self):
        configuration.load_test_config()
        configuration.conf.set("webserver", "authenticate", "False")
class FakeWebHDFSHook(object):
    """Minimal WebHDFSHook stand-in that just echoes back its inputs."""

    def __init__(self, conn_id):
        self.conn_id = conn_id

    def get_conn(self):
        # The tests only need an identifiable token, not a real client.
        return self.conn_id

    def check_for_path(self, hdfs_path):
        # Pretend every path exists by returning the (truthy) path itself.
        return hdfs_path
class FakeSnakeBiteClientException(Exception):
    """Raised by FakeSnakeBiteClient for paths it has no canned data for."""
class FakeSnakeBiteClient(object):
    """In-memory replacement for a snakebite HDFS client.

    Only ``ls`` is implemented; it serves canned listings for a handful
    of well-known fake paths and raises FakeSnakeBiteClientException for
    anything else.
    """

    def __init__(self):
        self.started = True

    @staticmethod
    def _entry(path, file_type='f', **overrides):
        """Build one stat dict in the shape snakebite returns.

        Files and directories get the fixed metadata the original canned
        responses used; ``overrides`` adjusts individual fields.
        """
        is_file = file_type == 'f'
        info = {
            'group': u'supergroup',
            'permission': 420 if is_file else 493,
            'file_type': file_type,
            'access_time': 1481122343796 if is_file else 0,
            'block_replication': 3 if is_file else 0,
            'modification_time': 1481122343862 if is_file else 1481132141540,
            'length': 0,
            'blocksize': 134217728 if is_file else 0,
            'owner': u'hdfs',
            'path': path,
        }
        info.update(overrides)
        return info

    def ls(self, path, include_toplevel=False):
        """
        the fake snakebite client

        :param path: the array of path to test
        :param include_toplevel: to return the toplevel directory info
        :return: a list for path for the matching queries
        """
        target = path[0]
        if target == '/datadirectory/empty_directory':
            if include_toplevel:
                return [self._entry('/datadirectory/empty_directory', 'd')]
            return []
        if target == '/datadirectory/datafile':
            return [self._entry('/datadirectory/datafile')]
        if target == '/datadirectory/not_empty_directory':
            listing = [self._entry('/datadirectory/not_empty_directory/test_file')]
            if include_toplevel:
                # NOTE: the directory entry deliberately reports the path
                # '/datadirectory/empty_directory', exactly as the original
                # canned response did.
                listing.insert(0, self._entry('/datadirectory/empty_directory', 'd'))
            return listing
        if target == '/datadirectory/regex_dir':
            names = ('test1file',
                     'test2file',
                     'test3file',
                     'copying_file_1.txt._COPYING_',
                     'copying_file_3.txt.sftp')
            return [self._entry('/datadirectory/regex_dir/' + name,
                                length=12582912)
                    for name in names]
        # Includes '/datadirectory/not_existing_file_or_directory'.
        raise FakeSnakeBiteClientException
class FakeHDFSHook(object):
    """HDFSHook stand-in whose connection is a FakeSnakeBiteClient."""

    def __init__(self, conn_id=None):
        self.conn_id = conn_id

    def get_conn(self):
        # A fresh fake client per call, mirroring the real hook's API.
        return FakeSnakeBiteClient()
class ConnectionTest(unittest.TestCase):
    """Tests for Connection lookup via environment variables and the DB."""

    def setUp(self):
        configuration.load_test_config()
        utils.db.initdb()
        os.environ['AIRFLOW_CONN_TEST_URI'] = (
            'postgres://username:password@ec2.compute.com:5432/the_database')
        os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
            'postgres://ec2.compute.com/the_database')

    def tearDown(self):
        # Remove every env var this class may set. BUG FIX: the original
        # list omitted AIRFLOW_CONN_TEST_URI_NO_CREDS, leaking it into
        # later test classes.
        env_vars = ['AIRFLOW_CONN_TEST_URI',
                    'AIRFLOW_CONN_TEST_URI_NO_CREDS',
                    'AIRFLOW_CONN_AIRFLOW_DB']
        for ev in env_vars:
            os.environ.pop(ev, None)

    def test_using_env_var(self):
        c = SqliteHook.get_connection(conn_id='test_uri')
        self.assertEqual('ec2.compute.com', c.host)
        self.assertEqual('the_database', c.schema)
        self.assertEqual('username', c.login)
        self.assertEqual('password', c.password)
        self.assertEqual(5432, c.port)

    def test_using_unix_socket_env_var(self):
        # URI without credentials or port: those fields come back as None.
        c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
        self.assertEqual('ec2.compute.com', c.host)
        self.assertEqual('the_database', c.schema)
        self.assertIsNone(c.login)
        self.assertIsNone(c.password)
        self.assertIsNone(c.port)

    def test_param_setup(self):
        c = Connection(conn_id='local_mysql', conn_type='mysql',
                       host='localhost', login='airflow',
                       password='airflow', schema='airflow')
        self.assertEqual('localhost', c.host)
        self.assertEqual('airflow', c.schema)
        self.assertEqual('airflow', c.login)
        self.assertEqual('airflow', c.password)
        self.assertIsNone(c.port)

    def test_env_var_priority(self):
        """An AIRFLOW_CONN_* env var must shadow the DB connection."""
        c = SqliteHook.get_connection(conn_id='airflow_db')
        self.assertNotEqual('ec2.compute.com', c.host)
        os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
            'postgres://username:password@ec2.compute.com:5432/the_database'
        try:
            c = SqliteHook.get_connection(conn_id='airflow_db')
            self.assertEqual('ec2.compute.com', c.host)
            self.assertEqual('the_database', c.schema)
            self.assertEqual('username', c.login)
            self.assertEqual('password', c.password)
            self.assertEqual(5432, c.port)
        finally:
            # Always restore, even if an assertion above fails.
            del os.environ['AIRFLOW_CONN_AIRFLOW_DB']

    def test_dbapi_get_uri(self):
        conn = BaseHook.get_connection(conn_id='test_uri')
        hook = conn.get_hook()
        self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
        conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
        hook2 = conn2.get_hook()
        self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())

    def test_dbapi_get_sqlalchemy_engine(self):
        conn = BaseHook.get_connection(conn_id='test_uri')
        hook = conn.get_hook()
        engine = hook.get_sqlalchemy_engine()
        self.assertIsInstance(engine, sqlalchemy.engine.Engine)
        self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))

    def test_get_connections_env_var(self):
        conns = SqliteHook.get_connections(conn_id='test_uri')
        assert len(conns) == 1
        assert conns[0].host == 'ec2.compute.com'
        assert conns[0].schema == 'the_database'
        assert conns[0].login == 'username'
        assert conns[0].password == 'password'
        assert conns[0].port == 5432

    def test_get_connections_db(self):
        conns = BaseHook.get_connections(conn_id='airflow_db')
        assert len(conns) == 1
        assert conns[0].host == 'localhost'
        assert conns[0].schema == 'airflow'
        assert conns[0].login == 'root'
class WebHDFSHookTest(unittest.TestCase):
    """Constructor-level checks for WebHDFSHook; no live cluster required."""

    def setUp(self):
        configuration.load_test_config()

    def test_simple_init(self):
        # Imported inside the test — presumably so this module still loads
        # when the webhdfs extras are not installed; confirm before moving.
        from airflow.hooks.webhdfs_hook import WebHDFSHook
        c = WebHDFSHook()
        self.assertIsNone(c.proxy_user)

    def test_init_proxy_user(self):
        from airflow.hooks.webhdfs_hook import WebHDFSHook
        c = WebHDFSHook(proxy_user='someone')
        self.assertEqual('someone', c.proxy_user)
# snakebite (and therefore HDFSHook) is importable only on Python 2;
# leave HDFSHook as None elsewhere so HDFSHookTest below can be skipped.
HDFSHook = None
if six.PY2:
    from airflow.hooks.hdfs_hook import HDFSHook
    import snakebite
@unittest.skipIf(HDFSHook is None,
                 "Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
    """Tests for HDFSHook client construction (plain, autoconfig and HA)."""

    def setUp(self):
        configuration.load_test_config()
        os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = 'hdfs://localhost:8020'

    def test_get_client(self):
        client = HDFSHook(proxy_user='foo').get_conn()
        self.assertIsInstance(client, snakebite.client.Client)
        self.assertEqual('localhost', client.host)
        self.assertEqual(8020, client.port)
        self.assertEqual('foo', client.service.channel.effective_user)

    @mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
    @mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
    def test_get_autoconfig_client(self, mock_get_connections,
                                   MockAutoConfigClient):
        # A connection with extra {"autoconfig": true} must route through
        # AutoConfigClient with the connection's login as effective user.
        c = Connection(conn_id='hdfs', conn_type='hdfs',
                       host='localhost', port=8020, login='foo',
                       extra=json.dumps({'autoconfig': True}))
        mock_get_connections.return_value = [c]
        HDFSHook(hdfs_conn_id='hdfs').get_conn()
        MockAutoConfigClient.assert_called_once_with(effective_user='foo',
                                                     use_sasl=False)

    @mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
    def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
        # Missing connection id + autoconfig=True: no effective user.
        HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
        MockAutoConfigClient.assert_called_once_with(effective_user=None,
                                                     use_sasl=False)

    @mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
    def test_get_ha_client(self, mock_get_connections):
        # Two connections under one conn_id produce an HA (failover) client.
        c1 = Connection(conn_id='hdfs_default', conn_type='hdfs',
                        host='localhost', port=8020)
        c2 = Connection(conn_id='hdfs_default', conn_type='hdfs',
                        host='localhost2', port=8020)
        mock_get_connections.return_value = [c1, c2]
        client = HDFSHook().get_conn()
        self.assertIsInstance(client, snakebite.client.HAClient)
# Module-level mock used as a pluggable email backend by the tests below
# (referenced by the dotted path 'tests.core.send_email_test').
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
    """Tests for backend selection in airflow.utils.email.send_email."""

    def setUp(self):
        # Ensure no custom backend is configured before each test.
        configuration.conf.remove_option('email', 'EMAIL_BACKEND')

    @mock.patch('airflow.utils.email.send_email')
    def test_default_backend(self, mock_send_email):
        res = utils.email.send_email('to', 'subject', 'content')
        mock_send_email.assert_called_with('to', 'subject', 'content')
        self.assertEqual(mock_send_email.return_value, res)

    @mock.patch('airflow.utils.email.send_email_smtp')
    def test_custom_backend(self, mock_send_email):
        # Point EMAIL_BACKEND at the module-level mock defined above;
        # the SMTP backend must then not be invoked at all.
        configuration.conf.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
        utils.email.send_email('to', 'subject', 'content')
        send_email_test.assert_called_with(
            'to', 'subject', 'content', files=None, dryrun=False,
            cc=None, bcc=None, mime_charset='utf-8', mime_subtype='mixed')
        self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
    """Tests for send_email_smtp / send_MIME_email with a mocked SMTP layer."""

    def setUp(self):
        configuration.conf.set('smtp', 'SMTP_SSL', 'False')

    @mock.patch('airflow.utils.email.send_MIME_email')
    def test_send_smtp(self, mock_send_mime):
        attachment = tempfile.NamedTemporaryFile()
        attachment.write(b'attachment')
        attachment.seek(0)
        utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
        self.assertTrue(mock_send_mime.called)
        call_args = mock_send_mime.call_args[0]
        self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
        self.assertEqual(['to'], call_args[1])
        msg = call_args[2]
        self.assertEqual('subject', msg['Subject'])
        self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
        # Payload is [body, attachment].
        self.assertEqual(2, len(msg.get_payload()))
        filename = u'attachment; filename="' + os.path.basename(attachment.name) + '"'
        self.assertEqual(filename, msg.get_payload()[-1].get(u'Content-Disposition'))
        mimeapp = MIMEApplication('attachment')
        self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())

    @mock.patch('airflow.utils.email.send_MIME_email')
    def test_send_smtp_with_multibyte_content(self, mock_send_mime):
        # Non-ASCII body must survive the requested charset encoding.
        utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8')
        self.assertTrue(mock_send_mime.called)
        call_args = mock_send_mime.call_args[0]
        msg = call_args[2]
        mimetext = MIMEText('🔥', 'mixed', 'utf-8')
        self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload())

    @mock.patch('airflow.utils.email.send_MIME_email')
    def test_send_bcc_smtp(self, mock_send_mime):
        attachment = tempfile.NamedTemporaryFile()
        attachment.write(b'attachment')
        attachment.seek(0)
        utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
        self.assertTrue(mock_send_mime.called)
        call_args = mock_send_mime.call_args[0]
        self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
        # cc and bcc are folded into the recipient list passed to SMTP.
        self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
        msg = call_args[2]
        self.assertEqual('subject', msg['Subject'])
        self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
        self.assertEqual(2, len(msg.get_payload()))
        self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
                         msg.get_payload()[-1].get(u'Content-Disposition'))
        mimeapp = MIMEApplication('attachment')
        self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())

    @mock.patch('smtplib.SMTP_SSL')
    @mock.patch('smtplib.SMTP')
    def test_send_mime(self, mock_smtp, mock_smtp_ssl):
        mock_smtp.return_value = mock.Mock()
        mock_smtp_ssl.return_value = mock.Mock()
        msg = MIMEMultipart()
        utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
        mock_smtp.assert_called_with(
            configuration.conf.get('smtp', 'SMTP_HOST'),
            configuration.conf.getint('smtp', 'SMTP_PORT'),
        )
        self.assertTrue(mock_smtp.return_value.starttls.called)
        mock_smtp.return_value.login.assert_called_with(
            configuration.conf.get('smtp', 'SMTP_USER'),
            configuration.conf.get('smtp', 'SMTP_PASSWORD'),
        )
        mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
        self.assertTrue(mock_smtp.return_value.quit.called)

    @mock.patch('smtplib.SMTP_SSL')
    @mock.patch('smtplib.SMTP')
    def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
        configuration.conf.set('smtp', 'SMTP_SSL', 'True')
        mock_smtp.return_value = mock.Mock()
        mock_smtp_ssl.return_value = mock.Mock()
        utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
        # With SSL enabled, only the SMTP_SSL class may be used.
        self.assertFalse(mock_smtp.called)
        mock_smtp_ssl.assert_called_with(
            configuration.conf.get('smtp', 'SMTP_HOST'),
            configuration.conf.getint('smtp', 'SMTP_PORT'),
        )

    @mock.patch('smtplib.SMTP_SSL')
    @mock.patch('smtplib.SMTP')
    def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
        configuration.conf.remove_option('smtp', 'SMTP_USER')
        configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')
        mock_smtp.return_value = mock.Mock()
        mock_smtp_ssl.return_value = mock.Mock()
        utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
        self.assertFalse(mock_smtp_ssl.called)
        mock_smtp.assert_called_with(
            configuration.conf.get('smtp', 'SMTP_HOST'),
            configuration.conf.getint('smtp', 'SMTP_PORT'),
        )
        # BUG FIX: the original asserted mock_smtp.login.called, which reads
        # an auto-created child attribute of the *class* mock and is always
        # False regardless of behavior. The connection object the code logs
        # in on is mock_smtp.return_value.
        self.assertFalse(mock_smtp.return_value.login.called)

    @mock.patch('smtplib.SMTP_SSL')
    @mock.patch('smtplib.SMTP')
    def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
        # dryrun must not open any SMTP connection at all.
        utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
        self.assertFalse(mock_smtp.called)
        self.assertFalse(mock_smtp_ssl.called)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
reltestbase.py | ##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""A foundation for RelStorage tests"""
from __future__ import absolute_import
from __future__ import print_function
# pylint:disable=too-many-ancestors,abstract-method,too-many-public-methods,too-many-lines
# pylint:disable=too-many-statements,too-many-locals
import contextlib
import functools
import os
import random
import shutil
import tempfile
import time
import threading
import unittest
import transaction
from persistent import Persistent
from persistent.mapping import PersistentMapping
from zc.zlibstorage import ZlibStorage
import ZODB.tests.util
from ZODB.Connection import TransactionMetaData
from ZODB.DB import DB
from ZODB.FileStorage import FileStorage
from ZODB.POSException import ReadConflictError
from ZODB.POSException import ReadOnlyError
from ZODB.serialize import referencesf
from ZODB.utils import z64
from ZODB.utils import u64 as bytes8_to_int64
from ZODB.tests import BasicStorage
from ZODB.tests import ConflictResolution
from ZODB.tests import MTStorage
from ZODB.tests import PackableStorage
from ZODB.tests import PersistentStorage
from ZODB.tests import ReadOnlyStorage
from ZODB.tests import StorageTestBase
from ZODB.tests import Synchronization
from ZODB.tests.StorageTestBase import zodb_pickle
from ZODB.tests.StorageTestBase import zodb_unpickle
from ZODB.tests.MinPO import MinPO
from . import fakecache
from . import util
from . import mock
from . import TestCase
from . import StorageCreatingMixin
from .persistentcache import PersistentCacheStorageTests
from .locking import TestLocking
from .test_zodbconvert import FSZODBConvertTests
class RelStorageTestBase(StorageCreatingMixin,
                         TestCase,
                         StorageTestBase.StorageTestBase):
    """Base class for RelStorage tests: lazily creates and owns a storage.

    ``_storage`` is a property backed by ``_storage_created`` so a storage
    is only built on first use, and subclasses can swap it out.
    """

    base_dbname = None  # Override
    keep_history = None  # Override
    # The lazily-created storage instance; None until first access.
    _storage_created = None

    def _close(self):
        # Override from StorageTestBase.
        # Try to avoid creating one through our _storage property.
        if '_storage' in self.__dict__:
            storage = self._storage
        else:
            storage = self._storage_created
        self._storage = None
        if storage is not None:
            storage.close()
            storage.cleanup()

    def make_storage_to_cache(self):
        # Hook for subclasses that need a differently-configured storage
        # to be the cached default.
        return self.make_storage()

    def get_storage(self):
        # Create a storage with default options
        # if it has not been created already.
        storage = self._storage_created
        if storage is None:
            storage = self.make_storage_to_cache()
            self._storage_created = storage
        return storage

    def set_storage(self, storage):
        self._storage_created = storage

    # Property indirection so reads lazily create and writes replace the
    # cached storage.
    _storage = property(
        lambda self: self.get_storage(),
        lambda self, nv: self.set_storage(nv)
    )

    def open(self, read_only=False, **kwargs):
        # This is used by a few ZODB tests that close and reopen the storage.
        storage = self._storage
        if storage is not None:
            self._storage = None
            storage.close()
            storage.cleanup()
        # zap=False: reopening must preserve the existing data.
        self._storage = storage = self.make_storage(
            read_only=read_only, zap=False, **kwargs)
        return storage
class StorageClientThread(MTStorage.StorageClientThread):
    # MTStorage assumes that the storage object is thread safe.
    # This doesn't make any sense for an MVCC Storage like RelStorage;
    # don't try to use a single instance in multiple threads.
    #
    # This patch makes it respect that.

    def __init__(self, storage, *args, **kwargs):
        # Give each client thread its own storage instance.
        storage = storage.new_instance()
        super(StorageClientThread, self).__init__(storage, *args, **kwargs)

    def runtest(self):
        try:
            super(StorageClientThread, self).runtest()
        finally:
            # Always release the per-thread instance, even when the
            # inherited test body fails.
            self.storage.release()
            self.storage = None
class ExtStorageClientThread(StorageClientThread, MTStorage.ExtStorageClientThread):
    """Same as StorageClientThread: each thread gets its own new_instance()."""
class ThreadWrapper(object):
    """Serialize access to a storage so thread-unaware tests can share it.

    Read-type methods are wrapped to hold a read lock; the two-phase
    commit methods additionally hold a commit lock from tpc_begin until
    tpc_finish/tpc_abort so reads never interleave with a commit.
    """

    def __init__(self, storage):
        self.__storage = storage
        # We can't use an RLock, which verifies that the thread that
        # acquired is the one that releases; check_tid_ordering_w_commit
        # deliberately spreads these actions across threads (for same reason).
        self.__commit_lock = threading.Lock()
        rl = self.__read_lock = threading.Lock()
        # The transaction currently between tpc_begin and tpc_finish/abort.
        self.__txn = None

        def make_locked(name):
            # Wrap storage.<name> so every call holds the read lock.
            meth = getattr(storage, name)
            @functools.wraps(meth)
            def func(*args, **kwargs):
                with rl:
                    return meth(*args, **kwargs)
            return func

        for name in (
                'loadBefore',
                'load',
                'store',
                'getTid',
                'lastTransaction',
        ):
            setattr(self, name, make_locked(name))

    def __getattr__(self, name):
        # Everything not explicitly wrapped is delegated unchanged.
        return getattr(self.__storage, name)

    def tpc_begin(self, txn):
        # Hold the commit lock for the whole two-phase commit; take the
        # read lock only long enough to record the active transaction.
        self.__commit_lock.acquire()
        self.__read_lock.acquire()
        assert not self.__txn
        self.__txn = txn
        self.__read_lock.release()
        return self.__storage.tpc_begin(txn)

    def tpc_finish(self, txn, callback=None):
        self.__read_lock.acquire()
        assert txn is self.__txn
        try:
            return self.__storage.tpc_finish(txn, callback)
        finally:
            # Release both locks even if the underlying finish raises.
            self.__txn = None
            self.__commit_lock.release()
            self.__read_lock.release()

    def tpc_abort(self, txn):
        self.__read_lock.acquire()
        assert txn is self.__txn, (txn, self.__txn)
        try:
            return self.__storage.tpc_abort(txn)
        finally:
            self.__txn = None
            self.__commit_lock.release()
            self.__read_lock.release()
class UsesThreadsOnASingleStorageMixin(object):
    # These tests attempt to use threads on a single storage object.
    # That doesn't make sense with MVCC, where every instance is its
    # own connection and doesn't need to do any locking. This mixin makes
    # those tests use a special storage that locks.

    @contextlib.contextmanager
    def __thread_safe_wrapper(self):
        # Temporarily replace self._storage with a locking ThreadWrapper,
        # restoring the original unless the test swapped it out itself.
        orig_storage = self._storage
        wrapped = self._storage = ThreadWrapper(orig_storage)
        try:
            yield
        finally:
            if self._storage is wrapped:
                self._storage = orig_storage

    def __generic_wrapped_test(self, meth_name):
        # Run the inherited test method under the wrapper, then zap the
        # database so anything the test left behind can't break later tests.
        meth = getattr(
            super(UsesThreadsOnASingleStorageMixin, self),
            meth_name)
        try:
            with self.__thread_safe_wrapper():
                meth()
        finally:
            self._storage.zap_all(slow=True)

    def make_func(name):  # pylint:disable=no-self-argument
        # Factory so each generated test binds its own name (avoids the
        # late-binding closure pitfall).
        return lambda self: self.__generic_wrapped_test(name)

    # Replace the problematic inherited tests with wrapped versions.
    for bad_test in (
            'check_checkCurrentSerialInTransaction',
            # This one stores a b'y' (invalid pickle) into the
            # database as the root object, so if we don't get zapped
            # afterwards, we can't open the database.
            'check_tid_ordering_w_commit',
    ):
        locals()[bad_test] = make_func(bad_test)
    del make_func
    del bad_test
class GenericRelStorageTests(
UsesThreadsOnASingleStorageMixin,
RelStorageTestBase,
PersistentCacheStorageTests,
TestLocking,
BasicStorage.BasicStorage,
PackableStorage.PackableStorage,
Synchronization.SynchronizedStorage,
ConflictResolution.ConflictResolvingStorage,
PersistentStorage.PersistentStorage,
MTStorage.MTStorage,
ReadOnlyStorage.ReadOnlyStorage,
):
    def setUp(self):
        # ZODB.tests.util.TestCase likes to change directories
        # It tries to change back in tearDown(), but if there's an error,
        # we may not get to tearDown. addCleanup() always runs, though.
        # do that as the very last thing that happens (except for subclasses, they
        # could add things first)
        self.addCleanup(os.chdir, os.getcwd())
        super(GenericRelStorageTests, self).setUp()
        # PackableStorage is particularly bad about leaving things
        # dangling. For example, if the ClientThread runs into
        # problems, it doesn't close its connection, which can leave
        # locks dangling until GC happens and break other threads and even
        # other tests.
        #
        # Patch around that. Be sure to only close a given connection once,
        # though.
        _closing = self._closing

        def db_factory(storage, *args, **kwargs):
            # DB replacement that registers the DB and every connection it
            # opens for cleanup at teardown.
            db = _closing(DB(storage, *args, **kwargs))
            db_open = db.open

            def o(transaction_manager=None, at=None, before=None):
                conn = db_open(transaction_manager=transaction_manager,
                               at=at,
                               before=before)
                _closing(conn)
                if transaction_manager is not None:
                    # If we're using an independent transaction, abort it *before*
                    # attempting to close the connection; that means it must be registered
                    # after the connection.
                    self.addCleanup(transaction_manager.abort)
                return conn
            db.open = o
            return db
        PackableStorage.DB = db_factory
        # Swap in the per-thread-instance client threads; addCleanup
        # restores the originals captured here.
        self.addCleanup(setattr, MTStorage,
                        'StorageClientThread', MTStorage.StorageClientThread)
        MTStorage.StorageClientThread = StorageClientThread
        self.addCleanup(setattr, MTStorage,
                        'ExtStorageClientThread', MTStorage.ExtStorageClientThread)
        MTStorage.ExtStorageClientThread = ExtStorageClientThread
    def tearDown(self):
        """Restore the module-level ``PackableStorage.DB`` patched in setUp()."""
        PackableStorage.DB = DB
        super(GenericRelStorageTests, self).tearDown()
    def _make_readonly(self):
        """Make the storage read-only, stubbing in ``undo()`` if it is missing."""
        # checkWriteMethods in ReadOnlyStorage assumes that
        # the object has an undo() method, even though that's only
        # required if it's IStorageUndoable, aka history-preserving.
        super(GenericRelStorageTests, self)._make_readonly()
        storage = self._storage
        if not hasattr(storage, 'undo'):
            # History-free storages lack undo(); provide one that raises
            # just as a read-only history-preserving storage would.
            def undo(*args, **kwargs):
                raise ReadOnlyError
            storage.undo = undo # pylint:disable=attribute-defined-outside-init
        return storage
    def checkCurrentObjectTidsRoot(self):
        """``mover.current_object_tids`` returns only OIDs that exist,
        even when asked about thousands of missing ones."""
        # Get the root object in place
        db = self._closing(DB(self._storage))
        conn = self._closing(db.open())
        storage = conn._storage
        cursor = storage._load_connection.cursor
        # Only the root (OID 0) exists so far.
        oid_to_tid = storage._adapter.mover.current_object_tids(cursor, [0])
        self.assertEqual(1, len(oid_to_tid))
        self.assertIn(0, oid_to_tid)
        # Ask for many, many objects that don't exist.
        # Force the implementation to loop if that's what it does internally.
        oid_to_tid = storage._adapter.mover.current_object_tids(cursor, range(0, 3523))
        self.assertEqual(1, len(oid_to_tid))
        self.assertIn(0, oid_to_tid)
        # No matching oids.
        oid_to_tid = storage._adapter.mover.current_object_tids(cursor, range(1, 3523))
        self.assertEqual(0, len(oid_to_tid))
        conn.close()
        db.close()
    def checkLen(self):
        """``len(storage)`` reflects the number of stored objects."""
        # Override the version from BasicStorage because we
        # actually do guarantee to keep track of the counts,
        # within certain limits.
        # len(storage) reports the number of objects.
        # check it is zero when empty
        self.assertEqual(len(self._storage), 0)
        # check it is correct when the storage contains two object.
        # len may also be zero, for storages that do not keep track
        # of this number
        self._dostore(data=PersistentMapping())
        self._dostore(data=PersistentMapping())
        # Force the adapter to refresh its (possibly estimated) count.
        self._storage._adapter.stats.large_database_change()
        self.assertEqual(len(self._storage), 2)
def checkDropAndPrepare(self):
# Under PyPy, this test either takes a very long time (PyMySQL)
# or hangs (psycopg2cffi) longer than I want to wait (10+ minutes).
# This suggests there's a lock on a particular table (the eighth table we drop)
# which in turn suggests that there are connections still open and leaked!
# Running a manual GC seems to fix it. It's hard to reproduce manually because
# it seems to depend on a particular set of tests being run.
import gc
gc.collect()
gc.collect()
self._storage._adapter.schema.drop_all()
self._storage._adapter.schema.prepare()
    def checkCrossConnectionInvalidation(self):
        """A second connection sees a commit only after it syncs."""
        # Verify connections see updated state at txn boundaries
        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['myobj'] = 'yes'
            c2 = db.open()
            r2 = c2.root()
            self.assertNotIn('myobj', r2)
            # Drive two-phase commit by hand on c1's storage.
            storage = c1._storage
            t = transaction.Transaction()
            t.description = u'invalidation test'
            c1.tpc_begin(t)
            c1.commit(t)
            storage.tpc_vote(storage._transaction)
            storage.tpc_finish(storage._transaction)
            # c2 hasn't polled yet, so it still sees the old state.
            self.assertNotIn('myobj', r2)
            c2.sync()
            self.assertIn('myobj', r2)
            self.assertEqual(r2['myobj'], 'yes')
        finally:
            db.close()
    def checkCrossConnectionIsolation(self):
        """MVCC keeps an unsynced connection on its original view."""
        # Verify MVCC isolates connections
        db = DB(self._storage)
        try:
            c1 = db.open()
            r1 = c1.root()
            r1['alpha'] = PersistentMapping()
            r1['gamma'] = PersistentMapping()
            transaction.commit()
            # Open a second connection but don't load root['alpha'] yet
            c2 = db.open()
            r2 = c2.root()
            r1['alpha']['beta'] = 'yes'
            # Commit via a manually-driven two-phase commit on c1's storage.
            storage = c1._storage
            t = transaction.Transaction()
            t.description = u'isolation test 1'
            c1.tpc_begin(t)
            c1.commit(t)
            storage.tpc_vote(storage._transaction)
            storage.tpc_finish(storage._transaction)
            # The second connection will now load root['alpha'], but due to
            # MVCC, it should continue to see the old state.
            self.assertIsNone(r2['alpha']._p_changed) # A ghost
            self.assertFalse(r2['alpha'])
            self.assertEqual(r2['alpha']._p_changed, 0)
            # make root['alpha'] visible to the second connection
            c2.sync()
            # Now it should be in sync
            self.assertIsNone(r2['alpha']._p_changed) # A ghost
            self.assertTrue(r2['alpha'])
            self.assertEqual(r2['alpha']._p_changed, 0)
            self.assertEqual(r2['alpha']['beta'], 'yes')
            # Repeat the test with root['gamma']
            r1['gamma']['delta'] = 'yes'
            storage = c1._storage
            t = transaction.Transaction()
            t.description = u'isolation test 2'
            c1.tpc_begin(t)
            c1.commit(t)
            storage.tpc_vote(storage._transaction)
            storage.tpc_finish(storage._transaction)
            # The second connection will now load root['gamma'], but due to MVCC,
            # it should continue to see the old state.
            self.assertIsNone(r2['gamma']._p_changed) # A ghost
            self.assertFalse(r2['gamma'])
            self.assertEqual(r2['gamma']._p_changed, 0)
            # make root['gamma'] visible to the second connection
            c2.sync()
            # Now it should be in sync
            self.assertIsNone(r2['gamma']._p_changed) # A ghost
            self.assertTrue(r2['gamma'])
            self.assertEqual(r2['gamma']._p_changed, 0)
            self.assertEqual(r2['gamma']['delta'], 'yes')
        finally:
            db.close()
def __make_tryToResolveConflict_ignore_committedData(self, storage):
orig = storage.tryToResolveConflict
def resolve(oid, ctid, ptid, newpickle, committed_data): # pylint:disable=unused-argument
return orig(oid, ctid, ptid, newpickle)
storage.tryToResolveConflict = resolve
return storage
    def checkResolveConflictBetweenConnections(self, clear_cache=False):
        """Conflict resolution works between two storage instances.

        When *clear_cache* is true, the shared cache is emptied first, so
        resolution must fall back to loadSerial() for the old state.
        """
        # Verify that conflict resolution works between storage instances
        # bound to connections.
        obj = ConflictResolution.PCounter()
        obj.inc()
        # Establish a polling state; dostoreNP won't.
        self._storage.poll_invalidations()
        oid = self._storage.new_oid()
        revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
        self._storage.poll_invalidations()
        # These will both poll and get the state for (oid, revid1)
        # cached at that location, where it will be found during conflict
        # resolution.
        storage1 = self._storage.new_instance()
        storage1.load(oid, '')
        storage2 = self._storage.new_instance()
        storage2.load(oid, '')
        # Remember that the cache stats are shared between instances.
        # The first had to fetch it, the second can use it.
        __traceback_info__ = storage1._cache.stats()
        self.assertEqual(storage1._cache.stats()['hits'], 1)
        storage1._cache.reset_stats()
        if clear_cache:
            storage1._cache.clear(load_persistent=False)
        self.assertEqual(storage1._cache.stats()['hits'], 0)
        obj.inc()
        obj.inc()
        # The effect of committing two transactions with the same
        # pickle is to commit two different transactions relative to
        # revid1 that add two to _value.
        root_storage = self._storage
        try:
            # The first store must NOT conflict; fail loudly if it tries to resolve.
            def noConflict(*_args, **_kwargs):
                self.fail("Should be no conflict.")
            storage1.tryToResolveConflict = noConflict
            self._storage = storage1
            _revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
            # This one had no conflicts and did no cache work
            self.assertEqual(storage1._cache.stats()['hits'], 0)
            self.assertEqual(storage1._cache.stats()['misses'], 0)
            # This will conflict and will have to use the cache and DB for loadSerial
            self._storage = self.__make_tryToResolveConflict_ignore_committedData(storage2)
            _revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
            # Since we didn't pass any data up from the storage, this
            # would need to make two lookups, for committed data and
            # previous data. If we're history free, we invalidated the
            # object when the first one saved it, but we're lucky
            # enough to find the committed data in our shared state, as well
            # as the previous state: we've got a storage open to a previous
            # transaction that's letting that data stay in memory.
            cache_stats = storage1._cache.stats()
            __traceback_info__ = cache_stats, clear_cache
            if clear_cache:
                self.assertEqual(cache_stats['misses'], 1)
                self.assertEqual(cache_stats['hits'], 1)
            else:
                self.assertEqual(cache_stats['misses'], 0)
                self.assertEqual(cache_stats['hits'], 2)
            # Counter started at 1, incremented twice in each of two
            # resolved transactions: final value is 5.
            data, _serialno = self._storage.load(oid, '')
            inst = zodb_unpickle(data)
            self.assertEqual(inst._value, 5)
        finally:
            storage1.close()
            storage2.close()
            self._storage = root_storage
    def checkResolveConflictBetweenConnectionsNoCache(self):
        """Conflict resolution still works via loadSerial() after the cache is cleared."""
        # If we clear the cache, we can still loadSerial()
        self.checkResolveConflictBetweenConnections(clear_cache=True)
def check16KObject(self):
# Store 16 * 1024 bytes in an object, then retrieve it
data = b'a 16 byte string' * 1024
oid = self._storage.new_oid()
self._dostoreNP(oid, data=data)
got, _ = self._storage.load(oid, '')
self.assertIsInstance(got, bytes)
self.assertEqual(got, data)
self.assertEqual(len(got), len(data))
def check16MObject(self):
# Store 16 * 1024 * 1024 bytes in an object, then retrieve it
data = b'a 16 byte string' * (1024 * 1024)
oid = self._storage.new_oid()
self._dostoreNP(oid, data=data)
got, _serialno = self._storage.load(oid, '')
self.assertEqual(len(got), len(data))
self.assertEqual(got, data)
def check99X1900Objects(self):
# Store 99 objects each with 1900 bytes. This is intended
# to exercise possible buffer overfilling that the batching
# code might cause.
data = b'0123456789012345678' * 100
t = transaction.Transaction()
self._storage.tpc_begin(t)
oids = []
for _ in range(99):
oid = self._storage.new_oid()
self._storage.store(oid, b'\0'*8, data, '', t)
oids.append(oid)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
for oid in oids:
got, _serialno = self._storage.load(oid, '')
self.assertEqual(len(got), len(data))
self.assertEqual(got, data)
def checkPreventOIDOverlap(self):
# Store an object with a particular OID, then verify that
# OID is not reused.
data = b'mydata'
oid1 = b'\0' * 7 + b'\x0f'
self._dostoreNP(oid1, data=data)
oid2 = self._storage.new_oid()
oid1_int = bytes8_to_int64(oid1)
oid2_int = bytes8_to_int64(oid2)
self.assertGreater(
oid2_int, oid1_int,
'old OID %r (%d) should be less than new OID %r (%d)'
% (oid1, oid1_int, oid2, oid2_int))
    def checkNoDuplicateOIDsManyThreads(self):
        """Concurrent OID allocation across storages yields no duplicates."""
        # Many threads in many storages can allocate OIDs with
        # no duplicates or overlaps.
        # https://github.com/zodb/relstorage/issues/283
        from itertools import combinations
        thread_count = 11
        oids_per_segment = 578
        segment_count = 3
        total_expected_oids = oids_per_segment * segment_count
        # One result list per thread; each thread writes only its own slot.
        oids_by_thread = [list() for _ in range(thread_count)]
        def allocate_oids(thread_storage, thread_num):
            try:
                store_conn = thread_storage._store_connection
                allocator = thread_storage._oids
                my_oids = oids_by_thread[thread_num]
                for _ in range(segment_count):
                    my_oids.extend(
                        bytes8_to_int64(thread_storage.new_oid())
                        for _ in range(oids_per_segment)
                    )
                    # Periodically call set_min_oid, like the storage does,
                    # to check for interference.
                    allocator.set_min_oid(my_oids[-1])
                    store_conn.commit()
            finally:
                thread_storage.release()
        threads = [threading.Thread(target=allocate_oids,
                                    args=(self._storage.new_instance(), i))
                   for i in range(thread_count)]
        for t in threads:
            t.start()
        for t in threads:
            t.join(99)
        # They all have the desired length, and each one has no duplicates.
        self.assertEqual(
            [len(s) for s in oids_by_thread],
            [total_expected_oids for _ in range(thread_count)]
        )
        self.assertEqual(
            [len(s) for s in oids_by_thread],
            [len(set(s)) for s in oids_by_thread]
        )
        # They are all disjoint
        for a, b in combinations(oids_by_thread, 2):
            __traceback_info__ = a, b
            a = set(a)
            b = set(b)
            self.assertTrue(a.isdisjoint(b))
        # They are all monotonically increasing.
        for s in oids_by_thread:
            self.assertEqual(
                s,
                sorted(s)
            )
def checkUseCache(self):
# Store an object, cache it, then retrieve it from the cache
self._storage = self.make_storage(
cache_servers='x:1 y:2',
cache_module_name=fakecache.__name__,
cache_prefix='zzz',
)
fakecache.data.clear()
db = DB(self._storage)
try:
c1 = db.open()
self.assertEqual(
c1._storage._cache.cache.g.client.servers,
['x:1', 'y:2'])
r1 = c1.root()
# The root state and checkpoints should now be cached.
# A commit count *might* be cached depending on the ZODB version.
# (Checkpoints are stored in the cache for the sake of tests/monitoring,
# but aren't read.)
# self.assertIn('zzz:checkpoints', fakecache.data)
# self.assertIsNotNone(db.storage._cache.polling_state.checkpoints)
self.assertEqual(sorted(fakecache.data.keys())[-1][:10],
'zzz:state:')
r1['alpha'] = PersistentMapping()
transaction.commit()
cp_count = 1
if self.keep_history:
item_count = 2
else:
# The previous root state was automatically invalidated
# XXX: We go back and forth on that.
item_count = 2
item_count += cp_count
self.assertEqual(len(fakecache.data), item_count)
oid = r1['alpha']._p_oid
c1._storage.load(oid, '')
# Came out of the cache, nothing new
self.assertEqual(len(fakecache.data), item_count)
# make a change
r1['beta'] = 0
transaction.commit()
# Once again, history free automatically invalidated.
# XXX: Depending on my mood.
item_count += 1
self.assertEqual(len(fakecache.data), item_count)
c1._storage.load(oid, '')
# try to load an object that doesn't exist
self.assertRaises(KeyError, c1._storage.load, b'bad.oid.', '')
finally:
db.close()
def checkMultipleStores(self):
# Verify a connection can commit multiple transactions
db = DB(self._storage)
try:
c1 = db.open()
r1 = c1.root()
r1['alpha'] = 1
transaction.commit()
r1['alpha'] = 2
transaction.commit()
finally:
db.close()
def checkLongTransactionDescription(self):
# Don't trip over long transaction descriptions
db = DB(self._storage)
try:
c = db.open()
r = c.root()
r['key'] = 1
transaction.get().note(u'A long description. ' * 1000)
transaction.commit()
finally:
db.close()
    def checkAutoReconnect(self):
        """The storage reconnects after its DB connections are closed behind its back."""
        # Verify auto-reconnect
        db = self._closing(DB(self._storage))
        c1 = db.open()
        r = c1.root()
        r['alpha'] = 1
        transaction.commit()
        c1.close()
        # Going behind its back.
        c1._storage._load_connection.connection.close()
        c1._storage._store_connection.connection.close()
        # ZODB5 implicitly calls sync
        # immediately when a connection is opened;
        # fake that here for older releases.
        c2 = db.open()
        # The connection pool should hand back the same connection object.
        self.assertIs(c2, c1)
        c2.sync()
        r = c2.root()
        self.assertEqual(r['alpha'], 1)
        r['beta'] = PersistentMapping()
        c2.add(r['beta'])
        transaction.commit()
        c2.close()
        del c1
        del c2
    def checkAutoReconnectOnSync(self):
        """``sync()`` re-establishes a closed load connection."""
        # Verify auto-reconnect.
        db = self._closing(DB(self._storage))
        c1 = db.open()
        r = c1.root()
        c1._storage._load_connection.connection.close()
        c1._storage.sync()
        # ZODB5 calls sync when a connection is opened. Our monkey
        # patch on a Connection makes sure that works in earlier
        # versions, but we don't have that patch on ZODB5. So test
        # the storage directly. NOTE: The load connection must be open
        # to trigger the actual sync.
        r = c1.root()
        r['alpha'] = 1
        transaction.commit()
        c1.close()
        # Close both underlying DB connections behind the storage's back.
        c1._storage._load_connection.connection.close()
        c1._storage._store_connection.connection.close()
        c2 = db.open()
        # The pool should return the same connection object.
        self.assertIs(c2, c1)
        r = c2.root()
        self.assertEqual(r['alpha'], 1)
        r['beta'] = PersistentMapping()
        c2.add(r['beta'])
        transaction.commit()
        c2.close()
        del c1
        del c2
    def checkCachePolling(self):
        """A reopened connection sees new data even after its DB connection
        was dropped and re-established without polling."""
        # A second, independent storage against the same database.
        storage2 = self.make_storage(zap=False)
        db = DB(self._storage)
        db2 = DB(storage2)
        try:
            # Set up the database.
            tm1 = transaction.TransactionManager()
            c1 = db.open(transaction_manager=tm1)
            r1 = c1.root()
            r1['obj'] = obj1 = PersistentMapping({'change': 0})
            tm1.commit()
            # Load and change the object in an independent connection.
            tm2 = transaction.TransactionManager()
            c2 = db2.open(transaction_manager=tm2)
            r2 = c2.root()
            r2['obj']['change'] = 1
            tm2.commit()
            # Now c2 has delta_after0.
            # self.assertEqual(len(c2._storage._cache.delta_after0), 2)
            c2.close()
            # Change the object in the original connection.
            c1.sync()
            obj1['change'] = 2
            tm1.commit()
            # Close the database connection to c2.
            c2._storage._load_connection.drop()
            self.assertFalse(c2._storage._load_connection)
            # Make the database connection to c2 reopen without polling.
            c2._storage.load(b'\0' * 8, '')
            self.assertTrue(c2._storage._load_connection)
            # Open a connection, which should be the same connection
            # as c2.
            c3 = db2.open(transaction_manager=tm2)
            self.assertTrue(c3 is c2)
            # self.assertEqual(len(c2._storage._cache.delta_after0), 2)
            # Clear the caches (but not delta_after*)
            c3._resetCache()
            c3._storage._cache.cache.flush_all()
            obj3 = c3.root()['obj']
            # Should have loaded the new object.
            self.assertEqual(obj3['change'], 2)
        finally:
            db.close()
            db2.close()
    def checkDoubleCommitter(self):
        """An object that registers itself twice in one transaction commits cleanly."""
        # Verify we can store an object that gets committed twice in
        # a single transaction.
        db = DB(self._storage)
        try:
            conn = db.open()
            try:
                conn.root()['dc'] = DoubleCommitter()
                transaction.commit()
                # Read it back through a fresh connection to check the
                # committed state.
                conn2 = db.open()
                self.assertEqual(conn2.root()['dc'].new_attribute, 1)
                conn2.close()
            finally:
                transaction.abort()
                conn.close()
        finally:
            db.close()
def checkHistoryWithExtension(self):
# Verify the history method works with transactions that have
# extended info.
db = DB(self._storage)
try:
conn = db.open()
try:
conn.root()['pi'] = 3.14
transaction.get().setExtendedInfo("digits", 3)
transaction.commit()
history = self._storage.history(conn.root()._p_oid)
self.assertEqual(len(history), 1)
if self.keep_history:
self.assertEqual(history[0]['digits'], 3)
finally:
conn.close()
finally:
db.close()
    def checkPackBatchLockNoWait(self):
        """Packing succeeds while another connection holds the commit lock."""
        # Holding the commit lock doesn't interfere with packing.
        #
        # TODO: But what about row locking? Let's add a test
        # that begins a commit and locks some rows and then packs.
        self._storage = self.make_storage(pack_batch_timeout=0)
        adapter = self._storage._adapter
        test_conn, test_cursor = adapter.connmanager.open_for_store()
        db = self._closing(DB(self._storage))
        try:
            # add some data to be packed
            c = self._closing(db.open())
            r = c.root()
            r['alpha'] = PersistentMapping()
            transaction.commit()
            del r['alpha']
            transaction.commit()
            # Pack, with a commit lock held
            # Busy-wait until the clock ticks so packtime is strictly
            # after the last commit.
            now = packtime = time.time()
            while packtime <= now:
                packtime = time.time()
            adapter.locker.hold_commit_lock(test_cursor)
            self._storage.pack(packtime, referencesf)
            adapter.locker.release_commit_lock(test_cursor)
        finally:
            db.close()
            adapter.connmanager.close(test_conn, test_cursor)
    def checkPackKeepNewObjects(self):
        """Packing keeps objects created or modified after the pack time."""
        # Packing should not remove objects created or modified after
        # the pack time, even if they are unreferenced.
        db = DB(self._storage)
        try:
            # add some data to be packed
            c = db.open()
            extra1 = PersistentMapping()
            c.add(extra1)
            extra2 = PersistentMapping()
            c.add(extra2)
            transaction.commit()
            # Choose the pack time to be that last committed transaction.
            packtime = c._storage.lastTransactionInt()
            # extra2 is modified and extra3 created after packtime.
            extra2.foo = 'bar'
            extra3 = PersistentMapping()
            c.add(extra3)
            transaction.commit()
            self.assertGreater(c._storage.lastTransactionInt(), packtime)
            self._storage.pack(packtime, referencesf)
            # extra1 should have been garbage collected
            self.assertRaises(KeyError,
                              self._storage.load, extra1._p_oid, '')
            # extra2 and extra3 should both still exist
            self._storage.load(extra2._p_oid, '')
            self._storage.load(extra3._p_oid, '')
        finally:
            db.close()
    @util.skipOnAppveyor("Random failures")
    # https://ci.appveyor.com/project/jamadden/relstorage/build/1.0.19/job/a1vq619n84ss1s9a
    def checkPackWhileReferringObjectChanges(self):
        """Packing keeps objects referenced by an object that changes mid-pack."""
        # Packing should not remove objects referenced by an
        # object that changes during packing.
        from persistent.timestamp import TimeStamp
        db = self._closing(DB(self._storage))
        try:
            # add some data to be packed
            c = self._closing(db.open())
            root = c.root()
            child = PersistentMapping()
            root['child'] = child
            transaction.commit()
            expect_oids = [child._p_oid]
            def inject_changes():
                # Change the database just after the list of objects
                # to analyze has been determined.
                child2 = PersistentMapping()
                root['child2'] = child2
                transaction.commit()
                expect_oids.append(child2._p_oid)
            # Hook into the pack process at the critical moment.
            adapter = self._storage._adapter
            adapter.packundo.on_filling_object_refs = inject_changes
            # Pack to the current time based on the TID in the database
            last_tid = self._storage.lastTransaction()
            last_tid_time = TimeStamp(last_tid).timeTime()
            packtime = last_tid_time + 1
            self._storage.pack(packtime, referencesf)
            # "The on_filling_object_refs hook should have been called once")
            self.assertEqual(len(expect_oids), 2, expect_oids)
            # Both children should still exist.
            self._storage.load(expect_oids[0], '')
            self._storage.load(expect_oids[1], '')
        finally:
            db.close()
    def checkPackBrokenPickle(self):
        """Packing a database containing a broken pickle raises an unpickling error."""
        # Verify the pack stops with the right exception if it encounters
        # a broken pickle.
        # Under Python 2, with zodbpickle, there may be a difference depending
        # on whether the accelerated implementation is in use. Also, the pure-python
        # version on PyPy can raise IndexError
        from zodbpickle.pickle import UnpicklingError as pUnpickErr
        unpick_errs = (pUnpickErr, IndexError)
        try:
            from zodbpickle.fastpickle import UnpicklingError as fUnpickErr
        except ImportError:
            # No accelerated implementation available on this platform.
            pass
        else:
            unpick_errs += (fUnpickErr,)
        self._dostoreNP(self._storage.new_oid(), data=b'brokenpickle')
        self.assertRaises(unpick_errs, self._storage.pack,
                          time.time() + 10000, referencesf)
    def checkBackwardTimeTravelWithoutRevertWhenStale(self):
        """A stale (rolled-back) database raises ReadConflictError when
        ``revert_when_stale`` is false (the default)."""
        # If revert_when_stale is false (the default), when the database
        # connection is stale (such as through failover to an
        # asynchronous slave that is not fully up to date), the poller
        # should notice that backward time travel has occurred and
        # raise a ReadConflictError.
        self._storage = self.make_storage(revert_when_stale=False)
        db = DB(self._storage)
        try:
            c = db.open()
            r = c.root()
            r['alpha'] = PersistentMapping()
            transaction.commit()
            # To simulate failover to an out of date async slave, take
            # a snapshot of the database at this point, change some
            # object, then restore the database to its earlier state.
            d = tempfile.mkdtemp()
            try:
                # Snapshot the database.
                fs = FileStorage(os.path.join(d, 'Data.fs'))
                fs.copyTransactionsFrom(c._storage)
                # Change data in it.
                r['beta'] = PersistentMapping()
                transaction.commit()
                self.assertTrue('beta' in r)
                # Revert the data.
                # We must use a separate, unrelated storage object to do this,
                # because our storage object is smart enough to notice that the data
                # has been zapped and revert caches for all connections and
                # ZODB objects when we invoke this API.
                storage_2 = self.make_storage(zap=False)
                storage_2.zap_all(reset_oid=False, slow=True)
                storage_2.copyTransactionsFrom(fs)
                storage_2.close()
                fs.close()
            finally:
                shutil.rmtree(d)
            # Sync, which will call poll_invalidations().
            c.sync()
            # Try to load an object, which should cause ReadConflictError.
            r._p_deactivate()
            with self.assertRaises(ReadConflictError):
                r.__getitem__('beta')
        finally:
            db.close()
    def checkBackwardTimeTravelWithRevertWhenStale(self):
        """A stale (rolled-back) database invalidates cached objects when
        ``revert_when_stale`` is true."""
        # If revert_when_stale is true, when the database
        # connection is stale (such as through failover to an
        # asynchronous slave that is not fully up to date), the poller
        # should notice that backward time travel has occurred and
        # invalidate all objects that have changed in the interval.
        self._storage = self.make_storage(revert_when_stale=True)
        db = DB(self._storage)
        try:
            transaction.begin()
            c = db.open()
            r = c.root()
            r['alpha'] = PersistentMapping()
            transaction.commit()
            # To simulate failover to an out of date async slave, take
            # a snapshot of the database at this point, change some
            # object, then restore the database to its earlier state.
            d = tempfile.mkdtemp()
            try:
                transaction.begin()
                fs = FileStorage(os.path.join(d, 'Data.fs'))
                fs.copyTransactionsFrom(c._storage)
                r['beta'] = PersistentMapping()
                transaction.commit()
                self.assertTrue('beta' in r)
                c._storage.zap_all(reset_oid=False, slow=True)
                c._storage.copyTransactionsFrom(fs)
                fs.close()
            finally:
                shutil.rmtree(d)
            # r should still be in the cache.
            self.assertTrue('beta' in r)
            # Now sync, which will call poll_invalidations().
            c.sync()
            # r should have been invalidated
            self.assertEqual(r._p_changed, None)
            # r should be reverted to its earlier state.
            self.assertFalse('beta' in r)
        finally:
            db.close()
    @util.skipOnAppveyor("Random failures")
    # https://ci.appveyor.com/project/jamadden/relstorage/build/1.0.75/job/32uu4xdp5mubqma8
    def checkBTreesLengthStress(self):
        """Concurrent BTrees.Length updates all resolve without loss."""
        # BTrees.Length objects are unusual Persistent objects: they
        # have a conflict resolution algorithm that cannot fail, so if
        # we do get a failure it's due to a problem with us.
        # Unfortunately, tryResolveConflict hides all underlying exceptions
        # so we have to enable logging to see them.
        from ZODB.ConflictResolution import logger as CRLogger
        from BTrees.Length import Length
        from six import reraise
        def log_err(*args, **kwargs): # pylint:disable=unused-argument
            # Re-raise whatever tryResolveConflict swallowed so the test
            # fails with the real cause.
            import sys
            reraise(*sys.exc_info())
        CRLogger.debug = log_err
        CRLogger.exception = log_err
        updates_per_thread = 50
        thread_count = 4
        db = DB(self._storage)
        try:
            c = db.open()
            try:
                c.root()['length'] = Length()
                transaction.commit()
            finally:
                c.close()
            def updater():
                for _ in range(updates_per_thread):
                    thread_c = db.open()
                    __traceback_info__ = thread_c._storage
                    try:
                        thread_c.root()['length'].change(1)
                        # Random sleep to vary interleaving and provoke conflicts.
                        time.sleep(random.random() * 0.05)
                        transaction.commit()
                    finally:
                        thread_c.close()
            threads = []
            for _ in range(thread_count):
                t = threading.Thread(target=updater)
                threads.append(t)
            for t in threads:
                t.start()
            for t in threads:
                t.join(120)
            # Every increment must have been preserved through resolution.
            c = db.open()
            try:
                self.assertEqual(c.root()['length'](),
                                 updates_per_thread * thread_count)
            finally:
                transaction.abort()
                c.close()
        finally:
            db.close()
            # Remove the instance attributes so the class-level logger
            # methods take effect again.
            del CRLogger.debug
            del CRLogger.exception
    def checkAfterCompletion(self):
        """``afterCompletion()`` quietly rolls back the load connection."""
        # The after completion method, which can only be called
        # outside of 2-phase commit is otherwise equivalent to calling
        # tpc_abort.
        from ZODB.interfaces import IMVCCAfterCompletionStorage
        self._storage = self.make_storage(revert_when_stale=False)
        with mock.patch.object(self._storage._load_connection,
                               'rollback_quietly') as rb:
            self._storage.afterCompletion()
        rb.assert_called_with()
        self.assertTrue(
            IMVCCAfterCompletionStorage.providedBy(self._storage))
    def checkConfigureViaZConfig(self):
        """A storage built from ZConfig text has all configured options applied."""
        replica_fn = None
        replica_conf = ''
        # Only include replica configuration when testing against the
        # standard (default) database server host.
        if util.DEFAULT_DATABASE_SERVER_HOST == util.STANDARD_DATABASE_SERVER_HOST:
            replica_fn = self.get_adapter_zconfig_replica_conf()
            replica_conf = 'replica-conf ' + self.get_adapter_zconfig_replica_conf()
        conf = u"""
        %import relstorage
        <zodb main>
        <relstorage>
        name xyz
        read-only false
        keep-history {KEEP_HISTORY}
        {REPLICA_CONF}
        blob-dir .
        blob-cache-size-check-external true
        blob-cache-size 100MB
        blob-chunk-size 10MB
        cache-local-dir-read-count 12
        cache-local-dir-write-max-size 10MB
        {ADAPTER}
        </relstorage>
        </zodb>
        """.format(
            KEEP_HISTORY='true' if self.keep_history else 'false',
            REPLICA_CONF=replica_conf,
            ADAPTER=self.get_adapter_zconfig()
        )
        __traceback_info__ = conf
        schema_xml = u"""
        <schema>
        <import package="ZODB"/>
        <section type="ZODB.database" name="main" attribute="database"/>
        </schema>
        """
        import ZConfig
        from io import StringIO
        from ZODB.interfaces import IBlobStorageRestoreable
        from relstorage.adapters.interfaces import IRelStorageAdapter
        from relstorage.blobhelper.interfaces import ICachedBlobHelper
        from hamcrest import assert_that
        from nti.testing.matchers import validly_provides
        schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
        config, _ = ZConfig.loadConfigFile(schema, StringIO(conf))
        db = config.database.open()
        try:
            # Verify each configured option made it through to the storage.
            storage = db.storage
            assert_that(storage, validly_provides(IBlobStorageRestoreable))
            self.assertEqual(storage.isReadOnly(), False)
            self.assertEqual(storage.getName(), "xyz")
            assert_that(storage.blobhelper, validly_provides(ICachedBlobHelper))
            self.assertIn('_External', str(storage.blobhelper.cache_checker))
            adapter = storage._adapter
            self.assertIsInstance(adapter, self.get_adapter_class())
            assert_that(adapter, validly_provides(IRelStorageAdapter))
            self.verify_adapter_from_zconfig(adapter)
            self.assertEqual(adapter.keep_history, self.keep_history)
            if replica_fn:
                self.assertEqual(
                    adapter.connmanager.replica_selector.replica_conf,
                    replica_fn)
            # 10MB expressed in bytes.
            self.assertEqual(storage._options.blob_chunk_size, 10485760)
        finally:
            db.close()
def checkGeventSwitchesOnOpen(self):
# We make some queries when we open; if the driver is gevent
# capable, that should switch.
driver = self._storage._adapter.driver
if not driver.gevent_cooperative():
raise unittest.SkipTest("Driver %s not gevent capable" % (driver,))
from gevent.util import assert_switches
with assert_switches():
self.open()
#####
# Prefetch Tests
#####
def checkPrefetch(self):
db = DB(self._storage)
conn = db.open()
mapping = conn.root()['key'] = PersistentMapping()
transaction.commit()
item_count = 3
# The new state for the root invalidated the old state,
# and since there is no other connection that might be using it,
# we drop it from the cache.
item_count = 2
self.assertEqual(item_count, len(self._storage._cache))
tid = bytes8_to_int64(mapping._p_serial)
d = self._storage._cache.local_client._cache.data
self.assertEqual(d[0].value[1], tid)
self.assertEqual(d[1].value[1], tid)
self._storage._cache.clear()
self.assertEmpty(self._storage._cache)
conn.prefetch(z64, mapping)
self.assertEqual(2, len(self._storage._cache))
# second time is a no-op
conn.prefetch(z64, mapping)
self.assertEqual(2, len(self._storage._cache))
######
# Parallel Commit Tests
######
    def checkCanVoteAndCommitWhileOtherStorageVotes(self):
        """Two storage instances can both reach tpc_vote concurrently.

        TIDs are assigned in tpc_finish order, not vote order.
        """
        storage1 = self._closing(self._storage.new_instance())
        storage2 = self._closing(self._storage.new_instance())
        # Bring them both into tpc_vote phase. Before parallel commit,
        # this would have blocked as the first storage took the commit lock
        # in tpc_vote.
        txs = {}
        for storage in (storage1, storage2):
            data = zodb_pickle(MinPO(str(storage)))
            t = TransactionMetaData()
            txs[storage] = t
            storage.tpc_begin(t)
            oid = storage.new_oid()
            storage.store(oid, None, data, '', t)
            storage.tpc_vote(t)
        # The order we choose to finish is the order of the returned
        # tids.
        tid1 = storage2.tpc_finish(txs[storage2])
        tid2 = storage1.tpc_finish(txs[storage1])
        self.assertGreater(tid2, tid1)
        storage1.close()
        storage2.close()
    def checkCanLoadObjectStateWhileBeingModified(self):
        """A reader can load an object's committed state while a writer
        holds the row lock for a pending modification."""
        # Get us an object in the database
        storage1 = self._closing(self._storage.new_instance())
        data = zodb_pickle(MinPO(str(storage1)))
        t = TransactionMetaData()
        storage1.tpc_begin(t)
        oid = storage1.new_oid()
        storage1.store(oid, None, data, '', t)
        storage1.tpc_vote(t)
        initial_tid = storage1.tpc_finish(t)
        storage1.release()
        del storage1
        # Start with a cold cache so the reader must hit the database.
        self._storage._cache.clear(load_persistent=False)
        storage1 = self._closing(self._storage.new_instance())
        # Get a completely independent storage, not sharing a cache
        storage2 = self._closing(self.make_storage(zap=False))
        # First storage attempts to modify the oid.
        t = TransactionMetaData()
        storage1.tpc_begin(t)
        storage1.store(oid, initial_tid, data, '', t)
        # And locks the row.
        storage1.tpc_vote(t)
        # storage2 would like to read the old row.
        loaded_data, loaded_tid = storage2.load(oid)
        self.assertEqual(loaded_data, data)
        self.assertEqual(loaded_tid, initial_tid)
        # Commit can now happen.
        tid2 = storage1.tpc_finish(t)
        self.assertGreater(tid2, initial_tid)
        storage1.close()
        storage2.close()
class AbstractRSZodbConvertTests(StorageCreatingMixin,
                                 FSZODBConvertTests,
                                 # This one isn't cooperative in
                                 # setUp(), so it needs to be last.
                                 ZODB.tests.util.TestCase):
    """zodbconvert tests between a (zlib-wrapped) FileStorage and a RelStorage."""
    keep_history = True
    # Section names in the generated zodbconvert config; subclasses swap
    # these to change the conversion direction.
    filestorage_name = 'source'
    relstorage_name = 'destination'
    # Path to the FileStorage data file; subclasses provide it as a property.
    filestorage_file = None

    def setUp(self):
        """Write the zodbconvert config file and zap the RelStorage side."""
        super(AbstractRSZodbConvertTests, self).setUp()
        cfg = """
        %%import relstorage
        %%import zc.zlibstorage
        <zlibstorage %s>
        <filestorage>
        path %s
        </filestorage>
        </zlibstorage>
        <zlibstorage %s>
        <relstorage>
        %s
        cache-prefix %s
        cache-local-dir %s
        </relstorage>
        </zlibstorage>
        """ % (
            self.filestorage_name,
            self.filestorage_file,
            self.relstorage_name,
            self.get_adapter_zconfig(),
            self.relstorage_name,
            os.path.abspath('.'),
        )
        self._write_cfg(cfg)
        self.make_storage(zap=True).close()

    def _wrap_storage(self, storage):
        # Everything in these tests goes through a zlib wrapper, matching
        # the <zlibstorage> sections in the config.
        return self._closing(ZlibStorage(storage))

    def _create_dest_storage(self):
        return self._wrap_storage(super(AbstractRSZodbConvertTests, self)._create_dest_storage())

    def _create_src_storage(self):
        return self._wrap_storage(super(AbstractRSZodbConvertTests, self)._create_src_storage())

    def test_new_instance_still_zlib(self):
        """new_instance() of a wrapped storage stays wrapped and keeps its hooks."""
        storage = self._closing(self.make_storage())
        new_storage = self._closing(storage.new_instance())
        self.assertIsInstance(new_storage,
                              ZlibStorage)
        self.assertIn('_crs_untransform_record_data', storage.base.__dict__)
        self.assertIn('_crs_transform_record_data', storage.base.__dict__)
        self.assertIn('_crs_untransform_record_data', new_storage.base.__dict__)
        self.assertIn('_crs_transform_record_data', new_storage.base.__dict__)
class AbstractRSDestZodbConvertTests(AbstractRSZodbConvertTests):
    """Convert *from* a FileStorage *to* a RelStorage (the default names)."""

    zap_supported_by_dest = True

    @property
    def filestorage_file(self):
        # The FileStorage is the source side.
        return self.srcfile

    def _create_dest_storage(self):
        # The destination is a RelStorage; keep the cache prefix aligned
        # with the generated config and don't wipe existing data here.
        return self._closing(self.make_storage(cache_prefix=self.relstorage_name, zap=False))
class AbstractRSSrcZodbConvertTests(AbstractRSZodbConvertTests):
    """Convert *from* a RelStorage *to* a FileStorage (names swapped)."""

    filestorage_name = 'destination'
    relstorage_name = 'source'

    @property
    def filestorage_file(self):
        # The FileStorage is the destination side.
        return self.destfile

    def _create_src_storage(self):
        # The source is a RelStorage; cache prefix must match the config.
        return self._closing(self.make_storage(cache_prefix=self.relstorage_name, zap=False))
class AbstractIDBOptionsTest(unittest.TestCase):
    """Checks that a driver-options module implements ``IDBDriverOptions``."""

    # Subclasses set this to the adapter options module under test.
    db_options = None

    def test_db_options_compliance(self):
        from hamcrest import assert_that
        from nti.testing.matchers import validly_provides
        from relstorage.adapters.interfaces import IDBDriverOptions
        from relstorage.adapters.interfaces import IDBDriverFactory
        __traceback_info__ = self.db_options
        assert_that(self.db_options, validly_provides(IDBDriverOptions))
        # Every advertised driver factory must itself be compliant.
        for factory in self.db_options.known_driver_factories():
            assert_that(factory, validly_provides(IDBDriverFactory))
class AbstractIDBDriverTest(unittest.TestCase):
    """Checks that a concrete driver object implements ``IDBDriver``."""

    # Subclasses set this to the driver instance under test.
    driver = None

    def test_db_driver_compliance(self):
        from hamcrest import assert_that
        from nti.testing.matchers import validly_provides
        from relstorage.adapters.interfaces import IDBDriver
        __traceback_info__ = self.driver
        assert_that(self.driver, validly_provides(IDBDriver))
class DoubleCommitter(Persistent):
    """A crazy persistent class that changes self in __getstate__"""

    def __getstate__(self):
        # Mutating during __getstate__ re-registers the object with the
        # transaction mid-commit, exercising the second-store code path.
        if not hasattr(self, 'new_attribute'):
            self.new_attribute = 1 # pylint:disable=attribute-defined-outside-init
        return Persistent.__getstate__(self)
def _close_and_clean_storage(storage):
try:
storage.close()
storage.cleanup()
except Exception: # pylint:disable=broad-except
pass
class AbstractToFileStorage(RelStorageTestBase):
    """Base for recovery tests that copy *to* a lazily-created FileStorage."""
    # Subclass this and set:
    # - keep_history = True; and
    # - A base class of UndoableRecoveryStorage
    #
    # or
    # - keep_history = False; and
    # A base class of BasicRecoveryStorage

    # We rely on being placed in a temporary directory by a super
    # class that will be cleaned up by tearDown().

    def setUp(self):
        super(AbstractToFileStorage, self).setUp()
        # Use the abspath so that even if we close it after
        # we've returned to our original directory (e.g.,
        # close is run as part of addCleanup(), which happens after
        # tearDown) we don't write index files into the original directory.
        self._dst_path = os.path.abspath(self.rs_temp_prefix + 'Dest.fs')
        # Lazily created by the _dst property; name-mangled to this class.
        self.__dst = None

    @property
    def _dst(self):
        # Create the destination on first use so tests that never touch
        # it don't create files on disk.
        if self.__dst is None:
            self.__dst = FileStorage(self._dst_path, create=True)
            # On Windows, though, this could be too late: We can't remove
            # files that are still open, and zope.testing.setupstack
            # was asked to remove the temp dir as part of tearing itself down;
            # cleanups run after tearDown runs (which is when the setupstack runs.)
            self.addCleanup(_close_and_clean_storage, self.__dst)
        return self.__dst

    def tearDown(self):
        # Close eagerly here (before setupstack removes the temp dir);
        # the addCleanup above is then a harmless no-op second close.
        if hasattr(self.__dst, 'close'):
            _close_and_clean_storage(self.__dst)
        self.__dst = 42  # Not none so we don't try to create.
        super(AbstractToFileStorage, self).tearDown()

    def new_dest(self):
        return self._closing(FileStorage(self._dst_path))
class AbstractFromFileStorage(RelStorageTestBase):
    """Base for recovery tests that copy *from* a FileStorage source."""
    # As for AbstractToFileStorage

    def setUp(self):
        super(AbstractFromFileStorage, self).setUp()
        self._src_path = os.path.abspath(self.rs_temp_prefix + 'Source.fs')
        # Lazily created by the _dst property; name-mangled to this class.
        self.__dst = None

    def make_storage_to_cache(self):
        return FileStorage(self._src_path, create=True)

    @property
    def _dst(self):
        # Destination is a RelStorage, created on first use.
        if self.__dst is None:
            self.__dst = self.make_storage()
            self.addCleanup(_close_and_clean_storage, self.__dst)
        return self.__dst

    def tearDown(self):
        # Close eagerly; the registered cleanup is a harmless second close.
        if hasattr(self.__dst, 'close'):
            _close_and_clean_storage(self.__dst)
        self.__dst = 42  # Not none so we don't try to create.
        super(AbstractFromFileStorage, self).tearDown()

    def new_dest(self):
        return self._dst
|
atombot.py | # atombot.py
from threading import Lock, Thread
from time import sleep
from atom import Element
from atom.messages import Response
class AtomBot:
    """A toy robot that moves along a short one-dimensional track.

    Valid positions are the integers 0 through ``max_pos`` inclusive
    (``move_left``/``move_right`` clamp to that range). The ascii
    representation toggles between "o" and "O" via ``transform``.
    All mutable state is guarded by locks because commands may arrive
    from multiple threads.
    """

    def __init__(self):
        # This defines atombot's current position
        self.pos = 2
        # Highest position index atombot can reach (valid range 0..max_pos)
        self.max_pos = 5
        # An ascii representation of atombot!
        self.atombot = "o"
        # Lock on updates to robot position
        self.pos_lock = Lock()
        # Lock on updates to robot representation
        self.bot_lock = Lock()

    def move_left(self, steps):
        """
        Command for moving AtomBot left for a number of steps.

        Args:
            steps: Number of steps to move.

        Returns:
            Response: success message, or err_code=1 for invalid steps.
        """
        # Note that we are responsible for converting the data type from the
        # sent command
        steps = int(steps)
        if steps < 0 or steps > self.max_pos:
            # If we encounter an error, we can send an error code and error
            # string in the response of the command
            return Response(
                err_code=1, err_str=f"Steps must be between 0 and {self.max_pos}"
            )
        # Update the position, clamped at the left edge of the track.
        with self.pos_lock:
            self.pos = max(0, self.pos - steps)
        # If successful, we simply return a success string
        return Response(data=f"Moved left {steps} steps.", serialize=True)

    def move_right(self, steps):
        """
        Command for moving AtomBot right for a number of steps.

        Args:
            steps: Number of steps to move.

        Returns:
            Response: success message, or err_code=1 for invalid steps.
        """
        # Note that we are responsible for converting the data type from the
        # sent command
        steps = int(steps)
        if steps < 0 or steps > self.max_pos:
            # If we encounter an error, we can send an error code and error
            # string in the response of the command
            return Response(
                err_code=1, err_str=f"Steps must be between 0 and {self.max_pos}"
            )
        # Update the position, clamped at the right edge of the track.
        with self.pos_lock:
            self.pos = min(self.max_pos, self.pos + steps)
        # If successful, we simply return a success string
        return Response(data=f"Moved right {steps} steps.", serialize=True)

    def transform(self, _):
        """
        Command for transforming AtomBot!

        Toggles the ascii representation between "o" and "O". Commands
        must accept a single parameter even when it is unused.
        """
        # Update bot ascii representation
        with self.bot_lock:
            if self.atombot == "o":
                self.atombot = "O"
            else:
                self.atombot = "o"
        return Response(data=f"Transformed to {self.atombot}!", serialize=True)

    def get_pos(self):
        """Return the current position under the position lock."""
        with self.pos_lock:
            return self.pos

    def get_pos_map(self):
        """
        Returns the current position of AtomBot as a visual.
        """
        # BUGFIX: allocate max_pos + 1 cells. move_right clamps pos to
        # max_pos (inclusive), so the original ["-"] * self.max_pos map
        # raised IndexError whenever the bot stood at the rightmost spot.
        pos_map = ["-"] * (self.max_pos + 1)
        cur_pos = self.get_pos()
        with self.bot_lock:
            pos_map[cur_pos] = self.atombot
            return " ".join(pos_map)

    def is_healthy(self):
        # This is an example health-check, which can be used to tell other
        # elements that depend on you
        # whether you are ready to receive commands or not. Any non-zero error
        # code means you are unhealthy.
        return Response(err_code=0, err_str="Everything is good")
if __name__ == "__main__":
    # Entry point: register AtomBot's commands with the atom system and
    # publish its position/visual to streams forever.
    print("Launching...")
    # Create our element and call it "atombot"
    element = Element("atombot")

    # Instantiate our AtomBot class
    atombot = AtomBot()

    # We add a healthcheck to our atombot element.
    # This is optional. If you don't do this, atombot is assumed healthy as
    # soon as its command_loop executes
    element.healthcheck_set(atombot.is_healthy)

    # This registers the relevant AtomBot methods as a command in the atom
    # system
    # We set the timeout so the caller will know how long to wait for the
    # command to execute
    element.command_add("move_left", atombot.move_left, timeout=50, deserialize=True)
    element.command_add("move_right", atombot.move_right, timeout=50, deserialize=True)
    # Transform takes no inputs, so there's nothing to deserialize
    element.command_add("transform", atombot.transform, timeout=50)

    # We create a thread and run the command loop which will constantly check
    # for incoming commands from atom
    # We use a thread so we don't hang on the command_loop function because
    # we will be performing other tasks
    thread = Thread(target=element.command_loop, daemon=True)
    thread.start()

    # This will block until every element in the list reports it is healthy.
    # Useful if you depend on other elements.
    element.wait_for_elements_healthy(["atombot"])

    # Create an infinite loop that publishes the position of atombot to a
    # stream as well as a visual of its position
    while True:
        # We write our position data and the visual of atombot's position to
        # their respective streams
        # The maxlen parameter will determine how many entries atom stores
        # This data is serialized using msgpack
        element.entry_write(
            "pos", {"data": atombot.get_pos()}, maxlen=10, serialize=True
        )
        element.entry_write(
            "pos_map", {"data": atombot.get_pos_map()}, maxlen=10, serialize=True
        )

        # We can also choose to write binary data directly without serializing
        element.entry_write("pos_binary", {"data": atombot.get_pos()}, maxlen=10)

        # Sleep so that we aren't consuming all of our CPU resources
        sleep(0.01)
|
database_server.py | from __future__ import print_function, absolute_import, division, unicode_literals
# This file is part of the ISIS IBEX application.
# Copyright (C) 2012-2016 Science & Technology Facilities Council.
# All rights reserved.
#
# This program is distributed in the hope that it will be useful.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution.
# EXCEPT AS EXPRESSLY SET FORTH IN THE ECLIPSE PUBLIC LICENSE V1.0, THE PROGRAM
# AND ACCOMPANYING MATERIALS ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND. See the Eclipse Public License v1.0 for more details.
#
# You should have received a copy of the Eclipse Public License v1.0
# along with this program; if not, you can obtain a copy from
# https://www.eclipse.org/org/documents/epl-v10.php or
# http://opensource.org/licenses/eclipse-1.0.php
# Add root path for access to server_commons
import os
import traceback
import six
import sys
import json
import argparse
import codecs
from functools import partial
from pcaspy import Driver
from time import sleep
from threading import Thread, RLock
sys.path.insert(0, os.path.abspath(os.environ["MYDIRBLOCK"]))
from DatabaseServer.exp_data import ExpData, ExpDataSource
from DatabaseServer.procserv_utils import ProcServWrapper
from DatabaseServer.options_holder import OptionsHolder
from DatabaseServer.options_loader import OptionsLoader
from server_common.mysql_abstraction_layer import SQLAbstraction
from server_common.utilities import compress_and_hex, print_and_log, set_logger, convert_to_json, \
dehex_and_decompress, char_waveform
from server_common.channel_access_server import CAServer
from server_common.constants import IOCS_NOT_TO_STOP
from server_common.ioc_data import IOCData
from server_common.ioc_data_source import IocDataSource
from server_common.pv_names import DatabasePVNames as DbPVNames
from server_common.loggers.isis_logger import IsisLogger
# Route all print_and_log output through the ISIS logger.
set_logger(IsisLogger())

# Macro expansions used when building PV names and paths. All three
# environment variables must be set or startup fails with KeyError.
MACROS = {
    "$(MYPVPREFIX)": os.environ['MYPVPREFIX'],
    "$(EPICS_KIT_ROOT)": os.environ['EPICS_KIT_ROOT'],
    "$(ICPCONFIGROOT)": os.environ['ICPCONFIGROOT']
}

# Log target and severity tags passed to print_and_log.
LOG_TARGET = "DBSVR"
INFO_MSG = "INFO"
MAJOR_MSG = "MAJOR"
class DatabaseServer(Driver):
    """
    The class for handling all the static PV access and monitors etc.
    """
    def __init__(self, ca_server: CAServer, ioc_data: IOCData, exp_data: ExpData, options_folder: str,
                 blockserver_prefix: str, test_mode: bool = False):
        """
        Constructor.

        Args:
            ca_server: The CA server used for generating PVs on the fly
            ioc_data: The data source for IOC information
            exp_data: The data source for experiment information
            options_folder: The location of the folder containing the config.xml file that holds IOC options
            blockserver_prefix: The PV prefix to use
            test_mode: Enables starting the server in a mode suitable for unit tests
        """
        # NOTE(review): the pcaspy Driver base __init__ is skipped in test
        # mode — presumably it needs a live CA context; confirm before changing.
        if not test_mode:
            super(DatabaseServer, self).__init__()
        self._blockserver_prefix = blockserver_prefix
        self._ca_server = ca_server
        self._options_holder = OptionsHolder(options_folder, OptionsLoader())
        self._pv_info = self._generate_pv_acquisition_info()
        self._iocs = ioc_data
        self._ed = exp_data
        if self._iocs is not None and not test_mode:
            # Start a background thread for keeping track of running IOCs
            self.monitor_lock = RLock()
            monitor_thread = Thread(target=self._update_ioc_monitors, args=())
            monitor_thread.daemon = True  # Daemonise thread
            monitor_thread.start()

    def _generate_pv_acquisition_info(self) -> dict:
        """
        Generates information needed to get the data for the DB PVs.

        Returns:
            Dictionary containing the information to get the information for the PVs
        """
        enhanced_info = DatabaseServer.generate_pv_info()

        # Attach a 'get' callable to each PV's info entry.
        def add_get_method(pv, get_function):
            enhanced_info[pv]['get'] = get_function

        add_get_method(DbPVNames.IOCS, self._get_iocs_info)
        add_get_method(DbPVNames.HIGH_INTEREST, partial(self._get_interesting_pvs, "HIGH"))
        add_get_method(DbPVNames.MEDIUM_INTEREST, partial(self._get_interesting_pvs, "MEDIUM"))
        add_get_method(DbPVNames.LOW_INTEREST, partial(self._get_interesting_pvs, "LOW"))
        add_get_method(DbPVNames.FACILITY, partial(self._get_interesting_pvs, "FACILITY"))
        add_get_method(DbPVNames.ACTIVE_PVS, self._get_active_pvs)
        # An empty level string means "all interest levels".
        add_get_method(DbPVNames.ALL_PVS, partial(self._get_interesting_pvs, ""))
        add_get_method(DbPVNames.SAMPLE_PARS, self._get_sample_par_names)
        add_get_method(DbPVNames.BEAMLINE_PARS, self._get_beamline_par_names)
        add_get_method(DbPVNames.USER_PARS, self._get_user_par_names)
        add_get_method(DbPVNames.IOCS_NOT_TO_STOP, DatabaseServer._get_iocs_not_to_stop)
        return enhanced_info

    @staticmethod
    def generate_pv_info() -> dict:
        """
        Generates information needed to construct PVs. Must be consumed by Server before
        DatabaseServer is initialized so must be static

        Returns:
            Dictionary containing the information to construct PVs
        """
        # Waveform sizes: large lists of PVs need the bigger buffer.
        pv_size_128k = 128000
        pv_size_10k = 10000
        pv_info = {}
        for pv in [DbPVNames.IOCS, DbPVNames.HIGH_INTEREST, DbPVNames.MEDIUM_INTEREST, DbPVNames.LOW_INTEREST,
                   DbPVNames.FACILITY, DbPVNames.ACTIVE_PVS, DbPVNames.ALL_PVS, DbPVNames.IOCS_NOT_TO_STOP]:
            pv_info[pv] = char_waveform(pv_size_128k)
        for pv in [DbPVNames.SAMPLE_PARS, DbPVNames.BEAMLINE_PARS, DbPVNames.USER_PARS]:
            pv_info[pv] = char_waveform(pv_size_10k)
        return pv_info

    def get_data_for_pv(self, pv: str) -> bytes:
        """
        Get the data for the given pv name.

        Args:
            pv: The name of the PV to get the data for.

        Return:
            The data, compressed and hexed.
        """
        data = self._pv_info[pv]['get']()
        data = compress_and_hex(six.text_type(json.dumps(data)))
        # Warn (via the log) if the encoded data exceeds the waveform size.
        self._check_pv_capacity(pv, len(data), self._blockserver_prefix)
        return data

    def read(self, reason: str) -> str:
        """
        A method called by SimpleServer when a PV is read from the DatabaseServer over Channel Access.

        Args:
            reason: The PV that is being requested (without the PV prefix)

        Returns:
            A compressed and hexed JSON formatted string that gives the desired information based on reason.
        """
        # Known DB PVs are computed on demand; anything else falls through
        # to the stored parameter value.
        return self.get_data_for_pv(reason) if reason in self._pv_info.keys() else self.getParam(reason)

    def write(self, reason: str, value: str) -> bool:
        """
        A method called by SimpleServer when a PV is written to the DatabaseServer over Channel Access.

        Args:
            reason: The PV that is being requested (without the PV prefix)
            value: The data being written to the 'reason' PV

        Returns:
            True
        """
        try:
            if reason == 'ED:RBNUMBER:SP':
                self._ed.update_experiment_id(value)
            elif reason == 'ED:USERNAME:SP':
                self._ed.update_username(dehex_and_decompress(value.encode('utf-8')).decode('utf-8'))
        except Exception as e:
            # On failure the error text (compressed/hexed) is stored in
            # place of the written value so clients can see what happened.
            value = compress_and_hex(convert_to_json("Error: " + str(e)))
            print_and_log(str(e), MAJOR_MSG)
        # store the values
        self.setParam(reason, value)
        return True

    def _update_ioc_monitors(self) -> None:
        """
        Updates all the PVs that hold information on the IOCS and their associated PVs.

        Runs forever on a daemon thread, polling once per second.
        """
        while True:
            if self._iocs is not None:
                self._iocs.update_iocs_status()
                for pv in [DbPVNames.IOCS, DbPVNames.HIGH_INTEREST, DbPVNames.MEDIUM_INTEREST, DbPVNames.FACILITY,
                           DbPVNames.ACTIVE_PVS, DbPVNames.ALL_PVS]:
                    encoded_data = self.get_data_for_pv(pv)
                    # No need to update monitors if data hasn't changed
                    if not self.getParam(pv) == encoded_data:
                        self.setParam(pv, encoded_data)
                # Update them
                with self.monitor_lock:
                    self.updatePVs()
            sleep(1)

    def _check_pv_capacity(self, pv: str, size: int, prefix: str) -> None:
        """
        Check the capacity of a PV and write to the log if it is too small.

        Args:
            pv: The PV that is being requested (without the PV prefix)
            size: The required size
            prefix: The PV prefix
        """
        if size > self._pv_info[pv]['count']:
            print_and_log("Too much data to encode PV {0}. Current size is {1} characters but {2} are required"
                          .format(prefix + pv, self._pv_info[pv]['count'], size),
                          MAJOR_MSG, LOG_TARGET)

    def _get_iocs_info(self) -> dict:
        # Merge per-IOC options from config.xml into the IOC records.
        iocs = self._iocs.get_iocs()
        options = self._options_holder.get_config_options()
        for iocname in iocs.keys():
            if iocname in options:
                iocs[iocname].update(options[iocname])
        return iocs

    def _get_pvs(self, get_method: callable, replace_pv_prefix: bool, *get_args: list) -> list:
        """
        Method to get pv data using the given method called with the given arguments and optionally remove instrument
        prefixes from pv names.

        Args:
            get_method: The method used to get pv data.
            replace_pv_prefix: True to remove pv prefixes, False if not.
            get_args: The arguments to be applied to get_method.

        Returns:
            a list of names of pvs.
        """
        if self._iocs is not None:
            pv_data = get_method(*get_args)
            if replace_pv_prefix:
                pv_data = [p.replace(MACROS["$(MYPVPREFIX)"], "") for p in pv_data]
            return pv_data
        else:
            return []

    def _get_interesting_pvs(self, level: str) -> list:
        """
        Gets interesting pvs of the current instrument.

        Args:
            level: The level of high interesting pvs, can be high, low, medium or facility. If level is an empty
                string, it returns all interesting pvs of all levels.

        Returns:
            a list of names of pvs with given level of interest.
        """
        return self._get_pvs(self._iocs.get_interesting_pvs, False, level)

    def _get_active_pvs(self) -> list:
        """
        Gets all pvs belonging to IOCs that are currently running on the current instrument.

        Returns:
            a list of names of pvs.
        """
        return self._get_pvs(self._iocs.get_active_pvs, False)

    def _get_sample_par_names(self) -> list:
        """
        Returns the sample parameters from the database, replacing the MYPVPREFIX macro.

        Returns:
            A list of sample parameter names, an empty list if the database does not exist
        """
        return self._get_pvs(self._iocs.get_sample_pars, True)

    def _get_beamline_par_names(self) -> list:
        """
        Returns the beamline parameters from the database, replacing the MYPVPREFIX macro.

        Returns:
            A list of beamline parameter names, an empty list if the database does not exist
        """
        return self._get_pvs(self._iocs.get_beamline_pars, True)

    def _get_user_par_names(self) -> list:
        """
        Returns the user parameters from the database, replacing the MYPVPREFIX macro.

        Returns:
            A list of user parameter names, an empty list if the database does not exist
        """
        return self._get_pvs(self._iocs.get_user_pars, True)

    @staticmethod
    def _get_iocs_not_to_stop() -> list:
        """
        Get the IOCs that are not to be stopped.

        Returns:
            A list of IOCs not to stop
        """
        return IOCS_NOT_TO_STOP
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-bs', '--blockserver_prefix', nargs=1, type=str,
                        default=[MACROS["$(MYPVPREFIX)"]+'CS:'],
                        help='The prefix for PVs served by the blockserver(default=%MYPVPREFIX%CS:)')
    parser.add_argument('-od', '--options_dir', nargs=1, type=str, default=['.'],
                        help='The directory from which to load the configuration options(default=current directory)')
    args = parser.parse_args()

    # Normalise the prefix: it must end with ':' and may embed %MYPVPREFIX%.
    BLOCKSERVER_PREFIX = args.blockserver_prefix[0]
    if not BLOCKSERVER_PREFIX.endswith(':'):
        BLOCKSERVER_PREFIX += ":"
    BLOCKSERVER_PREFIX = BLOCKSERVER_PREFIX.replace('%MYPVPREFIX%', MACROS["$(MYPVPREFIX)"])
    print_and_log("BLOCKSERVER PREFIX = %s" % BLOCKSERVER_PREFIX, INFO_MSG, LOG_TARGET)

    OPTIONS_DIR = os.path.abspath(args.options_dir[0])
    print_and_log("OPTIONS DIRECTORY = %s" % OPTIONS_DIR, INFO_MSG, LOG_TARGET)
    if not os.path.isdir(os.path.abspath(OPTIONS_DIR)):
        # Create it then
        os.makedirs(os.path.abspath(OPTIONS_DIR))

    SERVER = CAServer(BLOCKSERVER_PREFIX)
    SERVER.createPV(BLOCKSERVER_PREFIX, DatabaseServer.generate_pv_info())
    SERVER.createPV(MACROS["$(MYPVPREFIX)"], ExpData.EDPV)

    # Initialise IOC database connection. The server still starts (with
    # reduced functionality) when this fails: ioc_data stays None.
    try:
        ioc_data = IOCData(IocDataSource(SQLAbstraction("iocdb", "iocdb", "$iocdb")), ProcServWrapper(),
                           MACROS["$(MYPVPREFIX)"])
        print_and_log("Connected to IOCData database", INFO_MSG, LOG_TARGET)
    except Exception as e:
        ioc_data = None
        print_and_log("Problem initialising IOCData DB connection: {}".format(traceback.format_exc()),
                      MAJOR_MSG, LOG_TARGET)

    # Initialise experimental database connection; also optional.
    try:
        exp_data = ExpData(MACROS["$(MYPVPREFIX)"], ExpDataSource())
        print_and_log("Connected to experimental details database", INFO_MSG, LOG_TARGET)
    except Exception as e:
        exp_data = None
        print_and_log("Problem connecting to experimental details database: {}".format(traceback.format_exc()),
                      MAJOR_MSG, LOG_TARGET)

    DRIVER = DatabaseServer(SERVER, ioc_data, exp_data, OPTIONS_DIR, BLOCKSERVER_PREFIX)

    # Process CA transactions
    while True:
        try:
            SERVER.process(0.1)
        except Exception as err:
            print_and_log(traceback.format_exc(), MAJOR_MSG)
            break
|
utility.py | """Contains general utility functions to be used by the execution server."""
import os
import signal
import json
import sys
import subprocess
import threading
import platform
def load_configuration():
    """Load ``config.json`` located next to this module.

    Returns:
        The parsed configuration (typically a dict).

    Raises:
        OSError: if the file is missing or unreadable.
        json.JSONDecodeError: if the file contains invalid JSON.
    """
    config_file_name = os.path.join(os.path.dirname(__file__), 'config.json')
    # Use a context manager so the file handle is closed promptly;
    # the original json.load(open(...)) leaked the handle.
    with open(config_file_name) as config_file:
        return json.load(config_file)
def process_commandline_args(register, update):
    """Handle the optional first CLI argument.

    'register' calls *register*, 'update' calls *update*; both exit with
    status 0 on success. Any other argument prints usage and exits with
    status 1. With no argument the function simply returns.
    """
    if len(sys.argv) <= 1:
        return
    action = sys.argv[1]
    if action == 'register':
        register()
        print('Successfully registered.')
        sys.exit(0)
    if action == 'update':
        update()
        print('Successfully updated.')
        sys.exit(0)
    print('Python custom execution server can take one of two optional arguments:')
    print('register - register the execution server with details from config.json')
    print('update - update the details of the execution server to those in config.json')
    sys.exit(1)
def run_background_thread(target, args=()):
    """Start *target(*args)* on a daemon thread (won't block interpreter exit)."""
    worker = threading.Thread(target=target, args=args, daemon=True)
    worker.start()
class ProcessRunner:
    """Runs shell commands as tracked subprocesses that can be stopped by id.

    On Windows a process is killed directly; on POSIX each command runs in
    its own process group (via os.setsid) so the whole group can be signalled.
    """

    def __init__(self):
        # identifier -> currently running Popen object
        self._current_processes = {}
        # identifiers for which stop() was requested; their output is discarded
        self._stopping_processes = []
        self._running_on_windows = platform.system() == 'Windows'

    def execute(self, command, identifier):
        """Run *command*, collecting its combined stdout/stderr.

        Args:
            command: the command line to execute.
            identifier: key under which the process is tracked for stop().

        Returns:
            An (output, returncode) tuple, or None if the process was
            stopped via stop() before completing.
        """
        if self._running_on_windows:
            process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
        else:
            process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, preexec_fn=os.setsid)
        self._current_processes[identifier] = process
        # BUGFIX: process.stdout yields bytes; the original concatenated
        # them onto a str ('' += b'...'), raising TypeError on Python 3.
        # Decode each line and join at the end.
        output_lines = []
        for line in iter(process.stdout.readline, b''):
            output_lines.append(line.decode('utf-8', errors='replace'))
        process.communicate()
        self._current_processes.pop(identifier, None)
        if identifier in self._stopping_processes:
            self._stopping_processes.remove(identifier)
            return None
        return ''.join(output_lines), process.returncode

    def stop(self, identifier):
        """Terminate the process registered under *identifier*, if any."""
        process = self._current_processes.get(identifier)
        if process is not None:
            self._stopping_processes.append(identifier)
            if self._running_on_windows:
                process.kill()
            else:
                # Signal the whole process group started by setsid.
                os.killpg(process.pid, signal.SIGTERM)
|
watcher_monitor.py | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A module of widgets for job monitoring."""
import sys
import time
import threading
from qiskit.providers.jobstatus import JobStatus
from qiskit.providers.ibmq.job.ibmqjob import IBMQJob
from ...utils.converters import duration_difference
def _job_monitor(job: IBMQJob, status: JobStatus, watcher: 'IQXDashboard') -> None:
    """Monitor an ``IBMQJob`` by running ``_job_checker`` in the background.

    Args:
        job: Job to monitor.
        status: Job status.
        watcher: Job watcher instance.
    """
    checker_thread = threading.Thread(target=_job_checker,
                                      args=(job, status, watcher))
    checker_thread.start()
def _job_checker(job: IBMQJob, status: JobStatus, watcher: 'IQXDashboard') -> None:
    """A simple job status checker.

    Polls the job until it reaches a terminal state (DONE, CANCELLED or
    ERROR), pushing queue-position / status / estimated-start-time
    updates to the watcher as they change.

    Args:
        job: The job to check.
        status: Job status.
        watcher: Job watcher instance.
    """
    prev_status_name = None
    prev_queue_pos = None
    # Polling interval in seconds; scaled up with queue position so jobs
    # deep in the queue are polled less often.
    interval = 2
    # Consecutive failures talking to the API; gives up at 5.
    exception_count = 0
    prev_est_time = ''
    while status.name not in ['DONE', 'CANCELLED', 'ERROR']:
        time.sleep(interval)
        try:
            status = job.status()
            exception_count = 0

            if status.name == 'QUEUED':
                queue_pos = job.queue_position()
                # Only push an update when the queue position changed.
                if queue_pos != prev_queue_pos:
                    queue_info = job.queue_info()
                    if queue_info and queue_info.estimated_start_time:
                        est_time = duration_difference(queue_info.estimated_start_time)
                        prev_est_time = est_time
                    else:
                        # No fresh estimate; reuse the last known one.
                        est_time = prev_est_time
                    update_info = (job.job_id(), status.name+' ({})'.format(queue_pos),
                                   est_time, status.value)

                    watcher.update_single_job(update_info)
                    if queue_pos is not None:
                        interval = max(queue_pos, 2)
                    else:
                        interval = 2
                    prev_queue_pos = queue_pos

            elif status.name != prev_status_name:
                msg = status.name
                if msg == 'RUNNING':
                    # Append the first letter of the scheduling mode,
                    # e.g. ' [F]', when the backend reports one.
                    job_mode = job.scheduling_mode()
                    if job_mode:
                        msg += ' [{}]'.format(job_mode[0].upper())

                update_info = (job.job_id(), msg, 0, status.value)

                watcher.update_single_job(update_info)
                interval = 2
                prev_status_name = status.name
        # pylint: disable=broad-except
        except Exception:
            # Tolerate transient API failures; after 5 in a row report
            # failure to the watcher and end this checker thread.
            exception_count += 1
            if exception_count == 5:
                update_info = (job.job_id(), 'NA', 0, "Could not query job.")
                watcher.update_single_job(update_info)
                sys.exit()
|
dokku-installer.py | #!/usr/bin/env python3
import cgi
import json
import os
import re
try:
import SimpleHTTPServer
import SocketServer
except ImportError:
import http.server as SimpleHTTPServer
import socketserver as SocketServer
import subprocess
import sys
import threading
VERSION = 'v0.20.0'
def bytes_to_string(b):
    """Return *b* as a whitespace-stripped str.

    Bytes input is decoded with stdout's encoding, falling back to
    utf-8 when that is unavailable; str input is just stripped.
    """
    if type(b) == bytes:
        b = b.decode(sys.stdout.encoding or 'utf-8')
    return b.strip()
def string_to_bytes(s):
    """Return *s* as bytes.

    Str input is encoded with stdout's encoding, falling back to utf-8
    when that is unavailable; bytes input is returned unchanged (and,
    unlike bytes_to_string, nothing is stripped).
    """
    if type(s) == str:
        s = s.encode(sys.stdout.encoding or 'utf-8')
    return s
# Best-effort hostname detection: prefer $HOSTNAME when it resolves in
# DNS, otherwise fall back to the public IP reported by icanhazip.com.
# Left empty if the probe command fails entirely.
hostname = ''
try:
    command = "bash -c '[[ $(dig +short $HOSTNAME) ]] && echo $HOSTNAME || wget -q -O - icanhazip.com'"
    hostname = bytes_to_string(subprocess.check_output(command, shell=True))
except subprocess.CalledProcessError:
    pass
# Locate the authorized_keys file used to pre-populate the admin-key form.
# BUGFIX: honor an explicit KEY_FILE override from the environment — the
# original read KEY_FILE and then unconditionally overwrote it, making
# the environment variable dead code.
key_file = os.getenv('KEY_FILE', None)
if key_file is None:
    if os.path.isfile('/home/ec2-user/.ssh/authorized_keys'):
        key_file = '/home/ec2-user/.ssh/authorized_keys'
    elif os.path.isfile('/home/ubuntu/.ssh/authorized_keys'):
        key_file = '/home/ubuntu/.ssh/authorized_keys'
    else:
        key_file = '/root/.ssh/authorized_keys'

# Pre-load any existing admin keys, one per line; empty on any failure.
admin_keys = []
if os.path.isfile(key_file):
    try:
        command = "cat {0}".format(key_file)
        admin_keys = bytes_to_string(subprocess.check_output(command, shell=True)).strip().split("\n")
    except subprocess.CalledProcessError:
        pass
# CSS display value for the ufw warning section of the page: hidden
# ('none') when ufw is inactive or not installed.
ufw_display = 'block'
try:
    command = "sudo ufw status"
    ufw_output = bytes_to_string(subprocess.check_output(command, shell=True).strip())
    if "inactive" in ufw_output:
        ufw_display = 'none'
except subprocess.CalledProcessError:
    ufw_display = 'none'

# Detect openresty vs stock nginx; remember the conf dir and init script
# so check_boot()/DeleteInstallerThread manage the right installation.
nginx_dir = '/etc/nginx'
nginx_init = '/etc/init.d/nginx'
try:
    command = "test -x /usr/bin/openresty"
    subprocess.check_output(command, shell=True)
    nginx_dir = '/usr/local/openresty/nginx/conf'
    nginx_init = '/etc/init.d/openresty'
except subprocess.CalledProcessError:
    pass
def check_boot():
    """When invoked with the 'onboot' argument, install upstart/systemd/nginx
    hooks that relaunch this installer in self-destruct mode, then exit.

    A no-op (returns immediately) unless 'onboot' is on the command line.
    """
    if 'onboot' not in sys.argv:
        return
    init_dir = os.getenv('INIT_DIR', '/etc/init')
    systemd_dir = os.getenv('SYSTEMD_DIR', '/etc/systemd/system')
    nginx_conf_dir = os.getenv('NGINX_CONF_DIR', '{0}/conf.d'.format(nginx_dir))
    installer_path = os.path.abspath(__file__)
    if os.path.exists(init_dir):
        # Upstart job: relaunch the installer on boot.
        upstart_conf = ("start on runlevel [2345]\n"
                        "exec {0} selfdestruct\n".format(installer_path))
        with open('{0}/dokku-installer.conf'.format(init_dir), 'w') as f:
            f.write(upstart_conf)
    if os.path.exists(systemd_dir):
        # Systemd unit with the same purpose.
        unit = ("[Unit]\n"
                "Description=Dokku web-installer\n"
                "\n"
                "[Service]\n"
                "ExecStart={0} selfdestruct\n"
                "\n"
                "[Install]\n"
                "WantedBy=multi-user.target\n"
                "WantedBy=graphical.target\n".format(installer_path))
        with open('{0}/dokku-installer.service'.format(systemd_dir), 'w') as f:
            f.write(unit)
    if os.path.exists(nginx_conf_dir):
        # Proxy port 80 to the installer until it self-destructs.
        proxy_conf = ("upstream dokku-installer { server 127.0.0.1:2000; }\n"
                      "server {\n"
                      "  listen 80;\n"
                      "  location / {\n"
                      "    proxy_pass http://dokku-installer;\n"
                      "  }\n"
                      "}\n")
        with open('{0}/dokku-installer.conf'.format(nginx_conf_dir), 'w') as f:
            f.write(proxy_conf)
    subprocess.call('rm -f {0}/sites-enabled/*'.format(nginx_dir), shell=True)
    sys.exit(0)
class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """HTTP handler that serves the installer page (GET) and applies the
    submitted setup (POST /setup)."""

    def write_content(self, content):
        """Write *content* to the response, encoding str to bytes if needed."""
        try:
            self.wfile.write(content)
        except TypeError:
            self.wfile.write(string_to_bytes(content))

    def do_GET(self):
        # Render the installer page with the detected host, key and
        # firewall details substituted in.
        content = PAGE.replace('{VERSION}', VERSION)
        content = content.replace('{UFW_DISPLAY}', ufw_display)
        content = content.replace('{HOSTNAME}', hostname)
        content = content.replace('{AUTHORIZED_KEYS_LOCATION}', key_file)
        content = content.replace('{ADMIN_KEYS}', "\n".join(admin_keys))
        self.send_response(200)
        self.end_headers()
        self.write_content(content)

    def do_POST(self):
        """Apply the submitted setup: write VHOST/HOSTNAME files, register
        each admin SSH key with sshcommand, and preseed debconf answers."""
        if self.path not in ['/setup', '/setup/']:
            return
        params = cgi.FieldStorage(fp=self.rfile,
                                  headers=self.headers,
                                  environ={
                                      'REQUEST_METHOD': 'POST',
                                      'CONTENT_TYPE': self.headers['Content-Type']})
        vhost_enable = 'false'
        dokku_root = os.getenv('DOKKU_ROOT', '/home/dokku')
        if 'vhost' in params and params['vhost'].value == 'true':
            vhost_enable = 'true'
            with open('{0}/VHOST'.format(dokku_root), 'w') as f:
                f.write(params['hostname'].value)
        else:
            try:
                os.remove('{0}/VHOST'.format(dokku_root))
            except OSError:
                pass
        with open('{0}/HOSTNAME'.format(dokku_root), 'w') as f:
            f.write(params['hostname'].value)
        for (index, key) in enumerate(params['keys'].value.splitlines(), 1):
            # Pick a unique sshcommand user name. NOTE(review): once any
            # numbered 'admin' user exists, new keys go into the
            # 'web-admin' namespace, continuing from the highest existing
            # number — confirm this matches the intended naming scheme.
            user = 'admin'
            if self.admin_user_exists() is not None:
                user = 'web-admin'
                if self.web_admin_user_exists() is not None:
                    index = int(self.web_admin_user_exists()) + 1
                elif self.web_admin_user_exists() is None:
                    index = 1
            elif self.admin_user_exists() is None:
                pass
            else:
                index = int(self.admin_user_exists()) + 1
            user = user + str(index)
            command = ['sshcommand', 'acl-add', 'dokku', user]
            proc = subprocess.Popen(command, stdin=subprocess.PIPE)
            try:
                proc.stdin.write(key)
            except TypeError:
                proc.stdin.write(string_to_bytes(key))
            proc.stdin.close()
            proc.wait()
        set_debconf_selection('boolean', 'nginx_enable', 'true')
        set_debconf_selection('boolean', 'skip_key_file', 'true')
        set_debconf_selection('boolean', 'vhost_enable', vhost_enable)
        set_debconf_selection('boolean', 'web_config', 'false')
        set_debconf_selection('string', 'hostname', params['hostname'].value)
        if 'selfdestruct' in sys.argv:
            DeleteInstallerThread()
        content = json.dumps({'status': 'ok'})
        self.send_response(200)
        self.end_headers()
        self.write_content(content)

    def web_admin_user_exists(self):
        """Return the highest existing web-admin<N> index, or None."""
        # BUGFIX: raw string — '\d' in a plain string is an invalid
        # escape sequence (SyntaxWarning on modern Python).
        return self.user_exists(r'web-admin(\d+)')

    def admin_user_exists(self):
        """Return the highest existing admin<N> index, or None."""
        return self.user_exists(r'admin(\d+)')

    def user_exists(self, name):
        """Scan 'dokku ssh-keys:list' for users matching the *name* regex
        (which must capture the numeric suffix).

        Returns:
            The highest captured number, or None when no user matches.
        """
        command = 'dokku ssh-keys:list'
        pattern = re.compile(r'NAME="' + name + '"')
        proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
        max_num = 0
        exists = False
        for line in proc.stdout:
            m = pattern.search(bytes_to_string(line))
            if m:
                # A numbered user of this kind exists
                exists = True
                max_num = max(max_num, int(m.group(1)))
        if exists:
            return max_num
        else:
            return None
def set_debconf_selection(debconf_type, key, value):
    """Pre-seed a debconf answer ``dokku dokku/<key> <type> <value>``.

    Only runs on Debian-family systems (detected via /etc/os-release);
    silently does nothing elsewhere or when debconf-set-selections fails.
    """
    found = False
    with open('/etc/os-release', 'r') as f:
        for line in f:
            if 'debian' in line:
                found = True
    if not found:
        # Not a Debian derivative: debconf is not applicable.
        return
    # Pipe the selection line through `echo` into debconf-set-selections;
    # written this way for py2/py3 compatibility (no `input=` kwarg needed).
    ps = subprocess.Popen(['echo', 'dokku dokku/{0} {1} {2}'.format(
        key, debconf_type, value
    )], stdout=subprocess.PIPE)
    try:
        subprocess.check_output(['debconf-set-selections'], stdin=ps.stdout)
    except subprocess.CalledProcessError:
        # Best effort: a failed seed should not abort the installer.
        pass
    ps.wait()
class DeleteInstallerThread(object):
    """Self-destruct helper: removes the installer's nginx config and its
    init/systemd service from a daemon thread so the HTTP response that
    triggered it can still be delivered."""

    def __init__(self, interval=1):
        worker = threading.Thread(target=self.run, args=())
        worker.daemon = True
        worker.start()

    def run(self):
        # Both commands are best-effort; any failure is ignored so partial
        # cleanup never crashes the thread.  nginx_dir/nginx_init are
        # module-level settings defined earlier in this file.
        cleanup_commands = (
            "rm {0}/conf.d/dokku-installer.conf && {1} stop && {1} start".format(nginx_dir, nginx_init),
            "rm -f /etc/init/dokku-installer.conf /etc/systemd/system/dokku-installer.service && (stop dokku-installer || systemctl stop dokku-installer.service)",
        )
        for cleanup in cleanup_commands:
            try:
                subprocess.call(cleanup, shell=True)
            except:
                pass
def main():
    """Entry point: verify boot prerequisites, then serve the installer UI
    on $PORT (default 2000) until interrupted."""
    check_boot()
    listen_port = int(os.getenv('PORT', 2000))
    server = SocketServer.TCPServer(("", listen_port), GetHandler)
    print("Listening on 0.0.0.0:{0}, CTRL+C to stop".format(listen_port))
    server.serve_forever()
PAGE = """
<html>
<head>
<meta charset="utf-8" />
<title>Dokku Setup</title>
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" integrity="sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO" crossorigin="anonymous">
<style>
.bd-callout {
padding: 1.25rem;
margin-top: 1.25rem;
margin-bottom: 1.25rem;
border: 1px solid #eee;
border-left-width: .25rem;
border-radius: .25rem;
}
.bd-callout p:last-child {
margin-bottom: 0;
}
.bd-callout-info {
border-left-color: #5bc0de;
}
pre {
font-size: 80%;
margin-bottom: 0;
}
h1 small {
font-size: 50%;
}
h5 {
font-size: 1rem;
}
.container {
width: 640px;
}
.result {
padding-left: 20px;
}
input.form-control, textarea.form-control {
background-color: #fafbfc;
font-size: 14px;
}
input.form-control::placeholder, textarea.form-control::placeholder {
color: #adb2b8
}
</style>
</head>
<body>
<div class="container">
<form id="form" role="form">
<h1 class="pt-3">Dokku Setup <small class="text-muted">{VERSION}</small></h1>
<div class="alert alert-warning small" role="alert">
<strong>Warning:</strong> The SSH key filled out here can grant root access to the server. Please complete the setup as soon as possible.
</div>
<div class="row">
<div class="col">
<h3>Admin Access</h3>
<div class="form-group">
<label for="key">Public SSH Keys</label><br />
<textarea class="form-control" name="keys" rows="5" id="key" placeholder="Begins with 'ssh-rsa', 'ssh-dss', 'ssh-ed25519', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384', or 'ecdsa-sha2-nistp521'">{ADMIN_KEYS}</textarea>
<small class="form-text text-muted">Public keys allow users to ssh onto the server as the <code>dokku</code> user, as well as remotely execute Dokku commands. They are currently auto-populated from: <code>{AUTHORIZED_KEYS_LOCATION}</code>, and can be changed later via the <a href="http://dokku.viewdocs.io/dokku/deployment/user-management/" target="_blank"><code>dokku ssh-keys</code></a> plugin.</small>
</div>
</div>
</div>
<div class="row">
<div class="col">
<h3>Hostname Configuration</h3>
<div class="form-group">
<label for="hostname">Hostname</label>
<input class="form-control" type="text" id="hostname" name="hostname" value="{HOSTNAME}" placeholder="A hostname or ip address such as {HOSTNAME}" />
<small class="form-text text-muted">This will be used as the default host for all applications, and can be changed later via the <a href="http://dokku.viewdocs.io/dokku/configuration/domains/" target="_blank"><code>dokku domains:set-global</code></a> command.</small>
</div>
<div class="form-check">
<input class="form-check-input" type="checkbox" id="vhost" name="vhost" value="true">
<label class="form-check-label" for="vhost">Use virtualhost naming for apps</label>
<small class="form-text text-muted">When enabled, Nginx will be run on port 80 and proxy requests to apps based on hostname.</small>
<small class="form-text text-muted">When disabled, a specific port will be setup for each application on first deploy, and requests to that port will be proxied to the relevant app.</small>
</div>
<div class="alert alert-warning small mt-3 d-{UFW_DISPLAY}" role="alert">
<strong>Warning:</strong> UFW is active. To allow traffic to specific ports, run <code>sudo ufw allow PORT</code> for the port in question.
</div>
<div class="bd-callout bd-callout-info">
<h5>What will app URLs look like?</h5>
<pre><code id="example">http://hostname:port</code></pre>
</div>
</div>
</div>
<button type="button" onclick="setup()" class="btn btn-primary">Finish Setup</button> <span class="result"></span>
</form>
</div>
<div id="error-output"></div>
<script>
var $ = document.querySelector.bind(document)
function setup() {
if ($("#key").value.trim() == "") {
alert("Your admin public key cannot be blank.")
return
}
if ($("#hostname").value.trim() == "") {
alert("Your hostname cannot be blank.")
return
}
var data = new FormData($("#form"))
var inputs = [].slice.call(document.querySelectorAll("input, textarea, button"))
inputs.forEach(function (input) {
input.disabled = true
})
var result = $(".result")
fetch("/setup", {method: "POST", body: data})
.then(function(response) {
if (response.ok) {
return response.json()
} else {
throw new Error('Server returned error')
}
})
.then(function(response) {
result.classList.add("text-success");
result.textContent = "Success! Redirecting in 3 seconds. .."
setTimeout(function() {
window.location.href = "http://dokku.viewdocs.io/dokku~{VERSION}/deployment/application-deployment/";
}, 3000);
})
.catch(function (error) {
result.classList.add("text-danger");
result.textContent = "Could not send the request"
})
}
function update() {
if ($("#vhost").matches(":checked") && $("#hostname").value.match(/^(\d{1,3}\.){3}\d{1,3}$/)) {
alert("In order to use virtualhost naming, the hostname must not be an IP but a valid domain name.")
$("#vhost").checked = false;
}
if ($("#vhost").matches(':checked')) {
$("#example").textContent = "http://<app-name>."+$("#hostname").value
} else {
$("#example").textContent = "http://"+$("#hostname").value+":<app-port>"
}
}
$("#vhost").addEventListener("change", update);
$("#hostname").addEventListener("input", update);
update();
</script>
</body>
</html>
"""
# Run the installer web UI only when executed as a script.
if __name__ == "__main__":
    main()
|
uploader-gui.py | #!/usr/bin/env python3
# Version banner printed at startup and shown in the window title.
VERSION = " v1.10"
VERSION_DATE = " Apr.2020 - Jun.2021"
print("Arduboy Uploader GUI/FX activator/flasher" + VERSION + VERSION_DATE + " by Mr.Blinky running\n")
from tkinter import filedialog
from tkinter import *
from tkinter.ttk import *
from threading import Thread
from tkinter import messagebox
import sys
import os
import time
## defaults ##
# The same script serves three tools; the mode is picked from the script's
# own filename ("activator"/"flasher"/"ssd1309" substrings) and may be
# overridden by the first command line argument below.
fxActivator = "activator" in os.path.basename(sys.argv[0])
fxFlasher = "flasher" in os.path.basename(sys.argv[0])
ssd1309patch = "ssd1309" in os.path.basename(sys.argv[0])
if len(sys.argv) > 1:
    if sys.argv[1].lower() == 'uploader' : fxActivator = False
    elif sys.argv[1].lower() == 'activator' : fxActivator = True
    elif sys.argv[1].lower() == 'flasher' :
        fxActivator = False
        fxFlasher = True
    # any argument 'ssd1309' switches the display patch default on
    for arg in sys.argv:
        if arg.lower() == 'ssd1309':
            ssd1309patch = True
# Directory the script lives in; used as the default location for all files.
path = os.path.dirname(os.path.abspath(sys.argv[0]))+os.sep
if fxActivator:
    title = "FX Activator"
    defaultAppFilename = path + "arduboy-activator.hex"
    defaultFlashFilename = path + "flash-image.bin"
elif fxFlasher:
    title = "FX Flasher"
    defaultAppFilename = ""
    defaultFlashFilename = path + "flashcart-image91820.bin"
else:
    title = "Arduboy uploader GUI"
    defaultAppFilename = path + "hex-file.hex"
    defaultFlashFilename = path + "flash-image.bin"
defaultDevDataFilename = path + "fxdata.bin"
# Remembered "last used" directories for the various file pickers.
selectAppInitialDir = path
selectFlashInitialDir = path
selectDevDataInitialDir = path
selectEEPROMinitialDir = path
# PySerial is the only third-party dependency; exit with an install hint
# (after a short pause so a console window stays readable) when it is missing.
try:
    from serial.tools.list_ports import comports
    from serial import Serial
except:
    print("The PySerial module is required but not installed!")
    print("Use 'python -m pip install pyserial' from the commandline to install.")
    time.sleep(3)
    sys.exit()
# Display init byte sequence embedded in Arduboy sketches; searched for in
# binaries so the SSD1309 patch can locate and modify the display setup.
lcdBootProgram = b"\xD5\xF0\x8D\x14\xA1\xC8\x81\xCF\xD9\xF1\xAF\x20\x00"
# USB VID:PID strings for supported boards.  Listed in pairs per board;
# getComPort relies on even list indices being the bootloader-mode PID —
# TODO(review): confirm the pair ordering for every entry.
compatibledevices = [
    #Arduboy Leonardo
    "VID:PID=2341:0036", "VID:PID=2341:8036",
    "VID:PID=2A03:0036", "VID:PID=2A03:8036",
    #Arduboy Micro
    "VID:PID=2341:0037", "VID:PID=2341:8037",
    "VID:PID=2A03:0037", "VID:PID=2A03:8037",
    #Genuino Micro
    "VID:PID=2341:0237", "VID:PID=2341:8237",
    #Sparkfun Pro Micro 5V
    "VID:PID=1B4F:9205", "VID:PID=1B4F:9206",
    #Adafruit ItsyBitsy 5V
    "VID:PID=239A:000E", "VID:PID=239A:800E",
]
# JEDEC manufacturer ID byte -> vendor name, for flash cart reporting.
manufacturers = {
    0x01 : "Spansion",
    0x14 : "Cypress",
    0x1C : "EON",
    0x1F : "Adesto(Atmel)",
    0x20 : "Micron",
    0x37 : "AMIC",
    0x9D : "ISSI",
    0xC2 : "General Plus",
    0xC8 : "Giga Device",
    0xBF : "Microchip",
    0xEF : "Winbond"
}
# Flash cart geometry used by the block read/write loops below.
PAGESIZE = 256
BLOCKSIZE = 65536
PAGES_PER_BLOCK = BLOCKSIZE // PAGESIZE
MAX_PAGES = 65536
# Updated by getComPort(): True when the matched PID is the bootloader-mode one.
bootloader_active = False
def addLog(s):
    """Append *s* as a new line to the log pane and keep the newest line visible."""
    line = "{}\n".format(s)
    log.insert(END, line)
    log.see("end")
def addLogRed(s):
    """Append *s* to the log pane using the bold red error tag."""
    line = "{}\n".format(s)
    log.insert(END, line, 'red')
    log.see("end")
def addLogGreen(s):
    """Append *s* to the log pane using the bold green success tag."""
    line = "{}\n".format(s)
    log.insert(END, line, 'green')
    log.see("end")
def addLogBlack(s):
    """Append *s* to the log pane using the bold black emphasis tag."""
    line = "{}\n".format(s)
    log.insert(END, line, 'black')
    log.see("end")
def delayedExit():
    """Give the user a moment to read the last message, then terminate."""
    time.sleep(3)
    raise SystemExit
def getComPort(verbose):
    """Return the port name of the first connected Arduboy-compatible device.

    Also sets the module-global ``bootloader_active`` flag.  Returns None
    (implicitly) when no compatible device is attached.
    """
    global bootloader_active
    devicelist = list(comports())
    for device in devicelist:
        # device is a pyserial ListPortInfo-like tuple:
        # [0]=port name, [1]=description, [2]=hardware id ("VID:PID=....")
        for vidpid in compatibledevices:
            if vidpid in device[2]:
                port=device[0]
                # compatibledevices lists PIDs in pairs; an even index is
                # assumed to be the bootloader-mode PID — TODO(review):
                # confirm pair ordering in the compatibledevices table.
                bootloader_active = (compatibledevices.index(vidpid) & 1) == 0
                if verbose : addLog("Found {} at port {}".format(device[1],port))
                return port
    if verbose : addLogRed("Arduboy not found! Please Check Arduboy is switched on or try a different USB cable.")
def bootloaderStart():
    """Locate the Arduboy and open its bootloader serial port.

    If the device is running a sketch, triggers a reset into bootloader mode
    by opening the port at 1200 baud, then waits for the USB re-enumeration.
    On success the module-global ``bootloader`` holds an open Serial object
    and True is returned; on any failure an error is logged and False is
    returned.
    """
    global bootloader
    ## find and connect to Arduboy in bootloader mode ##
    port = getComPort(True)
    if port is None :
        return False
    if not bootloader_active:
        addLog("Selecting bootloader mode...")
        try:
            # Opening at 1200 baud and closing is the standard Caterina
            # trick to reset the board into its bootloader.
            bootloader = Serial(port,1200)
            bootloader.close()
            time.sleep(0.5)
            #wait for disconnect and reconnect in bootloader mode
            while getComPort(False) == port :
                time.sleep(0.1)
                if bootloader_active: break
            while getComPort(False) is None : time.sleep(0.1)
            port = getComPort(True)
        except:
            # broad except kept on purpose: pyserial may raise
            # SerialException or OSError depending on platform
            addLogRed("Error accessing port {}".format(port))
            return False
    log.insert(END, "Opening port..")
    log.see("end")
    root.update_idletasks()
    # The freshly re-enumerated port may not be openable immediately;
    # retry for up to ~10 seconds before giving up.
    for retries in range(20):
        try:
            time.sleep(0.1)
            bootloader = Serial(port, 57600)
            break
        except:
            if retries == 19:
                addLogRed(" Failed!")
                return False
            log.insert(END, ".")
            log.see("end")
            root.update_idletasks()
            time.sleep(0.4)
    # Fixed log message typo: was "succes".
    addLog("success")
    return True
def getVersion():
    """Query the bootloader software version ('V' command) as an integer."""
    bootloader.write(b"V")
    raw = bootloader.read(2)
    return int(raw)
def getJedecID():
    """Read the flash cart's 3-byte JEDEC id via the 'j' bootloader command.

    The id is read twice (with a pause) and must match, to filter out bus
    noise; all-zero or all-0xFF responses mean no flash chip is present.

    Returns a 3-byte bytearray, or None when no flash cart is detected —
    callers MUST check for None before indexing the result.
    """
    bootloader.write(b"j")
    jedec_id = bootloader.read(3)
    time.sleep(0.5)
    bootloader.write(b"j")
    jedec_id2 = bootloader.read(3)
    if jedec_id2 != jedec_id or jedec_id == b'\x00\x00\x00' or jedec_id == b'\xFF\xFF\xFF':
        addLogRed("No flash cart detected.")
        return
    return bytearray(jedec_id)
def bootloaderExit():
    """Leave the bootloader menu ('E' command) and release the serial port.

    The single acknowledge byte is read before closing so the command is
    known to have been processed.
    """
    global bootloader
    bootloader.write(b"E")
    bootloader.read(1)
    bootloader.close()
def disableButtons():
    """Grey out every upload button that exists in the current mode while a
    transfer is running."""
    buttons = [flashButton]
    if not fxFlasher:
        buttons.append(hexButton)
    if not fxFlasher and not fxActivator:
        buttons.append(devDataButton)
    for button in buttons:
        button['state'] = DISABLED
def enableButtons():
    """Re-enable every upload button that exists in the current mode after a
    transfer has finished or aborted."""
    buttons = [flashButton]
    if not fxFlasher:
        buttons.append(hexButton)
    if not fxFlasher and not fxActivator:
        buttons.append(devDataButton)
    for button in buttons:
        button['state'] = NORMAL
## Uploader ####################################################################
def uploadHexfile():
    """Parse the selected Intel-HEX file and flash it to the ATmega32u4.

    Loads the hex records into a 32K RAM image, tracks which 128-byte flash
    pages are actually used, optionally patches the display init sequence for
    SSD1309 screens, refuses uploads that would clobber a locked Caterina
    bootloader, then writes and verifies only the used pages.
    Runs on a worker thread; all UI access goes through the log helpers.
    """
    disableButtons()
    hexfile = appFilename.get()
    if not os.path.isfile(hexfile) :
        addLogRed('File not found! "{}"\n'.format(hexfile))
        enableButtons()
        return
    addLog('\nLoading "{}"\n'.format(hexfile))
    f = open(hexfile,"r")
    records = f.readlines()
    f.close()
    flash_addr = 0
    # 32K flash image, pre-filled with the erased-flash value 0xFF.
    flash_data = bytearray(b'\xFF' * 32768)
    # flash_page is incremented in the loops below but never read —
    # NOTE(review): appears to be leftover progress bookkeeping.
    flash_page = 1
    flash_page_count = 0
    flash_page_used = [False] * 256
    caterina_overwrite = False
    for rcd in records :
        # NOTE(review): readlines() keeps the trailing newline, so this
        # exact match only fires when the EOF record is the last line of a
        # file without a final newline; EOF records are otherwise skipped
        # by the rcd_typ/rcd_len guard below.
        if rcd == ":00000001FF" : break
        if rcd[0] == ":" :
            # Intel-HEX record layout: :LLAAAATT<data>CC
            rcd_len = int(rcd[1:3],16)
            rcd_typ = int(rcd[7:9],16)
            rcd_addr = int(rcd[3:7],16)
            rcd_sum = int(rcd[9+rcd_len*2:11+rcd_len*2],16)
            if (rcd_typ == 0) and (rcd_len > 0) :
                flash_addr = rcd_addr
                # mark the 128-byte pages touched by this record
                flash_page_used[int(rcd_addr / 128)] = True
                flash_page_used[int((rcd_addr + rcd_len - 1) / 128)] = True
                checksum = rcd_sum
                # sum all header+data bytes; a valid record sums to 0 mod 256
                for i in range(1,9+rcd_len*2, 2) :
                    byte = int(rcd[i:i+2],16)
                    checksum = (checksum + byte) & 0xFF
                    if i >= 9:
                        # data bytes start at column 9
                        flash_data[flash_addr] = byte
                        flash_addr += 1
                if checksum != 0 :
                    addLogRed("Hex file contains errors. upload aborted.")
                    enableButtons()
                    return
    # check and apply patch for SSD1309
    if applySsd1309patch.get():
        lcdBootProgram_addr = flash_data.find(lcdBootProgram)
        if lcdBootProgram_addr >= 0:
            flash_data[lcdBootProgram_addr+2] = 0xE3;
            flash_data[lcdBootProgram_addr+3] = 0xE3;
            addLog("Found lcdBootProgram in hex file, upload will be patched for SSD1309 displays\n")
    ## check for data in catarina bootloader area ##
    for i in range (256) :
        if flash_page_used[i] :
            flash_page_count += 1
            # pages >= 224 (top 4K) overlap the Caterina bootloader area
            if i >= 224 :
                caterina_overwrite = True
    progressbar['value'] = 0
    progressbar['maximum'] = 512
    if not bootloaderStart():
        enableButtons()
        return
    #test if bootloader can and will be overwritten by hex file
    bootloader.write(b"V") #get bootloader software version
    if bootloader.read(2) == b"10" : #original caterina 1.0 bootloader
        bootloader.write(b"r") #read lock bits
        if (ord(bootloader.read(1)) & 0x10 != 0) and caterina_overwrite :
            addLogRed("This upload will most likely corrupt the catarina bootloader. Upload aborted.")
            bootloaderExit()
            enableButtons()
            return
    ## Flash ##
    addLog("Flashing {} bytes. ({} flash pages)".format(flash_page_count * 128, flash_page_count))
    for i in range (256) :
        if flash_page_used[i] :
            # 'A' sets the word address; pages are 128 bytes = 64 words
            bootloader.write(bytearray([ord("A"), i >> 2, (i & 3) << 6]))
            bootloader.read(1)
            bootloader.write(b"B\x00\x80F")
            bootloader.write(flash_data[i * 128 : (i + 1) * 128])
            bootloader.read(1)
            flash_page += 1
            progressbar.step()
            root.update_idletasks()
    ## Verify ##
    addLog("Verifying {} bytes. ({} flash pages)".format(flash_page_count * 128, flash_page_count))
    for i in range (256) :
        if flash_page_used[i] :
            bootloader.write(bytearray([ord("A"), i >> 2, (i & 3) << 6]))
            bootloader.read(1)
            bootloader.write(b"g\x00\x80F")
            if bootloader.read(128) != flash_data[i * 128 : (i + 1) * 128] :
                addLogRed("Verify failed at address {:04X}. Upload unsuccessful.\n".format(i * 128))
                bootloaderExit()
                enableButtons()
                return
            flash_page += 1
            progressbar['value'] = progressbar['value'] + 1
            root.update_idletasks()
    addLogGreen("\nUpload success!!")
    bootloaderExit()
    enableButtons()
## flasher #####################################################################
def flashImage():
    """Write a complete flash cart image to the FX flash chip.

    Loads the selected .bin file, optionally patches it for SSD1309
    displays, pads it to a whole page, preserves the tail of a partially
    overwritten final block, then writes (and optionally verifies) the
    image block by block through the bootloader.  Runs on a worker thread.
    """
    disableButtons()
    progressbar['value'] = 0
    filename = flashFilename.get()
    ## load and pad imagedata to multiple of PAGESIZE bytes ##
    if not os.path.isfile(filename):
        addLogRed('File not found! "{}" \n'.format(filename))
        enableButtons()
        # fix: was `return()` (returned an empty tuple for no reason)
        return
    addLog('\nLoading flash image from file "{}"\n'.format(filename))
    # Bug fix: the original did `f.close` without parentheses, so the file
    # was never explicitly closed; a context manager guarantees it.
    with open(filename, "rb") as f:
        flashdata = bytearray(f.read())
    if applySsd1309patch.get():
        addLog("Patching flash image for SSD1309 displays...\n")
        # patch every occurrence of the display init sequence in the image
        lcdBootProgram_addr = 0
        while lcdBootProgram_addr >= 0:
            lcdBootProgram_addr = flashdata.find(lcdBootProgram, lcdBootProgram_addr)
            if lcdBootProgram_addr >= 0:
                flashdata[lcdBootProgram_addr+2] = 0xE3
                flashdata[lcdBootProgram_addr+3] = 0xE3
    if (len(flashdata) % PAGESIZE != 0):
        flashdata += b'\xFF' * (PAGESIZE - (len(flashdata) % PAGESIZE))
    pagenumber = 0
    if not bootloaderStart():
        enableButtons()
        return
    #check version
    if getVersion() < 13:
        addLogRed("Bootloader does not support writing to flash. Write aborted!\nPlease update bootloader first.")
        enableButtons()
        return
    ## detect flash cart ##
    jedec_id = getJedecID()
    # Bug fix: getJedecID() returns None when no cart is detected; the
    # original crashed with a TypeError on jedec_id[0] in that case.
    if jedec_id is None:
        enableButtons()
        return
    if jedec_id[0] in manufacturers.keys():
        manufacturer = manufacturers[jedec_id[0]]
    else:
        manufacturer = "unknown"
    capacity = 1 << jedec_id[2]
    addLog("\nFlash JEDEC ID : {:02X}{:02X}{:02X}".format(jedec_id[0],jedec_id[1],jedec_id[2]))
    addLog("Flash Manufacturer: {}".format(manufacturer))
    if manufacturer != "unknown": addLog("Flash capacity : {} KB\n".format(capacity // 1024))
    # when ending partially in a block, preserve the ending of old block data
    if len(flashdata) % BLOCKSIZE:
        blocklen = BLOCKSIZE - len(flashdata) % BLOCKSIZE
        blockaddr = pagenumber + len(flashdata) // PAGESIZE
        #read partial block data end
        bootloader.write(bytearray([ord("A"), blockaddr >> 8, blockaddr & 0xFF]))
        bootloader.read(1)
        bootloader.write(bytearray([ord("g"), (blocklen >> 8) & 0xFF, blocklen & 0xFF,ord("C")]))
        flashdata += bootloader.read(blocklen)
    ## write to flash cart ##
    verifyAfterWrite = flashVerify.get()
    blocks = len(flashdata) // BLOCKSIZE
    log.insert(END,"writing {} blocks/{}KB to flash".format(blocks, len(flashdata) // 1024))
    if verifyAfterWrite:
        addLog(" with verify")
    else:
        addLog("")
    progressbar['maximum'] = 2 * blocks
    for block in range (blocks):
        # RGB LED doubles as an activity indicator during the transfer
        if (block & 1 == 0) or verifyAfterWrite:
            bootloader.write(b"x\xC2") #RGB LED RED, buttons disabled
        else:
            bootloader.write(b"x\xC0") #RGB LED OFF, buttons disabled
        bootloader.read(1)
        blockaddr = pagenumber + block * BLOCKSIZE // PAGESIZE
        blocklen = BLOCKSIZE
        #write block
        bootloader.write(bytearray([ord("A"), blockaddr >> 8, blockaddr & 0xFF]))
        bootloader.read(1)
        bootloader.write(bytearray([ord("B"), (blocklen >> 8) & 0xFF, blocklen & 0xFF,ord("C")]))
        bootloader.write(flashdata[block * BLOCKSIZE : block * BLOCKSIZE + blocklen])
        bootloader.read(1)
        progressbar.step()
        root.update_idletasks()
        if verifyAfterWrite:
            bootloader.write(b"x\xC1") #RGB BLUE RED, buttons disabled
            bootloader.read(1)
            bootloader.write(bytearray([ord("A"), blockaddr >> 8, blockaddr & 0xFF]))
            bootloader.read(1)
            bootloader.write(bytearray([ord("g"), (blocklen >> 8) & 0xFF, blocklen & 0xFF,ord("C")]))
            if bootloader.read(blocklen) != flashdata[block * BLOCKSIZE : block * BLOCKSIZE + blocklen]:
                addLogRed(" verify failed!\n\nWrite aborted.")
                break
            progressbar['value'] = progressbar['value'] + 1
            root.update_idletasks()
    #write complete
    bootloader.write(b"x\xC4")#RGB LED GREEN, buttons disabled
    bootloader.read(1)
    time.sleep(0.5) #keep LED on for half a second
    bootloader.write(b"x\x40")#RGB LED OFF, buttons enabled
    bootloader.read(1)
    bootloader.close() # Stay in bootloader Menu
    #bootloaderExit() # Exit bootloader menu and start sketch
    addLogGreen("\nUploaded flash image successfully!!\n")
    if not fxFlasher:
        # fixed user-visible typo: was "catagogories"
        addLog("Press LEFT or RIGHT on Arduboy to browse through the game categories.")
        addLog("Press UP or DOWN to select a game followed by A or B to load and play a game.")
        addLog("Press A or B on the Loader title screen to play last loaded game.")
    enableButtons()
def flashDevData():
    """Write a development data image to the END of the flash cart.

    The data is placed in the last pages of the 16MB address space; the
    computed first page is reported so the sketch can reference it via
    PROGRAM_DATA_PAGE.  When the data does not start on a block boundary the
    beginning of the old block is preserved.  Runs on a worker thread.
    """
    disableButtons()
    progressbar['value'] = 0
    filename = devDataFilename.get()
    ## load and pad imagedata to multiple of PAGESIZE bytes ##
    if not os.path.isfile(filename):
        addLogRed('File not found! "{}" \n'.format(filename))
        enableButtons()
        # fix: was `return()` (returned an empty tuple for no reason)
        return
    addLog('\nLoading development data from file "{}"\n'.format(filename))
    # Bug fix: the original did `f.close` without parentheses, so the file
    # was never explicitly closed; a context manager guarantees it.
    with open(filename, "rb") as f:
        flashdata = bytearray(f.read())
    if (len(flashdata) % PAGESIZE != 0):
        flashdata += b'\xFF' * (PAGESIZE - (len(flashdata) % PAGESIZE))
    # first page of the data area, counted back from the end of the chip
    programpage = MAX_PAGES - (len(flashdata) // PAGESIZE)
    pagenumber = programpage
    if not bootloaderStart():
        enableButtons()
        return
    #check version
    if getVersion() < 13:
        addLogRed("Bootloader does not support writing to flash. Write aborted!\nPlease update bootloader first.")
        enableButtons()
        return
    ## detect flash cart ##
    jedec_id = getJedecID()
    # Bug fix: getJedecID() returns None when no cart is detected; the
    # original crashed with a TypeError on jedec_id[0] in that case.
    if jedec_id is None:
        enableButtons()
        return
    if jedec_id[0] in manufacturers.keys():
        manufacturer = manufacturers[jedec_id[0]]
    else:
        manufacturer = "unknown"
    capacity = 1 << jedec_id[2]
    addLog("\nFlash JEDEC ID : {:02X}{:02X}{:02X}".format(jedec_id[0],jedec_id[1],jedec_id[2]))
    addLog("Flash Manufacturer: {}".format(manufacturer))
    if manufacturer != "unknown": addLog("Flash capacity : {} KB\n".format(capacity // 1024))
    # when starting partially in a block, preserve the beginning of old block data
    if pagenumber % PAGES_PER_BLOCK:
        blocklen = pagenumber % PAGES_PER_BLOCK * PAGESIZE
        blockaddr = pagenumber // PAGES_PER_BLOCK * PAGES_PER_BLOCK
        #read partial block data start
        bootloader.write(bytearray([ord("A"), blockaddr >> 8, blockaddr & 0xFF]))
        bootloader.read(1)
        bootloader.write(bytearray([ord("g"), (blocklen >> 8) & 0xFF, blocklen & 0xFF,ord("C")]))
        flashdata = bootloader.read(blocklen) + flashdata
        pagenumber = blockaddr
    ## write to flash cart ##
    verifyAfterWrite = flashVerify.get()
    blocks = len(flashdata) // BLOCKSIZE
    log.insert(END,"writing {} blocks/{}KB to flash".format(blocks, len(flashdata) // 1024))
    if verifyAfterWrite:
        addLog(" with verify")
    else:
        addLog("")
    progressbar['maximum'] = 2 * blocks
    for block in range (blocks):
        # RGB LED doubles as an activity indicator during the transfer
        if (block & 1 == 0) or verifyAfterWrite:
            bootloader.write(b"x\xC2") #RGB LED RED, buttons disabled
        else:
            bootloader.write(b"x\xC0") #RGB LED OFF, buttons disabled
        bootloader.read(1)
        blockaddr = pagenumber + block * BLOCKSIZE // PAGESIZE
        blocklen = BLOCKSIZE
        #write block
        bootloader.write(bytearray([ord("A"), blockaddr >> 8, blockaddr & 0xFF]))
        bootloader.read(1)
        bootloader.write(bytearray([ord("B"), (blocklen >> 8) & 0xFF, blocklen & 0xFF,ord("C")]))
        bootloader.write(flashdata[block * BLOCKSIZE : block * BLOCKSIZE + blocklen])
        bootloader.read(1)
        progressbar.step()
        root.update_idletasks()
        if verifyAfterWrite:
            bootloader.write(b"x\xC1") #RGB BLUE RED, buttons disabled
            bootloader.read(1)
            bootloader.write(bytearray([ord("A"), blockaddr >> 8, blockaddr & 0xFF]))
            bootloader.read(1)
            bootloader.write(bytearray([ord("g"), (blocklen >> 8) & 0xFF, blocklen & 0xFF,ord("C")]))
            if bootloader.read(blocklen) != flashdata[block * BLOCKSIZE : block * BLOCKSIZE + blocklen]:
                addLogRed(" verify failed!\n\nWrite aborted.")
                break
            progressbar['value'] = progressbar['value'] + 1
            root.update_idletasks()
    #write complete
    bootloader.write(b"x\x40")#RGB LED OFF, buttons enabled
    bootloader.read(1)
    bootloaderExit() # Exit bootloader menu and start sketch
    addLogGreen("\nUploaded development data successfully!!\n")
    log.insert(END,"Please use the following line in your ")
    log.insert(END,"fxdata.h",'black')
    addLog(" header file:\n")
    addLogBlack("#define PROGRAM_DATA_PAGE 0x{:04X}".format(programpage))
    enableButtons()
## backup EEPROM ###############################################################
def backupEEPROM():
    """Read the full 1K EEPROM over serial and save it to a user-chosen file.

    Silently returns when the user cancels the save dialog.  Runs on a
    worker thread.
    """
    filename = selectEEPROMbackupFile()
    if filename is None:
        return
    disableButtons()
    progressbar['value'] = 0
    progressbar['maximum'] = 1024
    if not bootloaderStart():
        addLogRed("\nEEPROM backup failed.")
        enableButtons()
        return
    addLog("\nReading 1K EEPROM data...")
    eepromdata = bytearray()
    # read in 128-byte chunks: 'A' sets the address, 'g...E' reads EEPROM
    for addr in range(0,1024,128):
        bootloader.write(bytearray([ord('A'),addr >> 8,addr & 0xFF]))
        bootloader.read(1)
        bootloader.write(b'g\x00\x80E')
        eepromdata += bytearray(bootloader.read(128))
        progressbar['value'] = progressbar['value'] + 128
        root.update_idletasks()
    bootloaderExit()
    addLog("Saving EEPROM data to '{}'".format(filename))
    try:
        # Bug fix: the original did `f.close` without parentheses, so the
        # file was never explicitly closed; `with` guarantees it.  Also
        # narrowed the bare except to OSError (file write failures) and
        # fixed the "succesfully" typo in the success message.
        with open(filename, "wb") as f:
            f.write(eepromdata)
        addLogGreen("\nEEPROM backup successfully saved.")
    except OSError:
        addLog("Saving EEPROM backup failed.")
    enableButtons()
## restore EEPROM ##############################################################
def restoreEEPROM():
    """Load a 1024-byte backup file and write it back to the EEPROM.

    Rejects files that are not exactly 1024 bytes.  Silently returns when
    the user cancels the file dialog.  Runs on a worker thread.
    """
    filename = selectEEPROMrestoreFile()
    if filename is None:
        return
    addLog("\nLoading EEPROM backup file '{}'".format(filename))
    try:
        # Bug fix: the original did `f.close` without parentheses, so the
        # file was never explicitly closed; `with` guarantees it.  The bare
        # except is narrowed to OSError (file read failures).
        with open(filename, "rb") as f:
            eepromdata = bytearray(f.read())
    except OSError:
        addLogRed("\nFailed to load EEPROM restore file.")
        return
    if len(eepromdata) != 1024:
        addLogRed("\nEEPROM restore file has incorrect size (Must be 1024 bytes).")
        return
    disableButtons()
    progressbar['value'] = 0
    progressbar['maximum'] = 1024
    if not bootloaderStart():
        # Bug fix: message said "EEPROM backup failed." in the restore path.
        addLogRed("\nEEPROM restore failed.")
        enableButtons()
        return
    addLog("\nRestoring EEPROM data...")
    # write in 64-byte chunks: 'A' sets the address, 'B...E' writes EEPROM
    for addr in range(0,1024,64):
        bootloader.write(bytearray([ord('A'),addr >> 8,addr & 0xFF]))
        bootloader.read(1)
        bootloader.write(b'B\x00\x40E')
        bootloader.write(eepromdata[addr:addr+64])
        bootloader.read(1)
        progressbar['value'] = progressbar['value'] + 64
        root.update_idletasks()
    bootloaderExit()
    addLogGreen("\nEEPROM restored successfully.")
    enableButtons()
## view EEPROM ###############################################################
def viewEEPROM():
    """Read the full 1K EEPROM and print a hex/ASCII dump in the log pane.

    Runs on a worker thread.
    """
    disableButtons()
    progressbar['value'] = 0
    progressbar['maximum'] = 1024
    if not bootloaderStart():
        addLogRed("\nEEPROM read failed.")
        enableButtons()
        return
    addLog("\nReading 1K EEPROM data...")
    eepromdata = bytearray()
    for addr in range(0,1024,128):
        bootloader.write(bytearray([ord('A'),addr >> 8,addr & 0xFF]))
        bootloader.read(1)
        bootloader.write(b'g\x00\x80E')
        eepromdata += bytearray(bootloader.read(128))
        progressbar['value'] = progressbar['value'] + 128
        # consistency fix: refresh the progress bar like backupEEPROM does
        root.update_idletasks()
    bootloaderExit()
    addLog('\nEEPROM contents:\n')
    # header/footer ruler with column offsets
    h='----- '
    for i in range (16):
        h += '{:02X} '.format(i)
    h += '-' * 16
    addLogBlack(h)
    for addr in range(0,1024,16):
        s = '{:04X}: '.format(addr)
        for i in range (16):
            s += '{:02X} '.format(eepromdata[addr+i])
        for i in range (16):
            if eepromdata[addr+i] < 32: s += '.'
            # Bug fix: cp1252 has undefined positions (0x81, 0x8D, 0x8F,
            # 0x90, 0x9D) which made plain .decode() raise
            # UnicodeDecodeError and kill the dump; replace them instead.
            else: s += eepromdata[addr+i:addr+i+1].decode('cp1252', errors='replace')
        addLogBlack(s)
    addLogBlack(h)
    enableButtons()
## erase EEPROM ################################################################
def eraseEEPROM():
    """Overwrite the entire 1K EEPROM with 0xFF after user confirmation."""
    answer = messagebox.showwarning("Erase EEPROM", "Are you sure you want to erase the EEPROM?", type = "yesno")
    if answer != "yes":
        return
    disableButtons()
    progressbar['value'] = 0
    progressbar['maximum'] = 1024
    if not bootloaderStart():
        addLogRed("\nEEPROM erase failed.")
        enableButtons()
        return
    addLog("\nErasing EEPROM memory...\n")
    # erased-EEPROM fill value, written in 64-byte chunks
    blank_chunk = b"\xFF" * 64
    for addr in range(0, 1024, 64):
        bootloader.write(bytearray([ord("A"), addr >> 8, addr & 0xFF]))
        bootloader.read(1)
        bootloader.write(b"B\x00\x40E")
        bootloader.write(blank_chunk)
        bootloader.read(1)
        progressbar['value'] += 64
    bootloaderExit()
    addLogGreen("EEPROM erased successfully.")
    enableButtons()
## GUI interface ###############################################################
## menu commands ##
def selectHexFile():
    """Ask for a .hex file, store it in appFilename and remember its folder."""
    global selectAppInitialDir
    chosen = filedialog.askopenfilename(initialdir = selectAppInitialDir, title = "Select Hex file",filetypes = (("hex files","*.hex"),("all files","*.*")))
    if chosen != '':
        chosen = os.path.abspath(chosen)
        selectAppInitialDir = os.path.dirname(chosen) + os.sep
        appFilename.set(chosen)
def selectFlashFile():
    """Ask for a flash image .bin, store it in flashFilename and remember its folder."""
    global selectFlashInitialDir
    chosen = filedialog.askopenfilename(initialdir = selectFlashInitialDir, title = "Select Flash image",filetypes = (("bin files","*.bin"),("all files","*.*")))
    if chosen != '':
        chosen = os.path.abspath(chosen)
        selectFlashInitialDir = os.path.dirname(chosen) + os.sep
        flashFilename.set(chosen)
def selectDevDataFile():
    """Ask for a development data .bin, store it in devDataFilename and remember its folder."""
    global selectDevDataInitialDir
    chosen = filedialog.askopenfilename(initialdir = selectDevDataInitialDir, title = "Select development data",filetypes = (("bin files","*.bin"),("all files","*.*")))
    if chosen != '':
        chosen = os.path.abspath(chosen)
        selectDevDataInitialDir = os.path.dirname(chosen) + os.sep
        devDataFilename.set(chosen)
def selectEEPROMbackupFile():
    """Ask for a save path for an EEPROM backup; return None when cancelled.

    A timestamped default filename is suggested so backups never collide.
    """
    global selectEEPROMinitialDir
    default_name = time.strftime("eeprom-backup-%Y%m%d-%H%M%S.eep", time.localtime())
    chosen = filedialog.asksaveasfilename(initialdir = selectEEPROMinitialDir, initialfile = default_name, title = "Select EEPROM backup file",filetypes = (("eep files","*.eep"),("bin files","*.bin"),("all files","*.*")))
    if chosen != '':
        chosen = os.path.abspath(chosen)
        selectEEPROMinitialDir = os.path.dirname(chosen) + os.sep
        return chosen
    return None
def selectEEPROMrestoreFile():
    """Ask for an EEPROM backup file to restore; return None when cancelled."""
    global selectEEPROMinitialDir
    chosen = filedialog.askopenfilename(initialdir = selectEEPROMinitialDir, title = "Select EEPROM restore file",filetypes = (("eep files","*.eep"),("bin files","*.bin"),("all files","*.*")))
    if chosen != '':
        chosen = os.path.abspath(chosen)
        selectEEPROMinitialDir = os.path.dirname(chosen) + os.sep
        return chosen
    return None
def clearLog():
    """Wipe the entire contents of the log pane."""
    log.delete(1.0, END)
def uploadHexfileThread():
    """Run uploadHexfile on a worker thread so the GUI stays responsive."""
    worker = Thread(target = uploadHexfile)
    worker.start()
def flashImageThread():
    """Run flashImage on a worker thread so the GUI stays responsive."""
    worker = Thread(target = flashImage)
    worker.start()
def devDataImageThread():
    """Run flashDevData on a worker thread so the GUI stays responsive."""
    worker = Thread(target = flashDevData)
    worker.start()
def backupEEPROMThread():
    """Run backupEEPROM on a worker thread so the GUI stays responsive."""
    worker = Thread(target = backupEEPROM)
    worker.start()
def restoreEEPROMThread():
    """Run restoreEEPROM on a worker thread so the GUI stays responsive."""
    worker = Thread(target = restoreEEPROM)
    worker.start()
def viewEEPROMThread():
    """Run viewEEPROM on a worker thread so the GUI stays responsive."""
    worker = Thread(target = viewEEPROM)
    worker.start()
def eraseEEPROMThread():
    """Run eraseEEPROM on a worker thread so the GUI stays responsive."""
    worker = Thread(target = eraseEEPROM)
    worker.start()
## events ##
def onResize(event):
    # Placeholder resize handler; intentionally a no-op.  The binding that
    # would call it is not visible in this part of the file.
    pass
def selectHexFileHotKey(event):
    """Ctrl+H accelerator: open the hex file picker (Tk event arg ignored)."""
    selectHexFile()
def selectFlashFileHotKey(event):
    """Ctrl+F accelerator: open the flash image picker (Tk event arg ignored)."""
    selectFlashFile()
def selectDevDataFileHotKey(event):
    """Ctrl+D accelerator: open the development data picker (Tk event arg ignored)."""
    selectDevDataFile()
def backupEEPROMHotKey(event):
    """Ctrl+B accelerator: start an EEPROM backup (Tk event arg ignored)."""
    backupEEPROMThread()
def restoreEEPROMHotKey(event):
    """Ctrl+R accelerator: start an EEPROM restore (Tk event arg ignored)."""
    restoreEEPROMThread()
def viewEEPROMHotKey(event):
    """Ctrl+L accelerator: show the EEPROM hex dump (Tk event arg ignored)."""
    viewEEPROMThread()
def ExitAppHotKey(event):
    """Ctrl+X accelerator: quit the application (Tk event arg ignored)."""
    root.quit()
## create form and widgets ##
# Build the main window: a row of upload controls (which controls exist
# depends on the uploader/activator/flasher mode), a scrolling log pane and
# a progress bar, plus the File/Options menus.
root = Tk()
root.geometry("700x400")
root.title(title + VERSION)
try:
    root.iconbitmap("icon.ico")
except:
    pass
#add progress bar at bottom (first ensure Vscrollbar rests on progress bar)
progressbar=Progressbar(root, orient='horizontal', mode='determinate')
progressbar.pack(side="bottom", expand=False, fill='both')
#add app button and selector frame
if not fxFlasher:
    appFrame = Frame(root)
    appFrame.pack(side = TOP, fill = BOTH)
    appFilename = StringVar(appFrame, value = defaultAppFilename)
    hexButton = Button(appFrame, text="Upload Hex file", width = 23, command = uploadHexfileThread)
    hexButton.pack(side = LEFT)
    # NOTE(review): .pack() returns None, so the *DirButton/*Entry variables
    # below are always None; harmless here since they are never used again.
    hexDirButton = Button(appFrame, text="...", width = 2, command = selectHexFile).pack(side = RIGHT)
    hexEntry = Entry(appFrame, textvariable = appFilename).pack(side = LEFT, expand = True, fill = X)
#add flash button and selector frame
flashFrame = Frame(root)
flashFrame.pack(side = TOP, fill = BOTH)
flashFilename = StringVar(flashFrame, value = defaultFlashFilename)
flashButton = Button(flashFrame, text="Upload Flash image", width = 23, command = flashImageThread)
flashButton.pack(side = LEFT)
flashDirButton = Button(flashFrame, text="...", width = 2, command = selectFlashFile).pack(side = RIGHT)
flashEntry = Entry(flashFrame, textvariable = flashFilename).pack(side = LEFT, expand = True, fill = X)
#add development data button and selector frame
if not fxFlasher and not fxActivator:
    devDataFrame = Frame(root)
    devDataFrame.pack(side = TOP, fill = BOTH)
    devDataFilename = StringVar(devDataFrame, value = defaultDevDataFilename)
    devDataButton = Button(devDataFrame, text="Upload development data", width = 23, command = devDataImageThread)
    devDataButton.pack(side = LEFT)
    devDataDirButton = Button(devDataFrame, text="...", width = 2, command = selectDevDataFile).pack(side = RIGHT)
    devDataEntry = Entry(devDataFrame, textvariable = devDataFilename).pack(side = LEFT, expand = True, fill = X)
#create log text area with scrollbar
scrollbar = Scrollbar(root)
scrollbar.pack(side = RIGHT, fill = Y)
log = Text(root, wrap = NONE, yscrollcommand = scrollbar.set, font=("Courier New", 10))
log.tag_configure("red", foreground="red", font=("Courier New", 10,"bold"))
log.tag_configure("green", foreground="green",font=("Courier New", 10,"bold"))
log.tag_configure("black", foreground="black",font=("Courier New", 10,"bold"))
scrollbar.config(command = log.yview)
#Menu checkmarks
# NOTE(review): appVerify is toggled from the Options menu but not read by
# the visible upload code — confirm before relying on it.
appVerify = BooleanVar()
flashVerify = BooleanVar()
applySsd1309patch = BooleanVar()
#create menus
mainmenu = Menu(root)
filemenu = Menu(mainmenu, tearoff=0)
if not fxFlasher:
    filemenu.add_command(label = "Select Hex file", underline = 7, accelerator = "Ctrl + H", command = selectHexFile)
filemenu.add_command(label = "Select Flash image", underline = 7, accelerator = "Ctrl + F", command = selectFlashFile)
if not fxFlasher and not fxActivator:
    filemenu.add_command(label = "Select development data", underline = 0, accelerator = "Ctrl + D", command = selectDevDataFile)
# EEPROM tools are only offered by the plain uploader GUI
if not (fxActivator or fxFlasher):
    filemenu.add_separator()
    filemenu.add_command(label = "Backup EEPROM", underline = 0, accelerator = "Ctrl + B", command = backupEEPROMThread)
    filemenu.add_command(label = "Restore EEPROM", underline = 0, accelerator = "Ctrl + R", command = restoreEEPROMThread)
    filemenu.add_command(label = "View EEPROM", underline = 0, accelerator = "Ctrl + L", command = viewEEPROMThread)
    filemenu.add_command(label = "Erase EEPROM", underline = 0, command = eraseEEPROMThread)
filemenu.add_separator()
filemenu.add_command(label = "Exit", underline = 1, accelerator = "Ctrl + X", command = root.quit)
optionmenu = Menu(mainmenu, tearoff = 0)
if not fxFlasher:
    optionmenu.add_checkbutton(label="Verify Hex file after upload",onvalue=True,offvalue=False,variable=appVerify)
    appVerify.set(True)
optionmenu.add_checkbutton(label="Verify flash data",onvalue=True,offvalue=False,variable=flashVerify)
#flashVerify.set(True)
optionmenu.add_checkbutton(label="Apply SSD1309 display patch",onvalue=True,offvalue=False,variable=applySsd1309patch)
applySsd1309patch.set(ssd1309patch)
optionmenu.add_command(label="Clear log",command=clearLog)
mainmenu.add_cascade(label="File", menu = filemenu)
mainmenu.add_cascade(label="Options", menu = optionmenu)
root.config(menu=mainmenu)
# default log
if fxActivator:
addLog("\nArduboy FX activator" + VERSION + VERSION_DATE + " by Mr.Blinky.\n\nInstructions:\n-------------\n1) Connect Arduboy and turn power on.\n2) Click Upload Hex file button and wait for upload to complete.\n3) Run Flash mod chip option on Arduboy.\n4) Run Flash bootloader option on Arduboy.\n5) Click 'Upload Flash image' button and wait for upload to complete.\n6) Enjoy your Arduboy FX.\n")
elif fxFlasher:
addLog("\nArduboy FX flasher" + VERSION + VERSION_DATE + " by Mr.Blinky.\n\nInstructions:\n-------------\n1) Connect Arduboy and turn power on.\n2) Click 'Upload Flash image' button.\n3) Wait for upload to complete.\n4) Press LEFT then RIGHT to view ARDUBOY FX LOADER screen.\n")
else:
addLog("\nArduboy uploader GUI" + VERSION + VERSION_DATE + " by Mr.Blinky.\n\n1) Use File menu or […] button to browse for a Hex file or Flash image.\n2) Press the appropriate upload button to upload the file.\n")
log.pack(side="top", expand=True, fill='both')
log.bind("<Configure>", onResize)
#create hot keys
root.bind_all("<Control-x>", ExitAppHotKey)
if not fxFlasher:
root.bind_all("<Control-h>", selectHexFileHotKey)
root.bind_all("<Control-f>", selectFlashFileHotKey)
if not fxFlasher and not fxActivator:
root.bind_all("<Control-d>", selectDevDataFileHotKey)
root.bind_all("<Control-b>", backupEEPROMHotKey)
root.bind_all("<Control-r>", restoreEEPROMHotKey)
root.bind_all("<Control-l>", viewEEPROMHotKey)
#run application
root.mainloop() |
test_sqlqueue.py | # coding=utf-8
import random
import shutil
import sys
import tempfile
import unittest
from threading import Thread
from persistqueue import SQLiteQueue, FILOSQLiteQueue, UniqueQ
from persistqueue import Empty
from persistqueue.serializers import json as internal_json
class SQLite3QueueTest(unittest.TestCase):
    """Functional tests for the FIFO SQLiteQueue with auto_commit=True."""

    def setUp(self):
        self.path = tempfile.mkdtemp(suffix='sqlqueue')
        self.auto_commit = True

    def tearDown(self):
        shutil.rmtree(self.path, ignore_errors=True)

    def test_raise_empty(self):
        q = SQLiteQueue(self.path, auto_commit=self.auto_commit)
        q.put('first')
        d = q.get()
        self.assertEqual('first', d)
        self.assertRaises(Empty, q.get, block=False)
        # assert with timeout
        self.assertRaises(Empty, q.get, block=True, timeout=1.0)
        # assert with negative timeout
        self.assertRaises(ValueError, q.get, block=True, timeout=-1.0)

    def test_open_close_single(self):
        """Write 1 item, close, reopen checking if same item is there"""
        q = SQLiteQueue(self.path, auto_commit=self.auto_commit)
        q.put(b'var1')
        del q
        q = SQLiteQueue(self.path)
        self.assertEqual(1, q.qsize())
        self.assertEqual(b'var1', q.get())

    def test_open_close_1000(self):
        """Write 1000 items, close, reopen checking if all items are there"""
        q = SQLiteQueue(self.path, auto_commit=self.auto_commit)
        for i in range(1000):
            q.put('var%d' % i)
        self.assertEqual(1000, q.qsize())
        del q
        q = SQLiteQueue(self.path)
        self.assertEqual(1000, q.qsize())
        for i in range(1000):
            data = q.get()
            self.assertEqual('var%d' % i, data)
        # assert adding another one still works
        q.put('foobar')
        data = q.get()
        self.assertEqual('foobar', data)

    def test_random_read_write(self):
        """Test random read/write"""
        q = SQLiteQueue(self.path, auto_commit=self.auto_commit)
        n = 0
        for _ in range(1000):
            if random.random() < 0.5:
                if n > 0:
                    q.get()
                    n -= 1
                else:
                    self.assertRaises(Empty, q.get, block=False)
            else:
                q.put('var%d' % random.getrandbits(16))
                n += 1

    def test_multi_threaded_parallel(self):
        """Create consumer and producer threads, check parallelism"""
        m_queue = SQLiteQueue(path=self.path, multithreading=True,
                              auto_commit=self.auto_commit)

        def producer():
            for i in range(1000):
                m_queue.put('var%d' % i)

        def consumer():
            for i in range(1000):
                x = m_queue.get(block=True)
                self.assertEqual('var%d' % i, x)

        c = Thread(target=consumer)
        c.start()
        p = Thread(target=producer)
        p.start()
        p.join()
        c.join()
        self.assertEqual(0, m_queue.size)
        self.assertEqual(0, len(m_queue))
        self.assertRaises(Empty, m_queue.get, block=False)

    def test_multi_threaded_multi_producer(self):
        """Test sqlqueue can be used by multiple producers."""
        queue = SQLiteQueue(path=self.path, multithreading=True,
                            auto_commit=self.auto_commit)

        def producer(seq):
            for i in range(10):
                queue.put('var%d' % (i + (seq * 10)))

        def consumer():
            for _ in range(100):
                data = queue.get(block=True)
                self.assertTrue('var' in data)

        c = Thread(target=consumer)
        c.start()
        producers = []
        for seq in range(10):
            t = Thread(target=producer, args=(seq,))
            t.start()
            producers.append(t)
        for t in producers:
            t.join()
        c.join()

    def test_multiple_consumers(self):
        """Test sqlqueue can be used by multiple consumers."""
        queue = SQLiteQueue(path=self.path, multithreading=True,
                            auto_commit=self.auto_commit)

        def producer():
            for x in range(1000):
                queue.put('var%d' % x)

        counter = []
        # Set all to 0
        for _ in range(1000):
            counter.append(0)

        def consumer(index):
            for i in range(200):
                data = queue.get(block=True)
                self.assertTrue('var' in data)
                counter[index * 200 + i] = data

        p = Thread(target=producer)
        p.start()
        consumers = []
        for index in range(5):
            t = Thread(target=consumer, args=(index,))
            t.start()
            consumers.append(t)
        p.join()
        for t in consumers:
            t.join()
        self.assertEqual(0, queue.qsize())
        for x in range(1000):
            self.assertNotEqual(0, counter[x],
                                "not 0 for counter's index %s" % x)
        # every consumed value must be distinct (no double delivery)
        self.assertEqual(len(set(counter)), len(counter))

    def test_task_done_with_restart(self):
        """Test that items are not deleted before task_done."""
        q = SQLiteQueue(path=self.path, auto_commit=False)
        for i in range(1, 11):
            q.put(i)
        self.assertEqual(1, q.get())
        self.assertEqual(2, q.get())
        # size is correct before task_done
        self.assertEqual(8, q.qsize())
        q.task_done()
        # make sure the size still correct
        self.assertEqual(8, q.qsize())
        self.assertEqual(3, q.get())
        # without task done
        del q
        q = SQLiteQueue(path=self.path, auto_commit=False)
        # After restart, the qsize and head item are the same
        self.assertEqual(8, q.qsize())
        # After restart, the queue still works
        self.assertEqual(3, q.get())
        self.assertEqual(7, q.qsize())

    def test_protocol_1(self):
        """A fresh queue picks pickle protocol 2 on py2, 4 on py3."""
        shutil.rmtree(self.path, ignore_errors=True)
        q = SQLiteQueue(path=self.path)
        self.assertEqual(q._serializer.protocol,
                         2 if sys.version_info[0] == 2 else 4)

    def test_protocol_2(self):
        """A reopened queue keeps the same pickle protocol."""
        q = SQLiteQueue(path=self.path)
        self.assertEqual(q._serializer.protocol,
                         2 if sys.version_info[0] == 2 else 4)

    def test_json_serializer(self):
        q = SQLiteQueue(
            path=self.path,
            serializer=internal_json)
        x = dict(
            a=1,
            b=2,
            c=dict(
                d=list(range(5)),
                e=[1]
            ))
        q.put(x)
        # Fix: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(q.get(), x)

    def test_put_0(self):
        q = SQLiteQueue(path=self.path)
        q.put(0)
        d = q.get(block=False)
        self.assertIsNotNone(d)
class SQLite3QueueNoAutoCommitTest(SQLite3QueueTest):
    """Re-run the FIFO suite with auto_commit=False (task_done required)."""

    def setUp(self):
        self.path = tempfile.mkdtemp(suffix='sqlqueue_auto_commit')
        self.auto_commit = False

    def test_multiple_consumers(self):
        """Known failure when auto_commit is off: after all consumers join,
        qsize() stays nonzero (historically ``AssertionError: 0 != 72`` at the
        final size assertion), so the inherited test is skipped."""
        self.skipTest('Skipped due to a known bug above.')
class SQLite3QueueInMemory(SQLite3QueueTest):
    """Run the FIFO suite on a ':memory:' database; tests relying on
    persistence or (crash-prone) multithreading are skipped."""

    def setUp(self):
        self.path = ":memory:"
        self.auto_commit = True

    def test_open_close_1000(self):
        self.skipTest('Memory based sqlite is not persistent.')

    def test_open_close_single(self):
        self.skipTest('Memory based sqlite is not persistent.')

    def test_multiple_consumers(self):
        self.skipTest('Skipped due to occasional crash during '
                      'multithreading mode.')

    def test_multi_threaded_multi_producer(self):
        self.skipTest('Skipped due to occasional crash during '
                      'multithreading mode.')

    def test_multi_threaded_parallel(self):
        self.skipTest('Skipped due to occasional crash during '
                      'multithreading mode.')

    def test_task_done_with_restart(self):
        self.skipTest('Skipped due to not persistent.')

    def test_protocol_2(self):
        self.skipTest('In memory queue is always new.')
class FILOSQLite3QueueTest(unittest.TestCase):
    """Functional tests for FILOSQLiteQueue (last in, first out)."""

    def setUp(self):
        self.path = tempfile.mkdtemp(suffix='filo_sqlqueue')
        self.auto_commit = True

    def tearDown(self):
        shutil.rmtree(self.path, ignore_errors=True)

    def test_open_close_1000(self):
        """Write 1000 items, close, reopen checking if all items are there"""
        queue = FILOSQLiteQueue(self.path, auto_commit=self.auto_commit)
        for n in range(1000):
            queue.put('var%d' % n)
        self.assertEqual(1000, queue.qsize())
        del queue
        queue = FILOSQLiteQueue(self.path)
        self.assertEqual(1000, queue.qsize())
        # LIFO: the most recently written item comes back first.
        for n in range(1000):
            self.assertEqual('var%d' % (999 - n), queue.get())
        # assert adding another one still works
        queue.put('foobar')
        self.assertEqual('foobar', queue.get())
class FILOSQLite3QueueNoAutoCommitTest(FILOSQLite3QueueTest):
    """Re-run the FILO suite with auto_commit=False."""

    def setUp(self):
        self.path = tempfile.mkdtemp(suffix='filo_sqlqueue_auto_commit')
        self.auto_commit = False
class SQLite3UniqueQueueTest(unittest.TestCase):
    """Tests for UniqueQ, which silently drops duplicate puts."""

    def setUp(self):
        self.path = tempfile.mkdtemp(suffix='sqlqueue')
        self.auto_commit = True

    def tearDown(self):
        # Fix: this class had no tearDown, so the temp directory created in
        # setUp leaked after every test run.
        shutil.rmtree(self.path, ignore_errors=True)

    def test_add_duplicate_item(self):
        q = UniqueQ(self.path)
        q.put(1111)
        self.assertEqual(1, q.size)
        # put duplicate item
        q.put(1111)
        self.assertEqual(1, q.size)
        q.put(2222)
        self.assertEqual(2, q.size)
        del q
        q = UniqueQ(self.path)
        self.assertEqual(2, q.size)

    def test_multiple_consumers(self):
        """Test UniqueQ can be used by multiple consumers."""
        queue = UniqueQ(path=self.path, multithreading=True,
                        auto_commit=self.auto_commit)

        def producer():
            for x in range(1000):
                queue.put('var%d' % x)

        counter = []
        # Set all to 0
        for _ in range(1000):
            counter.append(0)

        def consumer(index):
            for i in range(200):
                data = queue.get(block=True)
                self.assertTrue('var' in data)
                counter[index * 200 + i] = data

        p = Thread(target=producer)
        p.start()
        consumers = []
        for index in range(5):
            t = Thread(target=consumer, args=(index,))
            t.start()
            consumers.append(t)
        p.join()
        for t in consumers:
            t.join()
        self.assertEqual(0, queue.qsize())
        for x in range(1000):
            self.assertNotEqual(0, counter[x],
                                "not 0 for counter's index %s" % x)
        # every consumed value must be distinct (no double delivery)
        self.assertEqual(len(set(counter)), len(counter))
|
chart.py | import threading
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import data
from data import R
fig = None
axis = None
# Creates a chart
def createChart():
    """Create the global 3-D figure/axes and show a placeholder line segment.

    Blocks in plt.show() until the window is closed.
    """
    print("Initialising plot")  # fixed typo in the log message
    global fig, axis
    mpl.rcParams['legend.fontsize'] = 10
    fig = plt.figure()
    # Fix: fig.gca(projection='3d') was deprecated in Matplotlib 3.4 and
    # removed in 3.6; add_subplot is the documented replacement.
    axis = fig.add_subplot(projection='3d')
    # (removed an unused `theta = np.linspace(...)` local)
    z = [2, 3]
    x = [2, 2]
    y = [1, 4]
    axis.plot(x, y, z, label='A Leg (?)')
    axis.legend()
    plt.show()
def rotate_poses(poses_3d, R, t):
    """Map each flattened pose from camera space to world space.

    Every row of *poses_3d* reshapes to (n_points, 4); the first three values
    of each point are translated by -t and rotated by R^-1, the fourth is left
    untouched. The array is modified in place and also returned.
    """
    world_from_cam = np.linalg.inv(R)
    for idx, flat_pose in enumerate(poses_3d):
        pts = flat_pose.reshape((-1, 4)).transpose()
        pts[0:3, :] = np.dot(world_from_cam, pts[0:3, :] - t)
        poses_3d[idx] = pts.transpose().reshape(-1)
    return poses_3d
# Updates data in the chart (worker entry point — call via updateData()).
def __updateData(pose_3d):
    global axis
    axis.clear()  # Clear all data
    if not len(pose_3d):  # nothing to draw — leave the axes empty
        return
    pose_3d = rotate_poses(pose_3d, data.R, data.t)
    pose_3d_copy = pose_3d.copy()
    # each point is stored as 4 consecutive values per row; pick x/y/z columns
    x = pose_3d_copy[:, 0::4]
    y = pose_3d_copy[:, 1::4]
    z = pose_3d_copy[:, 2::4]
    # re-orient the cloud: plot (-z, x, -y) so the figure appears upright
    pose_3d[:, 0::4], pose_3d[:, 1::4], pose_3d[:, 2::4] = -z, x, -y
    # assumes 19 joints of 4 values per pose — TODO confirm against data module
    pose_3d = pose_3d.reshape(pose_3d.shape[0], 19, -1)[:, :, 0:3]
    for sid in range(len(data.SKELETON_EDGES)):
        firstNr = data.SKELETON_EDGES[sid][0]
        secondNr = data.SKELETON_EDGES[sid][1]
        for human in range(len(pose_3d)):  # Go through all humans
            first = pose_3d[human][firstNr]
            second = pose_3d[human][secondNr]
            x = [first[0], second[0]]
            y = [first[1], second[1]]
            z = [first[2], second[2]]
            axis.plot(x, y, z, label=str(human) + "-" + str(firstNr) + "." + str(secondNr))
    plt.draw()
def updateData(pose_3d, sync = False):
    """Redraw the chart with *pose_3d*, synchronously or on a worker thread."""
    if sync:
        __updateData(pose_3d)
        return
    snapshot = pose_3d.copy()  # copy so the caller may keep mutating its array
    worker = threading.Thread(target=__updateData, args=[snapshot])
    worker.start()
|
save.py | #!/usr/bin/python
import requests
import sys
import shutil
import re
import threading
from BeautifulSoup import BeautifulSoup as soup
THREAD_COUNTER = 0
THREAD_MAX = 5
def get_source( link ):
    """Fetch *link* and return the parsed BeautifulSoup document; exit the
    process on any non-200 response."""
    response = requests.get( link )
    if response.status_code != 200:
        sys.exit( "[~] Invalid Response Received." )
    return soup( response.text )
def filter( html ):
    """Return every <img> tag in *html*; exit if the page has none.

    (Intentionally shadows the builtin ``filter`` — kept for call-site
    compatibility.)
    """
    images = html.findAll( "img" )
    if not images:
        sys.exit("[~] No images detected on the page.")
    return images
def requesthandle( link, name ):
    """Worker: stream-download *link* into file *name* (Python 2 syntax).

    THREAD_COUNTER is incremented for the lifetime of the call so main() can
    throttle spawning; there is no lock, so the counter is only approximate.
    """
    global THREAD_COUNTER
    THREAD_COUNTER += 1
    try:
        r = requests.get( link, stream=True )
        if r.status_code == 200:
            r.raw.decode_content = True  # undo transfer-encoding before copying raw bytes
            f = open( name, "wb" )
            shutil.copyfileobj(r.raw, f)
            f.close()
            print "[*] Downloaded Image: %s" % name
    except Exception, error:
        print "[~] Error Occured with %s : %s" % (name, error)
    THREAD_COUNTER -= 1
def main():
    """Scrape image URLs from the wallpapers page and download each on a
    daemon thread, keeping at most THREAD_MAX workers in flight."""
    import time  # local import: avoids touching the module's import block

    html = get_source( "https://www.drivespark.com/wallpapers/" )
    tags = filter( html )
    for tag in tags:
        src = tag.get( "src" )
        if src:
            src = re.match( r"((?:https?:\/\/.*)?\/(.*\.(?:png|jpg)))", src )
            if src:
                (link, name) = src.groups()
                if not link.startswith("http"):
                    link = "https://www.drivespark.com" + link
                _t = threading.Thread( target=requesthandle, args=(link, name.split("/")[-1]) )
                _t.daemon = True
                _t.start()
                # Throttle: wait until a worker slot frees up.
                while THREAD_COUNTER >= THREAD_MAX:
                    time.sleep(0.05)  # fix: was a busy-wait pegging a CPU core
    # Wait for in-flight downloads to finish (daemon threads would be killed
    # if main() returned while they run).
    while THREAD_COUNTER > 0:
        time.sleep(0.05)  # fix: was a busy-wait
if __name__ == "__main__":
main() |
websocketApi2.py | # -*- coding: utf-8 -*-
import websocket
import gzip
import time
import StringIO
import json
import zlib
import json
from threading import Thread
from urlparse import urlparse
import hmac
import hashlib
import requests
import os
import warnings
from Queue import Queue, Empty
SECRET = 'tRJs6HQP-cnpMnEB7-T9xRGOGM-TOusHtG7'
API_KEY = '9rsmJL65-ztLxddIV-H1a2c1xw-gcwWTFOM'
ACCOUNT_NAME = "huobip/mock-adamzzz"
class onetokenWebsocketApi(object):
    """Client for the OneToken (1token.trade) API.

    Trading calls are signed and run through a REST request queue
    (processQueue); market data (ticks / candles) and account events arrive
    over websockets tracked in ws_dict.
    """

    # Endpoint catalogue for reference (values are the original Chinese
    # descriptions; they are data, so they are kept verbatim).
    params_dict = {
        'Account': {'info': ['获取账户信息'],
                    "withdraws": ['获取提币记录', '请求提币', '取消提币']},
        'Order': {'orders': ['查询订单信息','创建订单','修改订单,只有BITMEX交易所支持','取消订单,最多支持同时取消9个订单'],
                  'orders/all': ['取消所有符合条件的订单'],
                  'trans': ['获取最近成交记录']}
    }
    WSS_TICK = 'wss://1token.trade/api/v1/ws/tick?gzip=true'  # tick feed (gzip-compressed frames)
    WSS_CANDLE = 'wss://1token.trade/api/v1/ws/candle?gzip=true'  # candles: 1m,5m,15m,30m,1h,2h,4h,6h,1d,1w
    WSS_V2 = 'wss://1token.trade/api/v1/ws/low-freq-quote-v2?gzip=true'  # 24h price / rise-fall quotes
    WSS_V3 = 'wss://1token.trade/api/v1/ws/tick-v3?gzip=true'
    WSS_ACCOUNT = 'wss://1token.trade/api/v1/ws/trade'
    RES_URL = "https://1token.trade/api/v1/"

    def __init__(self):
        """Constructor"""
        self.apiKey = ''            # API key (user name)
        self.secretKey = ''         # API secret (password)
        self.host = ''              # server address
        self.currency = ''          # currency type (usd or cny)
        self.ws = None              # NOTE(review): never reassigned; live sockets live in ws_dict
        self.thread = {}            # worker threads (never populated in this file)
        self.ping_thread = {}       # per-socket heartbeat threads
        self.rec_thread = {}        # per-socket receive threads
        self.accounts = []
        self.exchange = ''
        self.account_name = ''
        self.account_symbol = ''
        self.reqID = 0
        self.reqQueue = Queue()     # pending REST requests
        self.reqThread = Thread(target=self.processQueue)
        self.orderDict = {}         # reqID -> exchange order id
        self.ws_dict = {'bar': None,
                        'tick': None,
                        'info': None}
        self.host_dict = {'bar': self.WSS_CANDLE,
                          'tick': self.WSS_TICK,
                          'info': self.WSS_ACCOUNT}
    def readData(self, compressData):
        """Decompress a gzip websocket frame and parse it as JSON.

        Python 2 has no gzip.decompress, hence the StringIO/GzipFile dance.
        """
        compressedstream = StringIO.StringIO(compressData)
        gziper = gzip.GzipFile(fileobj=compressedstream)
        result = gziper.read().decode('utf-8')
        return json.loads(result)

    def startReq(self):
        """Start the REST request worker thread (sets the loop flag first)."""
        self.active = True
        self.reqThread.start()
    def generateSign(self, secret, verb, url, nonce, data_str=None):
        """Return the hex HMAC-SHA256 signature over verb + path + nonce + body."""
        if data_str is None:
            data_str = ''
        parsed_url = urlparse(url)
        path = parsed_url.path
        # Message layout: "<VERB>/<path><nonce><body>" — os.path.join supplies
        # the slash between verb and path.
        message = os.path.join(verb, path) + str(nonce) + data_str
        signature = hmac.new(bytes(secret).encode('utf8'),
                             bytes(message).encode('utf8'), digestmod=hashlib.sha256).hexdigest()
        return signature

    def gen_headers(self, nonce, key, sig):
        """Build the authenticated JSON request headers."""
        headers = {
            'Api-Nonce': str(nonce),
            'Api-Key': key,
            'Api-Signature': sig,
            'Content-Type': 'application/json'
        }
        return headers

    def gen_nonce(self):
        """Return the current time in microseconds, as a string."""
        return str((int(time.time() * 1000000)))
    def auth(self, ws, **kwargs):
        """Send an auth message on *ws*; afterwards contracts from several
        exchanges may be subscribed over the same socket."""
        tradeStr = {
            "uri": "auth"
        }
        tradeStr.update(kwargs)
        params = json.dumps(tradeStr)
        ws.send(params)

    def login(self, account, apiKey, secretKey):
        """Store credentials; *account* must look like 'exchange/name'."""
        self.account_symbol = account
        self.exchange, self.account_name = account.split("/")
        self.apiKey = apiKey
        self.secretKey = secretKey

    def processQueue(self):
        """Worker loop: pop queued REST requests and invoke their callbacks."""
        while self.active:
            try:
                # 1-second timeout so the loop can notice active being cleared.
                req = self.reqQueue.get(block=True, timeout=1)
                callback = req['callback']
                reqID = req['reqID']
                r = callback(req['params'], reqID)
            except Empty:
                pass
    # ----------------- REST: place order ------------------------
    def sendOrder(self, symbol, price, amount, direction, client_oid=None, options=None):
        """Queue an order-creation request.

        :param symbol: contract, e.g. 'huobip/btc.usdt'
        :param price: limit price
        :param amount: order size (the absolute value is sent)
        :param direction: the 'bs' field; the API example uses 'b'/'s' while
            the original note said 'long' or 'short' — confirm against caller
        :param client_oid: optional caller-chosen id for tracking (the server
            generates one if omitted)
        :param options: optional extra order options dict
        :return: None; the response arrives in onSendOrder and carries
            exchange_oid and client_oid.
        """
        params = dict()
        self.reqID += 1
        params['contract'] = symbol
        params['price'] = price
        params['amount'] = abs(amount)
        params['bs'] = direction
        if client_oid is not None:
            params['client_oid'] = client_oid
        if options is not None:
            params['options'] = options
        data = {'reqID': self.reqID, 'callback': self.onSendOrder, 'params': params}
        self.reqQueue.put(data)

    def onSendOrder(self, params, reqID):
        """Queue callback: POST the order and remember its exchange_oid."""
        r = self.sendTradingRequest(data=params,
                                    path="%s/%s/orders" % (self.exchange, self.account_name),
                                    verb="POST"
                                    )
        if "exchange_oid" in r.keys():
            orderID = r['exchange_oid']
            self.orderDict[reqID] = orderID
            print "发单成功, order detail:%s"%str(params)  # "order placed"
        else:
            print "发单失败, error detail:%s"%str(r)  # "order failed"
        return r
    # ----------------- REST: cancel all / cancel specific orders ------------------------
    def cancelAll(self, contract=None):
        """Queue a cancel-all request.

        :param contract: e.g. 'binance/btc.usdt'; on some exchanges omitting
            it cancels orders on every pair — exchange-dependent.
        """
        self.reqID += 1
        if contract is not None:
            params = {'contract': contract}
        else:
            params = None
        data = {'reqID': self.reqID, 'callback': self.onCancelAll, 'params': params}
        self.reqQueue.put(data)

    def onCancelAll(self, params, reqID=None):
        """Queue callback: DELETE all matching orders and print the response."""
        r = self.sendTradingRequest(params=params,
                                    path="%s/%s/orders/all" % (self.exchange, self.account_name),
                                    verb="DELETE"
                                    )
        print(r)

    def cancelOrder(self, client_oid=None, exchange_oid=None):
        """Queue a cancel request by client and/or exchange order id.

        Each id field may hold up to 9 comma-separated ids.
        :raises ValueError: if both ids are None (the message text is the
            original Chinese: "at least one argument required").
        """
        params = {}
        self.reqID += 1
        if client_oid is None and exchange_oid is None:
            raise ValueError("至少输入一个参数")
        if client_oid is not None:
            params['client_oid'] = client_oid
        if exchange_oid is not None:
            params['exchange_oid'] = exchange_oid
        data = {'reqID': self.reqID, 'callback': self.onCancelOrder, 'params': params}
        self.reqQueue.put(data)

    def onCancelOrder(self, params, reqID=None):
        """Queue callback: DELETE the given orders and print the response."""
        r = self.sendTradingRequest(params=params,
                                    path="%s/%s/orders" % (self.exchange, self.account_name),
                                    verb="DELETE"
                                    )
        print(r)
# ----------------- Restful:查询订单 ------------------------
def qryOrder(self, contract=None, client_oid=None, exchange_oid=None, status=None):
"""
:param contract: e.g "binance/btc.usdt"
:param status: available values : waiting, pending, withdrawing, withdrawn, dealt, part-deal-pending,
part-deal-withdrawn, part-deal-withdrawing, error-order, active, end
:param client_oid: 用户定义的订单号,最多支持9个订单号,用逗号分隔。
:param exchange_oid: 交易所生成的订单号,最多支持9个订单号,用逗号分隔。
:return:
e.g
[
{
"account": "string",
"average_dealt_price": 0,
"bs": "string",
"client_oid": "string",
"comment": "string",
"commission": 0,
"contract": "string",
"dealt_amount": 0,
"entrust_amount": 0,
"entrust_price": 0,
"entrust_time": "string",
"exchange_oid": "string",
"last_dealt_amount": 0,
"canceled_time": "string",
"closed_time": "string",
"ots_closed_time": "string",
"last_update": "string",
"exchange_update": "string",
"options": {},
"status": "waiting",
"tags": {}
}
]
"""
params = dict()
self.reqID += 1
if contract is not None:
params['contract'] = contract
if client_oid is not None:
params['client_oid'] = client_oid
if exchange_oid is not None:
params['exchange_oid'] = exchange_oid
data = {'reqID': self.reqID, 'callback': self.onQryOrder, 'params': params}
self.reqQueue.put(data)
def onQryOrder(self, params, reqID=None):
r = self.sendTradingRequest(params=params,
path="%s/%s/orders" % (self.exchange, self.account_name),
verb="GET")
print(r)
# ----------------- Restful:查询成交信息 ------------------------
def qryTransaction(self, contract=None, count=None):
"""
:param contract: 建议填写。但是对于部分交易所,此项不填可以返回所有成交。
:param count: 返回成交记录的条数
:return:
"""
params = {}
self.reqID += 1
if contract is not None:
params['contract'] = contract
else:
warnings.showwarning(":param contract: 建议填写。但是对于部分交易所,此项不填可以返回所有成交。")
if count is not None:
params['count'] = count
data = {'reqID': self.reqID,
'callback': self.onQryTransaction,
'params': params}
self.reqQueue.put(data)
def onQryTransaction(self, params, reqID=None):
r = self.sendTradingRequest(params=params,
verb='GET',
path="%s/%s/trans" % (self.exchange, self.account_name))
print(r)
def sendTradingRequest(self,
path,
verb,
params=None,
data=None
):
"""
:param verb: "POST" or "GET" or "DELETE"
:param path: 根据具体的操作进行构造{exchange}/{account}/orders e.g '/huobip/zannb/orders'
:param nonce: 时间戳
:param data: 具体的操作信息 e.g data = {"contract": "huobip/btc.usdt", "price": 1, "bs": "b", "amount": 0.6}
:return:
"""
if params is not None and len(params)==0:
params = None
if data is not None and len(data)==0:
data = None
nonce = self.gen_nonce()
data_str = json.dumps(data, sort_keys=True) if data is not None else ""
sig = self.generateSign(self.secretKey, verb, path, nonce, data_str)
headers = self.gen_headers(nonce, self.apiKey, sig)
# server并不要求请求中的data按key排序,只需所发送请求中的data与用来签名的data_str和相同即可,是否排序请按实际情况选择
if verb == "POST":
resp = requests.post(os.path.join(self.RES_URL, "trade", path), headers=headers, data=data_str, params=params)
elif verb == "GET":
resp = requests.get(os.path.join(self.RES_URL, "trade", path), headers=headers, data=data_str, params=params)
elif verb == 'DELETE':
resp = requests.delete(os.path.join(self.RES_URL, "trade", path), headers=headers, data=data_str, params=params)
else:
pass
return resp.json()
    def qryAccount(self):
        """Fetch account info synchronously (bypasses the request queue).

        Returns the decoded JSON, shaped like::

            {
                "balance": ...,        # total = cash + coin market value
                "cash": ...,           # cash, valued in CNY
                "market_value": ...,   # total coin market value
                "market_value_detail": {"btc": ..., "usdt": ..., "eth": ...},
                "position": [          # holdings; btc/usdt/fiat always present
                    {"contract": "eth",        # coin
                     "total_amount": ...,      # total
                     "available": ...,         # spendable amount
                     "frozen": ...,            # locked amount
                     "market_value": ...,
                     "type": "spot"},          # "spot" = spot holding
                    ...
                ]
            }
        """
        return self.sendTradingRequest(
            verb="GET",
            path="%s/%s/info" % (self.exchange, self.account_name)
        )
    def ws_connect(self, data_type, **options):
        """Open the websocket for *data_type* ('bar', 'tick' or 'info') and
        start its heartbeat and receive threads; 'info' connections carry
        signed authentication headers."""
        headers = None
        host = self.host_dict[data_type]
        if data_type == "info":
            verb = 'GET'
            nonce = self.gen_nonce()
            secret = self.secretKey
            path = "/ws/%s" % self.account_name
            api_key = self.apiKey
            signature = self.generateSign(secret, verb, path, nonce, data_str=None)
            headers = self.gen_headers(nonce, api_key, signature)
        ws = websocket.create_connection(host, headers=headers, **options)
        ws.data_type = data_type  # tag so onMessage knows whether frames are gzipped
        self.ws_dict[data_type] = ws
        self.ping_thread[data_type] = Thread(target=self.send_ping, args=(ws,))
        self.rec_thread[data_type] = Thread(target=self.rec_data, args=(ws,))
        self.ping_thread[data_type].start()
        self.rec_thread[data_type].start()

    def reconnect(self, data_type):
        """Reconnect after a dropped connection — not implemented yet."""
        pass
def close(self):
"""关闭接口"""
for k in self.thread.keys():
if self.thread[k] and self.thread[k].isAlive():
self.ws[k].close()
self.thread[k].join()
self.ping_thread[k].join()
if self.reqThread and self.reqThread.isAlive():
self.active = False
self.reqThread
    def send_ping(self, ws):
        """Heartbeat loop: the server drops clients silent for 30 s, so a ping
        is sent every 5 s; the server reports lost heartbeats otherwise."""
        tradeStr = {"uri": "ping"}
        params = json.dumps(tradeStr)
        while (True):
            time.sleep(5)
            ws.send(params)

    def rec_data(self, ws):
        """Receive loop: block on recv() and hand every frame to onMessage."""
        while (True):
            r = ws.recv()
            self.onMessage(ws, r)
    # ----------------- WSS_TICK: live tick stream -------------------------
    def subscribeTick(self, contract):
        """Subscribe to verbose tick data for *contract* (e.g. 'bitfinex/btc.usd').

        Several contracts can share one websocket by sending additional
        'subscribe-single-tick-verbose' requests on it. Server frames carry
        the asks/bids ladders, last price, 1token system time and exchange
        time (ISO, with timezone), turnover ('amount', CNY) and volume.
        """
        if self.ws_dict['tick'] is None:
            self.ws_connect(data_type='tick')
            self.auth(self.ws_dict['tick'])
        self.sendMarketDataRequest(ws=self.ws_dict['tick'], contract=contract, uri="subscribe-single-tick-verbose")
# # -----------------WSS_v3:实时TICK数据接口-------------------------
# def subscribeTick_v3(self, contract):
# """
# 推送v3格式的tick行情,每隔30秒服务器会推送snapshot,在此间隔内发送diff,客户端可以通过计算snapshot+diff得出当前行情。
# 在diff类型的数据中,如bids或者asks中存在[x,y=0]的情况,则删除上个snapshot中bids或asks价格为x的行情,否则更新此行情。
# """
# self.ws_connect(self.WSS_V3, self.apiKey, self.secretKey)
# self.auth(self.ws_dict['tick'])
# self.sendMarketDataRequest(contract, self.ws_dict['tick'])
# -----------------WSS_TICK:实时ZHUBI数据接口-------------------------
def subscribeZhubi(self, contract):
"""
订阅逐笔数据, 如果需要请求多个contract的逐笔数据, 可以在同一个websocket里面发送多个subscribe-single-zhubi-verbose的请求, 每个请求带着多个不同的contract
tradeStr = {
"uri": "subscribe-single-zhubi-verbose",
"contract": "bitfinex/btc.usd"
}
return:
{u'data': [{u'exchange_time': u'2018-10-29T07:28:45.293000+00:00', u'price': 6499.4, u'bs': u's', u'contract': u'huobip/btc.usdt', u'amount': 0.1154, u'time': u'2018-10-29T15:28:45.866691+08:00'}], u'uri': u'single-zhubi-verbose'}
"""
if self.ws_dict['tick'] is None:
self.ws_connect(data_type='zhubi')
self.auth(self.ws_dict['zhubi'])
self.sendMarketDataRequest(self.ws_dict['zhubi'], contract, uri="subscribe-single-zhubi-verbose")
    # ----------------- WSS_CANDLE: live candle stream -------------------------
    def subscribeKline(self, contract, duration):
        """Subscribe to candles for *contract*.

        :param duration: one of 1m,5m,15m,30m,1h,2h,4h,6h,1d,1w
        One connection may carry several contracts. Server frames contain
        OHLC, amount (volume traded), volume (turnover), contract, duration
        and an ISO timestamp.
        """
        if self.ws_dict['bar'] is None:
            self.ws_connect(data_type='bar')
            self.auth(self.ws_dict['bar'])
        self.sendMarketDataRequest(self.ws_dict['bar'], contract, duration=duration)
# # ----------------------WSS_V2:24小时涨跌幅数据接口-----------------------
# def subscribeLowFreqQuote(self, contract):
# """
# 推送各个合约的当前价格以及24小时涨跌幅。
# 支持同时订阅不同交易所的不同合约
# 每个请求可以带着1个的contracts,例子:["huobip/btc.usdt"]
# 每个请求可以带着多个不同的contracts,例子:["huobip/btc.usdt", "huobip/ht.usdt"]
#
# //Websocket Client request
# {
# "uri":"batch-subscribe",
# "contracts":["huobip/btc.usdt", "huobip/ht.usdt"]
# }
#
# //Websocket Server response
# {
# "uri":"batch-subscribe",
# "code":"success"
# }
#
# //Websocket Server response
# {
# "uri":"low-freq-quote",
# "data":
# [
# {
# "contract":"huobip/btc.usdt",
# "rise":3.345103,
# "price":6152.32,
# "price_s":"6152.32" //根据交易所的min_change format的字符串
# },
# {
# "contract":"huobip/ht.usdt",
# "rise":-0.539916,
# "price":3.7027,
# "price_s":"3.7027"
# }
# ]
# }
# """
# self.ws_connect(self.WSS_V2, self.apiKey, self.secretKey)
# self.sendMarketDataRequest(contract, uri="batch-subscribe")
# ----------------------WSS_V4:账户信息订阅-----------------------
def subscribeAccount(self):
""" account name 必须为{exchange}/{account}的类型, 不支持模拟账户"""
host = os.path.join(self.WSS_ACCOUNT, account)
self.ws_connect(data_type="info")
self.ws['info'].send(json.dumps({"uri": "sub-info"}))
def unsubscribeTick(self, contract):
"""
逐笔与tick数据支持订阅后退订
{
"uri": "unsubscribe-single-tick-verbose",
"contract": "bitfinex/btc.usd"
}
"""
tradeStr = {}
tradeStr['uri'] = "unsubscribe-single-tick-verbose"
tradeStr['contract'] = contract
params = json.dumps(tradeStr)
self.ws.send(params)
def unsubscribeZhubi(self, contract):
"""
逐笔与tick数据支持订阅后退订
{
"uri": "unsubscribe-single-zhubi-verbose",
"contract": "bitfinex/btc.usd"
}
"""
tradeStr = {}
tradeStr['uri'] = "unsubscribe-single-zhubi-verbose"
tradeStr['contract'] = contract
return json.dumps(tradeStr)
def sendMarketDataRequest(self, ws, contract, **kwargs):
    """Serialize and send a market-data request on *ws*.

    A single contract string goes under the "contract" key; a list goes
    under "contracts".  Any extra keyword arguments (uri, duration, ...)
    are merged into the request.  A closed websocket is silently ignored.
    """
    request = {}
    if isinstance(contract, str):
        request["contract"] = contract
    elif isinstance(contract, list):
        request["contracts"] = contract
    request.update(kwargs)
    try:
        ws.send(json.dumps(request))
    except websocket.WebSocketConnectionClosedException:
        # Connection already torn down; nothing useful to do here.
        pass
def unsubscribeLowFreqQuote(self, contract):
    """Build the JSON request that cancels a 24h low-frequency quote feed.

    Arguments
        contract: a contract string or a list of contract strings.

    Returns the serialized request::

        {"uri": "batch-unsubscribe",
         "contracts": ["huobip/btc.usdt", "huobip/ht.usdt"]}
    """
    # BUG FIX: the batch-subscribe/unsubscribe protocol (see the
    # subscribe counterpart's documented request shape) expects a
    # "contracts" list, but the original sent a singular "contract"
    # key.  A bare string is wrapped into a one-element list.
    contracts = contract if isinstance(contract, list) else [contract]
    return json.dumps({"uri": "batch-unsubscribe", "contracts": contracts})
def onMessage(self, ws, evt):
    """Handle an incoming websocket message.

    tick/bar payloads are decoded via self.readData first; everything is
    then printed.
    """
    if ws.data_type in ("tick", "bar"):
        evt = self.readData(evt)
    print(evt)
# def onError(self, ws, evt):
# """错误推送"""
# print('onError')
# print(evt)
#
# def onClose(self, ws):
# """接口断开"""
# print('onClose')
#
# def onOpen(self, ws):
# """接口打开"""
# print('onOpen')
# # self.auth()
|
daemon_tunnel_example.py | # daemon_tunnel_example.py
"""
Example showing how to use the DaemonTunnel to create an SSH tunnel to an
object sitting on a remote server. This uses the daemon's URI on the remote
server.
Note that I use "remote_alias" as argument for remote_server_name. This
corresponds to some SSH config alias that's sitting on my "~/.ssh/config".
For this example, I'm assuming you've got a "BasicServer" instance running on
the remote server (see basic_pyro4_server.py)
"""
import Pyro4
import trifeni
# URI of the remote object we want to reach through the SSH tunnel.
uri = "PYRO:BasicServer@localhost:55000"
# We can use the with statement to make sure that the tunnels get destroyed
# responsibly. If not, we _have_ to call shutdown on the DaemonTunnel object.
with trifeni.DaemonTunnel(remote_server_name="remote_alias") as dt:
    obj_proxy = dt.get_remote_object(uri)
    print(obj_proxy.square(10))
# Without the with statement we have to do the following:
dt = trifeni.DaemonTunnel(remote_server_name="remote_alias")
obj_proxy = dt.get_remote_object(uri)
obj_proxy.square(10)
# Tear the tunnel down explicitly -- without this the SSH tunnel stays open.
dt.shutdown()
# Using a custom Pyro4.Proxy subclass
class MyProxy(Pyro4.core.Proxy):
    # Placeholder subclass: demonstrates that get_remote_object() can build
    # its proxy from any Pyro4.core.Proxy subclass via proxy_class=.
    pass
# Same call as before, but the returned proxy is a MyProxy instance.
with trifeni.DaemonTunnel(remote_server_name="remote_alias") as dt:
    obj_proxy = dt.get_remote_object(uri, proxy_class=MyProxy)
    obj_proxy.square(1e4)
# There are situations where it's useful to create a reverse tunnel from the
# remote server to the local one, so the server can access methods that
# are registered locally. Obviously the following example is a little overkill,
# but it might be useful to use callbacks when calling methods that take a long
# time to call (longer than the timeout of the server).
import threading
class Callbacks(object):
    """Locally registered handlers the remote server can call back into."""
    @Pyro4.expose
    def some_callback(self, res):
        # Invoked remotely with the result of the long-running call.
        print(res)
        # do something with res
callbacks = Callbacks()
fresh_daemon = Pyro4.Daemon()
fresh_daemon_uri = fresh_daemon.register(callbacks)
# Serve the local daemon on a background thread so the remote server can
# reach our callback object; daemon=True lets the script exit cleanly.
daemon_thread = threading.Thread(target=fresh_daemon.requestLoop)
daemon_thread.daemon = True
daemon_thread.start()
# Register a reverse tunnel so the remote server can reach the local daemon,
# then kick off the long-running call with callback routing information.
with trifeni.DaemonTunnel(remote_server_name="remote_alias") as dt:
    dt.register_remote_daemon(fresh_daemon)
    obj_proxy = dt.get_remote_object(uri, proxy_class=MyProxy)
    # BUG FIX: the dict literal was missing the comma between its two
    # entries, which made this example a SyntaxError.
    obj_proxy.square_with_callback(100, callback_info={
        "handler": Pyro4.Proxy(fresh_daemon_uri),
        "callback_name": "some_callback",
    })
|
conftest.py | # -*- coding: utf-8 -*-
from __future__ import (absolute_import,
unicode_literals, print_function, division)
import threading
try:
import mock
except ImportError:
import unittest.mock as mock
import pytest
import valve.source.a2s
import valve.source.master_server
import valve.testing
def srcds_functional(**filter_):
    """Mark a test case for SRCDS functional parametrisation.

    The decorated test is parametrised (see pytest_generate_tests) with an
    ``address`` fixture -- a two-item address tuple for each public Source
    server returned by the master server.  Every keyword argument becomes
    part of the master-server filter string, e.g.::

        @srcds_functional(gamedir="tf")
        def test_foo(address):
            pass

    restricts the run to TF2 servers.  Other filter options:
    https://developer.valvesoftware.com/wiki/Master_Server_Query_Protocol#Filter
    """
    def attach_filter(function):
        # Stash the filter on the function for pytest_generate_tests.
        function._srcds_filter = filter_
        return function
    return attach_filter
def pytest_addoption(parser):
    """Register the command-line flags controlling SRCDS functional tests."""
    parser.addoption("--srcds-functional",
                     action="store_true",
                     default=False,
                     dest="srcds_functional",
                     help="Enable A2S functional tests against 'real' servers")
    parser.addoption("--srcds-functional-limit",
                     action="store",
                     type=int,
                     default=20,
                     help=("Limit the number of servers srcds_functional "
                           "tests are ran against. Set to 0 to run against "
                           "*all* servers -- warning: really slow"),
                     dest="srcds_functional_limit")
def pytest_generate_tests(metafunc):
    """Parametrise ``srcds_functional`` tests with live server addresses.

    Applies an ``address`` parametrised fixture to every test marked with
    the srcds_functional decorator; each value is a two-item address of a
    public Source server found through the MasterServerQuerier, using the
    filters attached by the decorator.
    """
    if hasattr(metafunc.function, "_srcds_filter"):
        if not metafunc.config.getoption("srcds_functional"):
            pytest.skip("--srcds-functional not enabled")
        if "address" not in metafunc.fixturenames:
            raise Exception("You cannot use the srcds_functional decorator "
                            "without requesting an 'address' fixture")
        msq = valve.source.master_server.MasterServerQuerier()
        server_addresses = []
        # 0 disables the limit (falsy), so *all* servers are collected.
        address_limit = metafunc.config.getoption("srcds_functional_limit")
        # NOTE(review): .pop() mutates the decorator's filter dict in place,
        # so 'region' is gone for any later collection pass -- confirm
        # this is intended.
        region = metafunc.function._srcds_filter.pop('region', 'eu')
        try:
            for address in msq.find(region=region,
                                    **metafunc.function._srcds_filter):
                if address_limit:
                    if len(server_addresses) >= address_limit:
                        break
                server_addresses.append(address)
        except valve.source.NoResponseError:
            # Master server went quiet -- run with whatever was collected.
            pass
        metafunc.parametrize("address", server_addresses)
def pytest_namespace():
    """Expose the mock helpers and srcds_functional under the pytest.* namespace."""
    return dict(
        Mock=mock.Mock,
        MagicMock=mock.MagicMock,
        srcds_functional=srcds_functional,
    )
@pytest.yield_fixture
def rcon_server():
    """Yield a throw-away RCON test server running on a background thread.

    The server is shut down and its thread joined at fixture teardown.
    NOTE(review): ``pytest.yield_fixture`` is deprecated in modern pytest
    in favour of plain ``pytest.fixture`` -- confirm the pinned version.
    """
    server = valve.testing.TestRCONServer()
    thread = threading.Thread(target=server.serve_forever)
    thread.start()
    yield server
    server.shutdown()
    thread.join()
|
instrument.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Abstract serial instrument
"""
import queue
import threading
import logging
from time import sleep
__author__ = "Brent Maranzano"
__license__ = "MIT"
logger = logging.getLogger("instrument")
class Instrument(object):
    """Abstract serial instrument providing state retention and
    asynchronous, queue-based command processing.
    """

    def __init__(self, port):
        """Record the device port and create the request queue.

        Arguments
            port (str): The device port
        """
        self._port = port
        self._queue = queue.Queue()

    def connect(self, port):
        """Connect to a serial port.  No-op here; subclasses override.

        Arguments
            port (str): The device port
        """
        pass

    def _process_queue(self):
        """Consume requests from the queue forever, dispatching each one.

        NOTE(review): relies on self._process_request being supplied by a
        subclass -- it is not defined on this base class.
        """
        logger.info("process queue thread starting")
        while True:
            pending = self._queue.get()
            self._process_request(**pending)

    def _queue_request(self, **request):
        """Enqueue a service request for asynchronous processing.

        Arguments
            request (dict): details of the service request, e.g. a
                command name and a callback for the command results.
        """
        self._queue.put(request)

    def _start_threads(self):
        """Start the daemon worker thread that drains the request queue."""
        worker = threading.Thread(target=self._process_queue, daemon=True)
        worker.start()
|
model.py | #!/usr/bin/env python3
from os import environ, path
from queue import Queue
from threading import Thread
from typing import Callable, List, NoReturn, Optional, Union, cast
import torch
import numpy as np
from loguru import logger
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import random
from nltk import sent_tokenize
import locationtagger
# Model selection comes from the environment; only two names are valid.
MODEL_NAME: str = environ.get('MODEL_NAME', 'gpt2')
try:
    # 'gpt2' resolves through the transformers hub cache; '4chan' points at
    # a local ../weights directory relative to the working directory.
    WEIGHTS_DIR: str = {'gpt2': 'gpt2', '4chan': path.abspath(path.join(path.dirname(path.curdir), '../weights'))}[MODEL_NAME]
    logger.debug(f"Using '{MODEL_NAME}' model.")
except KeyError:
    raise ValueError("Environment variable MODEL_NAME must be either 'gpt2' or '4chan'")
MAX_LENGTH = 10000  # Hardcoded max length to avoid infinite loop
def set_random_seed(seed, n_gpu):
    """Seed the numpy and torch RNGs (and every CUDA device when GPUs exist)."""
    for seed_fn in (np.random.seed, torch.manual_seed):
        seed_fn(seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def adjust_seq_length_to_model(length: int, max_sequence_length: int):
    """Clamp a requested generation length to what the model supports.

    A negative *length* means "as long as possible": the model's limit if
    it has one, otherwise the global MAX_LENGTH cap (avoids an infinite
    generation loop).  Otherwise the request is capped at the model limit.
    """
    if length < 0:
        return max_sequence_length if max_sequence_length > 0 else MAX_LENGTH
    if 0 < max_sequence_length < length:
        return max_sequence_length  # No generation bigger than model size
    return length
def generate_text(
    prompt_text: str,
    k: int = 50,
    p: float = 0.9,
    seq_length: int = 150,
    seed: Optional[int] = None,
    temperature: float = 1.0,
    num_return_sequences: int = 5
):
    """Create synthetic text sequences using the pretrained GPT-2 model.

    Arguments
        prompt_text: seed text prepended to every generated sequence.
        k / p: top-k and nucleus-sampling parameters.
        seq_length: requested length; clamped by adjust_seq_length_to_model.
        seed: RNG seed; a random one is drawn when None.
        temperature: softmax temperature for sampling.
        num_return_sequences: number of sequences to generate.

    Returns a list of generated strings, each starting with *prompt_text*.
    """
    device: torch.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # NOTE(review): comparing a torch.device to the string 'cpu' may not be
    # equal on all torch versions, in which case device_count() is always
    # consulted (harmless on CPU-only hosts, where it is 0) -- confirm.
    n_gpu = 0 if device == 'cpu' else torch.cuda.device_count()
    repetition_penalty: float = 1.0  # Primarily used for CTRL model, so hardcoding this value
    stop_token: str = "<|endoftext|>"
    set_random_seed(seed or np.random.randint(0, 1000000), n_gpu)
    # Initialize the model and tokenizer (loaded per call; no caching).
    model_class, tokenizer_class = (GPT2LMHeadModel, GPT2Tokenizer)
    tokenizer: GPT2Tokenizer = tokenizer_class.from_pretrained(WEIGHTS_DIR)
    model = model_class.from_pretrained(WEIGHTS_DIR)
    model.to(device)
    encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=True, return_tensors="pt")
    encoded_prompt = encoded_prompt.to(device)
    # An empty prompt means unconditional generation.
    if encoded_prompt.size()[-1] == 0:
        input_ids = None
    else:
        input_ids = encoded_prompt
    max_length = adjust_seq_length_to_model(seq_length, max_sequence_length=model.config.max_position_embeddings)
    output_sequences = model.generate(
        input_ids=input_ids,
        max_length=max_length + len(encoded_prompt[0]),
        temperature=temperature,
        top_k=k,
        top_p=p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        num_return_sequences=num_return_sequences,
    )
    # Remove the batch dimension when returning multiple sequences
    if len(output_sequences.shape) > 2:
        output_sequences.squeeze_()
    generated_sequences = []
    for _, generated_sequence in enumerate(output_sequences):
        generated_sequence = generated_sequence.tolist()
        text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
        # Remove all text after the stop token
        text = text[:text.find(stop_token) if stop_token else None]
        # Add the prompt at the beginning of the sequence. Remove the excess text that was used for pre-processing
        total_sequence = (prompt_text + text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)):])
        generated_sequences.append(total_sequence)
    return generated_sequences
def new_text_generator(name: str, prompt_text: Union[str, Callable[[], str]], **kwargs) -> Queue:
    """Start a background producer that keeps a queue of generated texts.

    Arguments
        name: label used only for logging.
        prompt_text: the prompt, or a zero-arg callable producing a fresh
            prompt per batch.
        kwargs: forwarded to generate_text (num_return_sequences is
            overridden with the batch size).

    Returns the bounded queue the producer fills; queue.put blocks once
    the queue holds API_BATCH_SIZE items, throttling generation.
    """
    BATCH_SIZE: int = int(environ.get('API_BATCH_SIZE', 5))
    queue: Queue = Queue(maxsize=int(BATCH_SIZE))
    def generate_to_queue() -> NoReturn:
        # Runs forever; blocks on queue.put when consumers fall behind.
        while True:
            kwargs['num_return_sequences'] = BATCH_SIZE
            prompt: str = prompt_text() if callable(prompt_text) else prompt_text
            logger.debug(f'Generating {BATCH_SIZE} more sequences for {name}')
            batch: List[str] = generate_text(prompt, **kwargs)
            logger.debug(f'Done generating sequences for {name}')
            for item in batch:
                queue.put(item)
    logger.debug(f'Starting generation thread for {name}')
    # NOTE(review): the thread is not a daemon, so the interpreter cannot
    # exit while it runs -- confirm this is intended.
    generate_thread: Thread = Thread(target=generate_to_queue)
    generate_thread.start()
    return queue
# Literal substitutions applied to generated text before location fix-up:
# strips ad markers and pull-quote tags, flattens newlines, drops quotes.
TEXT_REPLACEMENTS = {
    '\nAdvertisement\n': '',
    '[pullquote]': '',
    '\n': ' ',
    '"': '',
}
def clean_text(text: str, city: Union[str, List[str]], state: Union[str, List[str]], country: Union[str, List[str]]) -> str:
    """Normalise generated text and localise the place names it mentions.

    Applies the literal TEXT_REPLACEMENTS, drops the (often truncated)
    last sentence, strips non-ASCII characters, then replaces any city /
    region / country found in the text with the supplied values.

    Each of *city*, *state*, *country* may be a single name or a list of
    candidate names; a random candidate is chosen per replacement.
    """
    for old, new in TEXT_REPLACEMENTS.items():
        text = text.replace(old, new)
    text = ' '.join(sent_tokenize(text)[:-1])  # TODO: Not sure why this is here, it just removes the last sentence.
    text = text.encode('ascii', 'ignore').decode()  # Removes leftover unicode characters

    def random_choice_or_str(values: Union[str, List[str]]) -> str:
        # IDIOM FIX: isinstance() instead of type(...) == list; also
        # accepts list subclasses.
        if isinstance(values, list):
            return random.choice(values)
        return cast(str, values)

    # Replace references to locations to match the correct city/state/country.
    place = locationtagger.find_locations(text=text)
    for replacements, found in ((city, place.cities), (state, place.regions), (country, place.countries)):
        for item in found:
            # `in` does substring matching when replacements is a string --
            # preserved from the original behaviour.
            if not (item in replacements or item == replacements):
                logger.debug(f'Replacing {item} with {replacements} because item != replacement')
                text = text.replace(item, random_choice_or_str(replacements))
    return text
|
ipython.py | r"""
The ParaViewWeb iPython module is used as a helper to create custom
iPython notebook profile.
The following sample show how the helper class can be used inside
an iPython profile.
# Global python import
import exceptions, logging, random, sys, threading, time, os
# Update python path to have ParaView libs
pv_path = '/.../ParaView/build'
sys.path.append('%s/lib' % pv_path)
sys.path.append('%s/lib/site-packages' % pv_path)
# iPython import
from IPython.display import HTML
from IPython.parallel import Client
import paraview
from paraview.web import ipython as pv_ipython
from vtk import *
iPythonClient = None
paraviewHelper = pv_ipython.ParaViewIPython()
webArguments = pv_ipython.WebArguments('/.../path-to-web-directory')
def _start_paraview():
paraviewHelper.Initialize()
paraviewHelper.SetWebProtocol(pv_ipython.IPythonProtocol, webArguments)
return paraviewHelper.Start()
def _stop_paraview():
paraviewHelper.Finalize()
def _pv_activate_dataset():
pv_ipython.IPythonProtocol.ActivateDataSet('iPython-demo')
def _push_new_timestep():
# processing code generating new vtkDataSet
# newDataset = ...
pv_ipython.IPythonProtocol.RegisterDataSet('iPython-demo', newDataset)
def StartParaView(height=600, path='/apps/Visualizer/'):
global iPythonClient, paraviewHelper
if not iPythonClient:
iPythonClient = Client(profile='pvw')
urls = iPythonClient[:].apply_sync(lambda:_start_paraview())
url = ""
for i in urls:
if len(i) > 0:
url = i
return HTML("<iframe src='%s%s' width='100%%' height='%i'></iframe>"%(url, path, height))
def StopParaView():
global iPythonClient, paraviewHelper
iPythonClient[:].apply_sync(lambda:_stop_paraview())
def ActivateDataSet():
iPythonClient[:].apply_sync(lambda:_pv_activate_dataset())
def ComputeNextTimeStep():
global iPythonClient
if not iPythonClient:
iPythonClient = Client(profile='pvw')
iPythonClient[:].apply_sync(lambda:_push_new_timestep())
"""
import exceptions, traceback, logging, random, sys, threading, time, os, paraview
from mpi4py import MPI
from vtkmodules.web import server
from paraview.vtk import *
from vtkmodules.vtkCommonCore import *
from vtkmodules.vtkCommonDataModel import *
from vtkmodules.vtkCommonExecutionModel import *
from vtkmodules.vtkFiltersSources import *
from vtkmodules.vtkParallelCore import *
from vtkmodules.vtkPVClientWeb import *
from vtkmodules.vtkRemotingCore import *
from vtkmodules.vtkRemotingApplication import *
from vtkmodules.vtkRemotingServerManager import *
from vtkmodules.vtkRemotingViews import *
from vtkmodules.vtkWebCore import *
from paraview.web import wamp as pv_wamp
#------------------------------------------------------------------------------
# Global variables
#------------------------------------------------------------------------------
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
#------------------------------------------------------------------------------
# Global internal methods
#------------------------------------------------------------------------------
def _get_hostname():
    """Return a fully qualified hostname for this machine.

    Uses the local hostname directly when it already contains a dot,
    otherwise falls back to a reverse lookup for the canonical name.
    """
    import socket
    # IDIOM FIX: the original called socket.gethostname() up to three
    # times; hoist the call so the value cannot change between uses.
    hostname = socket.gethostname()
    if '.' in hostname:
        return hostname
    return socket.gethostbyaddr(hostname)[0]
#------------------------------------------------------------------------------
# ParaView iPython helper class
#------------------------------------------------------------------------------
class ParaViewIPython(object):
    """Drives ParaView inside an IPython cluster.

    Initialize() sets up the process module and (for MPI runs) the global
    and partitioned controllers; Start() then runs the web server on the
    root rank and satellite RMI loops on all other ranks.
    """
    # Class-level (shared) state: one ParaView runtime per process.
    processModule = None
    globalController = None
    localController = None
    webProtocol = None
    webArguments = None
    processId = -1
    number_of_process = -1

    def Initialize(self, log_file_path = None, logging_level = logging.DEBUG):
        """Initialize ParaView and the MPI controllers (idempotent).

        Arguments
            log_file_path: when given, a per-process FileHandler
                '<path>-<rank>.txt' is attached to the module logger.
            logging_level: level for that file handler.
        """
        if not ParaViewIPython.processModule:
            vtkInitializationHelper.Initialize("ipython-notebook", 4) # 4 is type of process
            ParaViewIPython.processModule = vtkProcessModule.GetProcessModule()
            ParaViewIPython.globalController = ParaViewIPython.processModule.GetGlobalController()
            # Under MPI with a missing/dummy controller, build a real
            # vtkMPIController and install it globally.
            if MPI.COMM_WORLD.Get_size() > 1 and (ParaViewIPython.globalController is None or ParaViewIPython.globalController.IsA("vtkDummyController") == True):
                import vtkParallelMPI
                ParaViewIPython.globalController = vtkParallelMPI.vtkMPIController()
                ParaViewIPython.globalController.Initialize()
                ParaViewIPython.globalController.SetGlobalController(ParaViewIPython.globalController)
            ParaViewIPython.processId = ParaViewIPython.globalController.GetLocalProcessId()
            ParaViewIPython.number_of_process = ParaViewIPython.globalController.GetNumberOfProcesses()
            ParaViewIPython.localController = ParaViewIPython.globalController.PartitionController(ParaViewIPython.number_of_process, ParaViewIPython.processId)
            # must unregister if the reference count is greater than 1
            if ParaViewIPython.localController.GetReferenceCount() > 1:
                ParaViewIPython.localController.UnRegister(None)
            ParaViewIPython.globalController.SetGlobalController(ParaViewIPython.localController)
            if log_file_path:
                formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
                fh = logging.FileHandler('%s-%s.txt' % (log_file_path, str(ParaViewIPython.processId)))
                fh.setLevel(logging_level)
                fh.setFormatter(formatter)
                logger.addHandler(fh)
            logger.info("Process %i initialized for ParaView" % os.getpid())
            logger.info("Sub-Controller: " + str(ParaViewIPython.localController.GetLocalProcessId()) + "/" + str(ParaViewIPython.localController.GetNumberOfProcesses()))
            logger.info("GlobalController: " + str(ParaViewIPython.processId) + "/" + str(ParaViewIPython.number_of_process))
        else:
            logger.info("ParaView has already been initialized. No operation was performed.")

    def Finalize(self):
        """Tear ParaView down; safe to call when never initialized."""
        if ParaViewIPython.processModule:
            vtkInitializationHelper.Finalize()
            ParaViewIPython.processModule = None

    def GetProcessId(self):
        # Rank of this process in the global controller.
        return ParaViewIPython.processId

    def GetNumberOfProcesses(self):
        return ParaViewIPython.number_of_process

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "Host: %s - Controller: %s - Rank: %d/%d" % (_get_hostname(), ParaViewIPython.localController.GetClassName(), ParaViewIPython.processId, ParaViewIPython.number_of_process)

    def SetWebProtocol(self, protocol, arguments):
        """Attach the web protocol class and its arguments (port defaults to 8080)."""
        ParaViewIPython.webProtocol = protocol
        ParaViewIPython.webArguments = arguments
        if not hasattr(ParaViewIPython.webArguments, 'port'):
            ParaViewIPython.webArguments.port = 8080
        # Only rank 0 acts as the web-server root node.
        ParaViewIPython.webProtocol.rootNode = (self.GetProcessId() == 0)
        ParaViewIPython.webProtocol.updateArguments(ParaViewIPython.webArguments)

    @staticmethod
    def _start_satelite():
        # Non-root ranks connect to themselves and service RMIs until the
        # root rank triggers a break (see _start_web_server).
        logger.info('ParaView Satelite %d - Started' % ParaViewIPython.processId)
        sid = vtkSMSession.ConnectToSelf();
        vtkWebUtilities.ProcessRMIs()
        ParaViewIPython.processModule.UnRegisterSession(sid);
        logger.info('ParaView Satelite %d - Ended' % ParaViewIPython.processId)

    @staticmethod
    def _start_web_server():
        # Blocks for the lifetime of the web server; on shutdown it
        # disconnects and releases the satellite ranks from their RMI loop.
        server.start_webserver(options=ParaViewIPython.webArguments, protocol=ParaViewIPython.webProtocol)
        from paraview import simple
        simple.Disconnect()
        ParaViewIPython.localController.TriggerBreakRMIs()

    @staticmethod
    def debug():
        for i in range(10):
            logger.info('In debug loop ' + str(i))

    def Start(self):
        """Launch the per-rank worker thread.

        Rank 0 starts the web server and returns its URL (after a fixed
        10s grace period for startup); other ranks start a satellite loop
        and return an empty string.
        """
        thread = None
        if self.GetProcessId() == 0:
            thread = threading.Thread(target=ParaViewIPython._start_web_server)
            thread.start()
            time.sleep(10)
            logger.info("WebServer thread started")
            return "http://%s:%d" % (_get_hostname(), ParaViewIPython.webArguments.port)
        else:
            thread = threading.Thread(target=ParaViewIPython._start_satelite)
            thread.start()
            logger.info("Satelite thread started")
            return ""
#------------------------------------------------------------------------------
# ParaView iPython protocol
#------------------------------------------------------------------------------
class IPythonProtocol(pv_wamp.PVServerProtocol):
    """ParaViewWeb protocol used from IPython.

    Class attributes mirror the WebArguments options (copied in by
    updateArguments) and hold the shared trivial producer used to push
    datasets to connected clients.
    """
    rootNode = False
    dataDir = None
    authKey = "vtkweb-secret"
    fileToLoad = None
    producer = None
    groupRegex = "[0-9]+\\."
    excludeRegex = "^\\.|~$|^\\$"

    @staticmethod
    def ActivateDataSet(key):
        # Only the root node owns a producer; clearing then setting
        # UpdateDataset forces a refresh even when *key* is unchanged.
        if IPythonProtocol.rootNode and IPythonProtocol.producer:
            IPythonProtocol.producer.UpdateDataset = ''
            IPythonProtocol.producer.UpdateDataset = key

    @staticmethod
    def RegisterDataSet(key, dataset):
        """Publish *dataset* under *key* so ActivateDataSet can display it."""
        vtkDistributedTrivialProducer.SetGlobalOutput(key, dataset)

    @staticmethod
    def updateArguments(options):
        """Copy web-server options onto the protocol class attributes."""
        IPythonProtocol.dataDir = options.dataDir
        # BUG FIX: authKey was assigned twice in the original; the
        # redundant duplicate assignment has been removed.
        IPythonProtocol.authKey = options.authKey
        IPythonProtocol.fileToLoad = options.fileToLoad
        IPythonProtocol.groupRegex = options.groupRegex
        IPythonProtocol.excludeRegex = options.excludeRegex

    def initialize(self):
        """Connect to ParaView, create the shared producer once, register
        the standard web protocols and install the authentication key."""
        from paraview import simple
        from paraview.web import protocols as pv_protocols
        # Make sure ParaView is initialized
        if not simple.servermanager.ActiveConnection:
            simple.Connect()
        if not IPythonProtocol.producer:
            IPythonProtocol.producer = simple.DistributedTrivialProducer()
            IPythonProtocol.ActivateDataSet('iPython-demo')
            simple.Show(IPythonProtocol.producer)
            simple.Render()
        # Bring used components
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebFileListing(IPythonProtocol.dataDir, "Home", IPythonProtocol.excludeRegex, IPythonProtocol.groupRegex))
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebPipelineManager(IPythonProtocol.dataDir, IPythonProtocol.fileToLoad))
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebMouseHandler())
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPort())
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortImageDelivery())
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortGeometryDelivery())
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebTimeHandler())
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebRemoteConnection())
        self.registerVtkWebProtocol(pv_protocols.ParaViewWebFileManager(IPythonProtocol.dataDir))
        # Update authentication key to use
        self.updateSecret(IPythonProtocol.authKey)

    def __str__(self):
        return "Root node: " + str(IPythonProtocol.rootNode)
#------------------------------------------------------------------------------
# ParaView iPython default arguments
#------------------------------------------------------------------------------
class WebArguments(object):
    """Default option holder for the ParaView web server.

    Mimics the argparse namespace the server expects, with sensible
    defaults for every option.
    """

    def __init__(self, webDir = None):
        """Populate all server options.

        Arguments
            webDir (str): directory of static web content to serve.
        """
        self.__dict__.update(
            content=webDir,
            port=8080,
            host='localhost',
            debug=0,
            timeout=120,
            nosignalhandlers=True,
            authKey='vtkweb-secret',
            uploadDir="",
            testScriptPath="",
            baselineImgDir="",
            useBrowser="",
            tmpDirectory="",
            testImgFile="",
            forceFlush=False,
            dataDir='.',
            groupRegex="[0-9]+\\.",
            excludeRegex="^\\.|~$|^\\$",
            fileToLoad=None,
        )

    def __str__(self):
        return "http://%s:%d/%s" % (self.host, self.port, self.content)
|
app.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import unicode_literals, print_function
import datetime
import json
import math
import os
import re
import subprocess
import sys
from threading import Thread
from six.moves import configparser
from knack.log import get_logger
from knack.util import CLIError
from azure.cli.core.commands.client_factory import ENV_ADDITIONAL_USER_AGENT
from azure.cli.core._profile import _SUBSCRIPTION_NAME, Profile
from azure.cli.core._session import ACCOUNT, CONFIG, SESSION
from azure.cli.core.api import get_config_dir
from azure.cli.core.util import handle_exception
# pylint: disable=import-error
import jmespath
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.document import Document
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.filters import Always
from prompt_toolkit.history import FileHistory
from prompt_toolkit.interface import Application, CommandLineInterface
from prompt_toolkit.shortcuts import create_eventloop
# pylint: enable=import-error
from . import VERSION
from .az_completer import AzCompleter
from .az_lexer import get_az_lexer, ExampleLexer, ToolbarLexer
from .configuration import Configuration, SELECT_SYMBOL
from .frequency_heuristic import DISPLAY_TIME, frequency_heuristic
from .gather_commands import add_new_lines, GatherCommands
from .key_bindings import InteractiveKeyBindings
from .layout import LayoutManager
from .progress import progress_view
from . import telemetry
from .threads import LoadCommandTableThread
from .util import get_window_dim, parse_quotes, get_os_clear_screen_word
# Shared UI constants for the interactive shell.
NOTIFICATIONS = ""
PART_SCREEN_EXAMPLE = .3  # fraction of terminal rows used by the example pane
START_TIME = datetime.datetime.utcnow()  # used for the timed feedback nudge
CLEAR_WORD = get_os_clear_screen_word()
logger = get_logger(__name__)
def space_toolbar(settings_items, empty_space):
    """Join toolbar entries with even spacing.

    Returns (settings, remaining_space): the joined toolbar text and the
    filler left over once notifications and the toolbar are placed.
    """
    total_chars = sum(len(part) for part in settings_items)
    if len(settings_items) == 1:
        spacing = ''
    else:
        # Distribute the free width evenly between the entries.
        gap_width = int(math.floor(
            (len(empty_space) - total_chars) / (len(settings_items) - 1)))
        spacing = empty_space[:gap_width]
    settings = spacing.join(settings_items)
    empty_space = empty_space[len(NOTIFICATIONS) + len(settings) + 1:]
    return settings, empty_space
# pylint: disable=too-many-instance-attributes
class AzInteractiveShell(object):
def __init__(self, cli_ctx, style=None, completer=None,
             lexer=None, history=None,
             input_custom=sys.stdin, output_custom=None,
             user_feedback=False, intermediate_sleep=.25, final_sleep=4):
    """Wire up configuration, completion, lexing, history and UI state.

    Arguments
        cli_ctx: the azure-cli context this shell runs inside.
        style: optional color style name, persisted to configuration.
        completer/lexer/history: optional overrides for the defaults
            built from the cached command table.
        input_custom/output_custom: alternate I/O streams.
        user_feedback: whether the feedback toolbar nudge is active.
        intermediate_sleep/final_sleep: progress-spinner timings.
    """
    from .color_styles import style_factory
    self.cli_ctx = cli_ctx
    self.config = Configuration(cli_ctx.config, style=style)
    self.config.set_style(style)
    self.style = style_factory(self.config.get_style())
    try:
        gathered_commands = GatherCommands(self.config)
        self.completer = completer or AzCompleter(self, gathered_commands)
        self.completer.initialize_command_table_attributes()
        self.lexer = lexer or get_az_lexer(gathered_commands)
    except IOError:  # if there is no cache
        # Fall back to a bare completer; the lexer is built later by
        # restart_completer once the command table is available.
        self.completer = AzCompleter(self, None)
        self.lexer = None
    self.history = history or FileHistory(os.path.join(self.config.get_config_dir(), self.config.get_history()))
    os.environ[ENV_ADDITIONAL_USER_AGENT] = 'AZURECLISHELL/' + VERSION
    # OH WHAT FUN TO FIGURE OUT WHAT THESE ARE!
    self._cli = None                  # lazily built by the `cli` property
    self.layout = None
    self.description_docs = u''      # command description pane text
    self.param_docs = u''            # parameter help pane text
    self.example_docs = u''          # examples pane text
    self.last = None
    self.last_exit = 0
    self.user_feedback = user_feedback
    self.input = input_custom
    self.output = output_custom
    self.config_default = ""
    self.default_command = ""
    self.threads = []
    self.curr_thread = None
    self.spin_val = -1
    self.intermediate_sleep = intermediate_sleep
    self.final_sleep = final_sleep
    self.command_table_thread = None
    # try to consolidate state information here...
    # Used by key bindings and layout
    self.example_page = 1
    self.is_prompting = False
    self.is_example_repl = False
    self.is_showing_default = False
    self.is_symbols = True
def __call__(self):
    """Run the interactive shell, refusing to nest inside itself."""
    if self.cli_ctx.data["az_interactive_active"]:
        logger.warning("You're in the interactive shell already.")
        return
    # First-run setup, then an occasional feedback prompt.
    if self.config.BOOLEAN_STATES[self.config.config.get('DEFAULT', 'firsttime')]:
        self.config.firsttime()
    if not self.config.has_feedback() and frequency_heuristic(self):
        print("\n\nAny comments or concerns? You can use the \'feedback\' command!" +
              " We would greatly appreciate it.\n")
    # Flag prevents `az interactive` from being started recursively.
    self.cli_ctx.data["az_interactive_active"] = True
    self.run()
    self.cli_ctx.data["az_interactive_active"] = False
@property
def cli(self):
    """Return the command-line interface, building and caching it on first access."""
    if self._cli is not None:
        return self._cli
    self._cli = self.create_interface()
    return self._cli
def handle_cd(self, cmd):
    """Change the working directory (implements the `cd <path>` command).

    Prints a usage message on wrong arity and reports chdir failures to
    the shell's output stream instead of raising.
    """
    if len(cmd) != 2:
        print("Invalid syntax: cd path", file=self.output)
        return
    target = os.path.expandvars(os.path.expanduser(cmd[1]))
    try:
        os.chdir(target)
    except OSError as ex:
        print("cd: %s\n" % ex, file=self.output)
def on_input_timeout(self, cli):
    """
    brings up the metadata for the command if there is a valid command already typed

    Refreshes the description, parameter, example and default-value
    buffers from the completer's view of the current input, then forces
    a redraw.
    """
    document = cli.current_buffer.document
    text = document.text
    # Strip the redundant `az ` prefix and apply any scoped default.
    text = text.replace('az ', '')
    if self.default_command:
        text = self.default_command + ' ' + text
    param_info, example = self.generate_help_text()
    self.param_docs = u'{}'.format(param_info)
    self.example_docs = u'{}'.format(example)
    self._update_default_info()
    cli.buffers['description'].reset(
        initial_document=Document(self.description_docs, cursor_position=0))
    cli.buffers['parameter'].reset(
        initial_document=Document(self.param_docs))
    cli.buffers['examples'].reset(
        initial_document=Document(self.example_docs))
    cli.buffers['default_values'].reset(
        initial_document=Document(
            u'{}'.format(self.config_default if self.config_default else 'No Default Values')))
    self._update_toolbar()
    cli.request_redraw()
def restart_completer(self):
    """Rebuild completion metadata after the command table changes.

    Re-gathers the cached command data, restarts the completer on it,
    creates the lexer if it is missing, and drops the cached CLI so it
    is rebuilt with the refreshed components.
    """
    command_info = GatherCommands(self.config)
    # BUG FIX: the original guard was `if not self.completer`, which would
    # call .start() on a missing completer (AttributeError) and skip
    # restarting a present one.  Restart only when a completer exists.
    if self.completer:
        self.completer.start(command_info)
        self.completer.initialize_command_table_attributes()
    if not self.lexer:
        self.lexer = get_az_lexer(command_info)
    self._cli = None
def _space_examples(self, list_examples, rows, section_value):
    """Format the numbered example text, paginated to a screen fraction.

    Arguments
        list_examples: sequence of (description, command) pairs.
        rows (int): terminal row count.
        section_value (int): 1-based page number to display.

    Returns the example text, plus a page indicator and paging key hint.
    """
    examples_with_index = []
    for i, _ in list(enumerate(list_examples)):
        if len(list_examples[i]) > 1:
            examples_with_index.append("[" + str(i + 1) + "] " + list_examples[i][0] +
                                       list_examples[i][1])
    example = "".join(exam for exam in examples_with_index)
    num_newline = example.count('\n')
    page_number = ''
    if num_newline > rows * PART_SCREEN_EXAMPLE and rows > PART_SCREEN_EXAMPLE * 10:
        len_of_excerpt = math.floor(float(rows) * PART_SCREEN_EXAMPLE)
        group = example.split('\n')
        end = int(section_value * len_of_excerpt)
        begin = int((section_value - 1) * len_of_excerpt)
        if end < num_newline:
            example = '\n'.join(group[begin:end]) + "\n"
        else:
            # default chops top off
            example = '\n'.join(group[begin:]) + "\n"
        # BUG FIX: the original loop decremented self.example_page while the
        # condition only read section_value, so a past-the-end page spun
        # forever.  Walk both counters back until the page is in range.
        while ((section_value - 1) * len_of_excerpt) > num_newline and section_value > 1:
            self.example_page -= 1
            section_value -= 1
        page_number = '\n' + str(section_value) + "/" + str(int(math.ceil(num_newline / len_of_excerpt)))
    return example + page_number + ' CTRL+Y (^) CTRL+N (v)'
def _update_toolbar(self):
    """Refresh the bottom toolbar buffer with the current status line."""
    cli = self.cli
    _, cols = get_window_dim()
    cols = int(cols)
    empty_space = " " * cols
    delta = datetime.datetime.utcnow() - START_TIME
    # Priority: timed feedback nudge, then loading indicator, then the
    # regular key-hint/subscription toolbar.
    if self.user_feedback and delta.seconds < DISPLAY_TIME:
        toolbar = [
            ' Try out the \'feedback\' command',
            'If refreshed disappear in: {}'.format(str(DISPLAY_TIME - delta.seconds))]
    elif self.command_table_thread.is_alive():
        toolbar = [
            ' Loading...',
            'Hit [enter] to refresh'
        ]
    else:
        toolbar = self._toolbar_info()
    toolbar, empty_space = space_toolbar(toolbar, empty_space)
    cli.buffers['bottom_toolbar'].reset(
        initial_document=Document(u'{}{}{}'.format(NOTIFICATIONS, toolbar, empty_space)))
def _toolbar_info(self):
    """Build the default toolbar entries: key hints plus subscription/cloud."""
    sub_name = ""
    try:
        profile = Profile(cli_ctx=self.cli_ctx)
        sub_name = profile.get_subscription()[_SUBSCRIPTION_NAME]
    except CLIError:
        # Not logged in / no subscription -- fall back to the cloud name.
        pass
    curr_cloud = "Cloud: {}".format(self.cli_ctx.cloud.name)
    tool_val = 'Subscription: {}'.format(sub_name) if sub_name else curr_cloud
    settings_items = [
        " [F1]Layout",
        "[F2]Defaults",
        "[F3]Keys",
        "[Ctrl+D]Quit",
        tool_val
    ]
    return settings_items
def generate_help_text(self):
    """ generates the help text based on commands typed

    Looks up the current (possibly partially typed) command in the
    completer's metadata and returns help snippets for display.

    :return: tuple(str, str) of (parameter description, examples text)
    """
    param_descrip = example = ""
    self.description_docs = u''
    rows, _ = get_window_dim()
    rows = int(rows)
    param_args = self.completer.leftover_args
    last_word = self.completer.unfinished_word
    command = self.completer.current_command
    new_command = ' '.join([command, last_word]).strip()
    if not self.completer.complete_command and new_command in self.completer.command_description:
        # the partially typed word completes a known command
        command = new_command
    # get command/group help
    if self.completer and command in self.completer.command_description:
        self.description_docs = u'{}'.format(self.completer.command_description[command])
    # get parameter help if full command
    if self.completer and command in self.completer.command_param_info:
        param = param_args[-1] if param_args else ''
        param = last_word if last_word.startswith('-') else param
        if param in self.completer.command_param_info[command] and self.completer.has_description(
                command + " " + param):
            param_descrip = ''.join([
                param, ":", '\n', self.completer.param_description.get(command + " " + param, '')])
    if command in self.completer.command_examples:
        # NOTE: a loop previously concatenated the raw examples into a string
        # whose ''.join(...) result was discarded; that dead code was removed.
        example = self._space_examples(
            self.completer.command_examples[command], rows, self.example_page)
    return param_descrip, example
def _update_default_info(self):
    """ Reads the CLI config defaults section into self.config_default
    as a display string of "option: value " pairs. """
    try:
        defaults_section = self.cli_ctx.config.defaults_section_name
        self.config_default = ""
        if hasattr(self.cli_ctx.config, 'config_parser'):
            options = self.cli_ctx.config.config_parser.options(defaults_section)
        else:
            # config implementation without a raw parser; nothing to show
            return
        for opt in options:
            self.config_default += opt + ": " + self.cli_ctx.config.get(defaults_section, opt) + " "
    except configparser.NoSectionError:
        # no defaults configured at all
        self.config_default = ""
def create_application(self, full_layout=True):
    """ makes the application object and the buffers

    :param full_layout: True for the full interactive layout,
        False for the reduced tutorial layout
    :return: a prompt_toolkit Application
    """
    layout_manager = LayoutManager(self)
    if full_layout:
        layout = layout_manager.create_layout(ExampleLexer, ToolbarLexer)
    else:
        layout = layout_manager.create_tutorial_layout()
    # one buffer per pane of the layout
    buffers = {
        DEFAULT_BUFFER: Buffer(is_multiline=True),
        'description': Buffer(is_multiline=True, read_only=True),
        'parameter': Buffer(is_multiline=True, read_only=True),
        'examples': Buffer(is_multiline=True, read_only=True),
        'bottom_toolbar': Buffer(is_multiline=True),
        'example_line': Buffer(is_multiline=True),
        'default_values': Buffer(),
        'symbols': Buffer(),
        'progress': Buffer(is_multiline=False)
    }
    # the buffer the user actually types into
    writing_buffer = Buffer(
        history=self.history,
        auto_suggest=AutoSuggestFromHistory(),
        enable_history_search=True,
        completer=self.completer,
        complete_while_typing=Always()
    )
    return Application(
        mouse_support=False,
        style=self.style,
        buffer=writing_buffer,
        on_input_timeout=self.on_input_timeout,
        key_bindings_registry=InteractiveKeyBindings(self).registry,
        layout=layout,
        buffers=buffers,
    )
def create_interface(self):
    """ instantiates the prompt_toolkit command line interface """
    return CommandLineInterface(
        application=self.create_application(),
        eventloop=create_eventloop())
def set_prompt(self, prompt_command="", position=0):
    """ writes the prompt line

    :param prompt_command: text to pre-fill the prompt with
    :param position: cursor position within that text
    """
    self.description_docs = u'{}'.format(prompt_command)
    self.cli.current_buffer.reset(
        initial_document=Document(
            self.description_docs,
            cursor_position=position))
    self.cli.request_redraw()
def set_scope(self, value):
    """ Appends *value* to the scoped default command and returns it. """
    prefix = self.default_command
    self.default_command = (prefix + ' ' + value) if prefix else (prefix + value)
    return value
def handle_example(self, text, continue_flag):
    """ parses for the tutorial

    Splits "command :: N" input, selects example N (1-based) of the
    command, and launches the interactive example walk-through.
    """
    cmd = text.partition(SELECT_SYMBOL['example'])[0].rstrip()
    num = text.partition(SELECT_SYMBOL['example'])[2].strip()
    example = ""
    try:
        num = int(num) - 1  # examples are displayed 1-based
    except ValueError:
        print("An Integer should follow the colon", file=self.output)
        return ""
    if cmd in self.completer.command_examples:
        if num >= 0 and num < len(self.completer.command_examples[cmd]):
            example = self.completer.command_examples[cmd][num][1]
            example = example.replace('\n', '')
        else:
            print('Invalid example number', file=self.output)
            return '', True
    # NOTE(review): removes every occurrence of 'az', not only the leading
    # program name -- confirm example text never contains 'az' elsewhere
    example = example.replace('az', '')
    starting_index = None
    counter = 0
    example_no_fill = ""
    flag_fill = True
    # keep words up to the first '-' option; the user fills in the values
    for word in example.split():
        if flag_fill:
            example_no_fill += word + " "
        if word.startswith('-'):
            example_no_fill += word + " "
            if not starting_index:
                starting_index = counter
            flag_fill = False
        counter += 1
    return self.example_repl(example_no_fill, example, starting_index, continue_flag)
def example_repl(self, text, example, start_index, continue_flag):
    """ REPL for interactive tutorials

    Walks the user through *example* one argument at a time starting at
    *start_index*; returns the assembled command and the continue flag.
    """
    if start_index:
        start_index = start_index + 1
        cmd = ' '.join(text.split()[:start_index])
        # a reduced (tutorial) interface running inside the main one
        example_cli = CommandLineInterface(
            application=self.create_application(
                full_layout=False),
            eventloop=create_eventloop())
        example_cli.buffers['example_line'].reset(
            initial_document=Document(u'{}\n'.format(
                add_new_lines(example)))
        )
        while start_index < len(text.split()):
            if self.default_command:
                cmd = cmd.replace(self.default_command + ' ', '')
            example_cli.buffers[DEFAULT_BUFFER].reset(
                initial_document=Document(
                    u'{}'.format(cmd),
                    cursor_position=len(cmd)))
            example_cli.request_redraw()
            answer = example_cli.run()
            if not answer:
                # user aborted the walk-through
                return "", True
            answer = answer.text
            if answer.strip('\n') == cmd.strip('\n'):
                # nothing new typed; prompt again
                continue
            else:
                if len(answer.split()) > 1:
                    # append the user's value plus the next templated word
                    start_index += 1
                    cmd += " " + answer.split()[-1] + " " +\
                        u' '.join(text.split()[start_index:start_index + 1])
        example_cli.exit()
        del example_cli
    else:
        cmd = text
    return cmd, continue_flag
# pylint: disable=too-many-statements
def _special_cases(self, cmd, outside):
    """ Routes non-command gestures (exit, clear, shell escape, exit-code,
    query, examples, scoping) before normal CLI execution.

    :return: (break_flag, continue_flag, outside, cmd)
    """
    break_flag = False
    continue_flag = False
    args = parse_quotes(cmd)
    cmd_stripped = cmd.strip()
    if not cmd_stripped and cmd:
        # add scope if there are only spaces
        cmd = self.default_command + " " + cmd
    elif cmd_stripped in ("quit", "exit"):
        break_flag = True
    elif cmd_stripped == "clear-history":
        continue_flag = True
        self.reset_history()
    elif cmd_stripped == CLEAR_WORD:
        outside = True
        cmd = CLEAR_WORD
    elif cmd_stripped[0] == SELECT_SYMBOL['outside']:
        # run the remainder of the line in the system shell
        cmd = cmd_stripped[1:]
        outside = True
        if cmd.strip() and cmd.split()[0] == 'cd':
            self.handle_cd(parse_quotes(cmd))
            continue_flag = True
        telemetry.track_outside_gesture()
    elif cmd_stripped[0] == SELECT_SYMBOL['exit_code']:
        meaning = "Success" if self.last_exit == 0 else "Failure"
        print(meaning + ": " + str(self.last_exit), file=self.output)
        continue_flag = True
        telemetry.track_exit_code_gesture()
    elif SELECT_SYMBOL['query'] in cmd_stripped and self.last and self.last.result:
        continue_flag = self.handle_jmespath_query(args)
        telemetry.track_query_gesture()
    elif not args:
        continue_flag = True
    elif args[0] == '--version' or args[0] == '-v':
        try:
            continue_flag = True
            self.cli_ctx.show_version()
        except SystemExit:
            # show_version exits; swallow so the shell keeps running
            pass
    elif SELECT_SYMBOL['example'] in cmd:
        cmd, continue_flag = self.handle_example(cmd, continue_flag)
        telemetry.track_ran_tutorial()
    elif SELECT_SYMBOL['scope'] == cmd_stripped[0:2]:
        continue_flag, cmd = self.handle_scoping_input(continue_flag, cmd, cmd_stripped)
        telemetry.track_scope_changes()
    else:
        # not a special character; add scope and remove 'az'
        if self.default_command:
            cmd = self.default_command + " " + cmd
        elif cmd.split(' ', 1)[0].lower() == 'az':
            cmd = ' '.join(cmd.split()[1:])
        if "|" in cmd or ">" in cmd:
            # anything I don't parse, send off
            outside = True
            cmd = "az " + cmd
        telemetry.track_cli_commands_used()
    return break_flag, continue_flag, outside, cmd
def handle_jmespath_query(self, args):
    """ handles the jmespath query for injection or printing

    A lone query-symbol argument prints the (optionally filtered) previous
    result; a query embedded in a longer command substitutes its results
    into the command and executes it.

    :return: True when the main loop should skip normal execution
    """
    continue_flag = False
    query_symbol = SELECT_SYMBOL['query']
    symbol_len = len(query_symbol)
    try:
        if len(args) == 1:
            # if arguments start with query_symbol, just print query result
            if args[0] == query_symbol:
                result = self.last.result
            elif args[0].startswith(query_symbol):
                result = jmespath.search(args[0][symbol_len:], self.last.result)
            # print in both stand-alone cases (previously the bare-symbol
            # branch computed the result but never printed it)
            print(json.dumps(result, sort_keys=True, indent=2), file=self.output)
        elif args[0].startswith(query_symbol):
            # print error message, user unsure of query shortcut usage
            print(("Usage Error: " + os.linesep +
                   "1. Use {0} stand-alone to display previous result with optional filtering "
                   "(Ex: {0}[jmespath query])" +
                   os.linesep + "OR:" + os.linesep +
                   "2. Use {0} to query the previous result for argument values "
                   "(Ex: group show --name {0}[jmespath query])").format(query_symbol), file=self.output)
        else:
            # query embedded in a command: inject the results into cmd
            def jmespath_query(match):
                if match.group(0) == query_symbol:
                    return str(self.last.result)
                query_result = jmespath.search(match.group(0)[symbol_len:], self.last.result)
                return str(query_result)

            def sub_result(arg):
                escaped_symbol = re.escape(query_symbol)
                # regex captures query symbol and all characters following it in the argument
                return json.dumps(re.sub(r'%s.*' % escaped_symbol, jmespath_query, arg))

            cmd_base = ' '.join(map(sub_result, args))
            self.cli_execute(cmd_base)
        continue_flag = True
    except (jmespath.exceptions.ParseError, CLIError) as e:
        print("Invalid Query Input: " + str(e), file=self.output)
        continue_flag = True
    return continue_flag
def handle_scoping_input(self, continue_flag, cmd, text):
    """ handles what to do with a scoping gesture

    Each word after the scope symbol either extends the default command
    scope or (with the unscope symbol) pops the most recent scoped word.
    """
    default_split = text.partition(SELECT_SYMBOL['scope'])[2].split()
    cmd = cmd.replace(SELECT_SYMBOL['scope'], '')
    continue_flag = True
    if not default_split:
        # bare scope symbol: clear all scoping
        self.default_command = ""
        print('unscoping all', file=self.output)
        return continue_flag, cmd
    while default_split:
        if not text:
            value = ''
        else:
            value = default_split[0]
        tree_path = self.default_command.split()
        tree_path.append(value)
        if self.completer.command_tree.in_tree(tree_path):
            # valid sub-command: extend the scope
            self.set_scope(value)
            print("defaulting: " + value, file=self.output)
            cmd = cmd.replace(SELECT_SYMBOL['scope'], '')
        elif SELECT_SYMBOL['unscope'] == default_split[0] and self.default_command.split():
            # pop the most recently scoped word
            value = self.default_command.split()[-1]
            self.default_command = ' ' + ' '.join(self.default_command.split()[:-1])
            if not self.default_command.strip():
                self.default_command = self.default_command.strip()
            print('unscoping: ' + value, file=self.output)
        elif SELECT_SYMBOL['unscope'] not in text:
            print("Scope must be a valid command", file=self.output)
        default_split = default_split[1:]
    return continue_flag, cmd
def reset_history(self):
    """ Deletes the history file and re-attaches a fresh FileHistory. """
    history_file_path = os.path.join(self.config.get_config_dir(), self.config.get_history())
    # NOTE(review): raises if the file is missing -- confirm it always exists
    os.remove(history_file_path)
    self.history = FileHistory(history_file_path)
    self.cli.buffers[DEFAULT_BUFFER].history = self.history
def cli_execute(self, cmd):
    """ sends the command to the CLI to be executed

    Runs synchronously, or on a daemon thread with a progress view when
    '--progress' is given. Records the result in self.last / self.last_exit.
    """
    try:
        args = parse_quotes(cmd)
        if args and args[0] == 'feedback':
            # stop nagging once the user has run 'feedback'
            self.config.set_feedback('yes')
            self.user_feedback = False
        azure_folder = get_config_dir()
        if not os.path.exists(azure_folder):
            os.makedirs(azure_folder)
        ACCOUNT.load(os.path.join(azure_folder, 'azureProfile.json'))
        CONFIG.load(os.path.join(azure_folder, 'az.json'))
        SESSION.load(os.path.join(azure_folder, 'az.sess'), max_age=3600)
        invocation = self.cli_ctx.invocation_cls(cli_ctx=self.cli_ctx,
                                                 parser_cls=self.cli_ctx.parser_cls,
                                                 commands_loader_cls=self.cli_ctx.commands_loader_cls,
                                                 help_cls=self.cli_ctx.help_cls)
        if '--progress' in args:
            # execute on a worker thread and show a progress view meanwhile
            args.remove('--progress')
            execute_args = [args]
            thread = Thread(target=invocation.execute, args=execute_args)
            thread.daemon = True
            thread.start()
            self.threads.append(thread)
            self.curr_thread = thread

            progress_args = [self]
            thread = Thread(target=progress_view, args=progress_args)
            thread.daemon = True
            thread.start()
            self.threads.append(thread)
            result = None
        else:
            result = invocation.execute(args)

        self.last_exit = 0
        if result and result.result is not None:
            if self.output:
                self.output.write(result)
                self.output.flush()
            else:
                formatter = self.cli_ctx.output.get_formatter(self.cli_ctx.invocation.data['output'])
                self.cli_ctx.output.out(result, formatter=formatter, out_file=sys.stdout)
        self.last = result
    except Exception as ex:  # pylint: disable=broad-except
        self.last_exit = handle_exception(ex)
    except SystemExit as ex:
        # SystemExit is not an Exception subclass, so it needs its own clause
        self.last_exit = int(ex.code)
def progress_patch(self, _=False):
    """ forces to use the Shell Progress

    Stands in for cli_ctx.get_progress_controller so all commands render
    progress through ShellProgressView.
    """
    from .progress import ShellProgressView
    self.cli_ctx.progress_controller.init_progress(ShellProgressView())
    return self.cli_ctx.progress_controller
def run(self):
    """ starts the REPL

    Main loop: reads a line, routes gestures via _special_cases, then
    either shells out or executes through the CLI, with telemetry.
    """
    from .progress import ShellProgressView
    self.cli_ctx.get_progress_controller().init_progress(ShellProgressView())
    self.cli_ctx.get_progress_controller = self.progress_patch

    # load the command table in the background so the prompt is usable immediately
    self.command_table_thread = LoadCommandTableThread(self.restart_completer, self)
    self.command_table_thread.start()

    from .configuration import SHELL_HELP
    self.cli.buffers['symbols'].reset(
        initial_document=Document(u'{}'.format(SHELL_HELP)))
    # flush telemetry for new commands and send successful interactive mode entry event
    telemetry.set_success()
    telemetry.flush()
    while True:
        try:
            document = self.cli.run(reset_current_buffer=True)
            text = document.text
            if not text:
                # not input
                self.set_prompt()
                continue
            cmd = text
            outside = False
        except AttributeError:
            # when the user pressed Control D
            break
        except (KeyboardInterrupt, ValueError):
            # CTRL C
            self.set_prompt()
            continue
        else:
            self.history.append(text)
            b_flag, c_flag, outside, cmd = self._special_cases(cmd, outside)
            if b_flag:
                break
            if c_flag:
                self.set_prompt()
                continue
            self.set_prompt()
            if outside:
                # run in the system shell
                subprocess.Popen(cmd, shell=True).communicate()
            else:
                telemetry.start()
                self.cli_execute(cmd)
                if self.last_exit and self.last_exit != 0:
                    telemetry.set_failure()
                else:
                    telemetry.set_success()
                telemetry.flush()
    telemetry.conclude()
|
leetcode.py | import json
import logging
import re
import time
import os
import pickle
from threading import Semaphore, Thread, current_thread
try:
from bs4 import BeautifulSoup
import requests
inited = 1
except ImportError:
inited = 0
try:
import vim
except ImportError:
vim = None
try:
import browser_cookie3
except ImportError:
browser_cookie3 = None
try:
import keyring
except ImportError:
keyring = None
# all endpoints are built from LEETCODE_BASE_URL (must be set in the environment)
LC_BASE = os.environ['LEETCODE_BASE_URL']
LC_CSRF = LC_BASE + '/ensure_csrf/'
LC_LOGIN = LC_BASE + '/accounts/login/'
LC_GRAPHQL = LC_BASE + '/graphql'
LC_CATEGORY_PROBLEMS = LC_BASE + '/api/problems/{category}'
LC_PROBLEM = LC_BASE + '/problems/{slug}/description'
LC_TEST = LC_BASE + '/problems/{slug}/interpret_solution/'
LC_SUBMIT = LC_BASE + '/problems/{slug}/submit/'
LC_SUBMISSIONS = LC_BASE + '/api/submissions/{slug}'
LC_SUBMISSION = LC_BASE + '/submissions/detail/{submission}/'
LC_CHECK = LC_BASE + '/submissions/detail/{submission}/check/'
LC_PROBLEM_SET_ALL = LC_BASE + '/problemset/all/'
LC_PROGRESS_ALL = LC_BASE + '/api/progress/all/'

# placeholder used when a problem carries no frequency data
EMPTY_FREQUENCIES = [0, 0, 0, 0, 0, 0, 0, 0]

# requests.Session shared by every API helper; created at login
session = None

# state shared between the vim-facing thread and the background worker
task_running = False
task_done = False
task_trigger = Semaphore(0)  # released to hand a task to the worker
task_name = ''
task_input = None
task_progress = ''
task_output = None
task_err = ''

log = logging.getLogger(__name__)
log.setLevel(logging.ERROR)  # quiet unless enable_logging() is called
def enable_logging():
    """ Switch the module logger to INFO level, writing to leetcode-vim.log. """
    handler = logging.FileHandler('leetcode-vim.log')
    handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
    handler.setLevel(logging.INFO)
    log.addHandler(handler)
    log.setLevel(logging.INFO)
def _make_headers():
    """ Build the common request headers, including the CSRF token taken
    from the session cookie. Requires an authenticated session. """
    assert is_login()
    headers = {'Origin': LC_BASE,
               'Referer': LC_BASE,
               'X-Requested-With': 'XMLHttpRequest',
               'X-CSRFToken': session.cookies.get('csrftoken', '')}
    return headers
def _level_to_name(level):
if level == 1:
return 'Easy'
if level == 2:
return 'Medium'
if level == 3:
return 'Hard'
return ' '
def _state_to_flag(state):
if state == 'ac':
return 'X'
if state == 'notac':
return '?'
return ' '
def _status_to_name(status):
if status == 10:
return 'Accepted'
if status == 11:
return 'Wrong Answer'
if status == 12:
return 'Memory Limit Exceeded'
if status == 13:
return 'Output Limit Exceeded'
if status == 14:
return 'Time Limit Exceeded'
if status == 15:
return 'Runtime Error'
if status == 16:
return 'Internal Error'
if status == 20:
return 'Compile Error'
if status == 21:
return 'Unknown Error'
return 'Unknown State'
def _break_code_lines(s):
return s.replace('\r\n', '\n').replace('\xa0', ' ').split('\n')
def _break_paragraph_lines(s):
    """ Split *s* into lines, emitting exactly one empty line after every
    non-empty line and dropping whitespace-only lines. """
    result = []
    for line in _break_code_lines(s):
        # a line with only whitespaces is also considered empty
        if line.strip():
            result.append(line)
            result.append('')
    return result
def _remove_description(code):
eod = code.find('[End of Description]')
if eod == -1:
return code
eol = code.find('\n', eod)
if eol == -1:
return ''
return code[eol+1:]
def is_login():
    """ True when the shared session exists and holds a LEETCODE_SESSION cookie. """
    return session and 'LEETCODE_SESSION' in session.cookies
def get_progress():
    """ Fetch the user's overall solve progress.

    :return: parsed JSON dict, or None on HTTP error or unexpected payload
    """
    headers = _make_headers()
    res = session.get(LC_PROGRESS_ALL, headers=headers)
    if res.status_code != 200:
        _echoerr('cannot get the progress')
        return None
    data = res.json()
    if 'solvedTotal' not in data:
        # payload does not look like a progress response; treat as failure
        return None
    return data
def load_session_cookie(browser):
    """ Log in by reusing the LEETCODE_SESSION cookie from *browser*.

    The cookie is cached in the system keyring; on a failed validation the
    cached value is deleted so the next attempt re-reads the browser.
    :return: True on success, False otherwise
    """
    if browser_cookie3 is None:
        _echoerr('browser_cookie3 not installed: pip3 install browser_cookie3 --user')
        return False
    if keyring is None:
        _echoerr('keyring not installed: pip3 install keyring --user')
        return False
    session_cookie_raw = keyring.get_password('leetcode.vim', 'SESSION_COOKIE')
    if session_cookie_raw is None:
        cookies = getattr(browser_cookie3, browser)(domain_name=LC_BASE.split('/')[-1])
        for cookie in cookies:
            if cookie.name == 'LEETCODE_SESSION':
                session_cookie = cookie
                # protocol 0 pickles to ASCII, so it survives the keyring round-trip
                session_cookie_raw = pickle.dumps(cookie, protocol=0).decode('utf-8')
                break
        else:
            _echoerr('Leetcode session cookie not found. Please login in browser.')
            return False
        keyring.set_password('leetcode.vim', 'SESSION_COOKIE', session_cookie_raw)
    else:
        # NOTE(review): unpickles data from the keyring -- safe only if the
        # local keyring is trusted
        session_cookie = pickle.loads(session_cookie_raw.encode('utf-8'))
    global session
    session = requests.Session()
    session.cookies.set_cookie(session_cookie)
    progress = get_progress()
    if progress is None:
        # cookie is stale; drop the cached copy
        _echoerr('cannot get progress. Please relogin in your browser.')
        keyring.delete_password('leetcode.vim', 'SESSION_COOKIE')
        return False
    return True
def _get_category_problems(category):
    """ Fetch and normalize the problem list for one category slug.

    :return: list of problem dicts (empty on error)
    """
    headers = _make_headers()
    url = LC_CATEGORY_PROBLEMS.format(category=category)
    log.info('_get_category_problems request: url="%s" headers="%s"',
             url, headers)
    res = session.get(url, headers=headers)
    log.info('_get_category_problems response: status="%s" body="%s"',
             res.status_code, res.text)
    if res.status_code != 200:
        _echoerr('cannot get the category: {}'.format(category))
        return []
    problems = []
    content = res.json()
    for p in content['stat_status_pairs']:
        # skip hidden questions
        if p['stat']['question__hide']:
            continue
        problem = {'state': _state_to_flag(p['status']),
                   'id': p['stat']['question_id'],
                   'fid': p['stat']['frontend_question_id'],
                   'title': p['stat']['question__title'],
                   'slug': p['stat']['question__title_slug'],
                   'paid_only': p['paid_only'],
                   # NOTE(review): divides by total_submitted -- confirm the API
                   # never reports 0 submissions
                   'ac_rate': p['stat']['total_acs'] / p['stat']['total_submitted'],
                   'level': _level_to_name(p['difficulty']['level']),
                   'favor': p['is_favor'],
                   'category': content['category_slug'],
                   'frequency': p['frequency']}
        problems.append(problem)
    return problems
def get_problems(categories):
    """ Fetch every category's problems and return them sorted by id. """
    assert is_login()
    collected = []
    for category in categories:
        collected.extend(_get_category_problems(category))
    collected.sort(key=lambda problem: problem['id'])
    return collected
def _split(s):
if isinstance(s, list):
lines = []
for element in s:
lines.extend(_split(element))
return lines
# Replace all \r\n to \n and all \r (alone) to \n
s = s.replace('\r\n', '\n').replace('\r', '\n').replace('\0', '\n')
# str.split has an disadvantage that ''.split('\n') results in [''], but what we want
# is []. This small function returns [] if `s` is a blank string, that is, containing no
# characters other than whitespaces.
if s.strip() == '':
return []
return s.split('\n')
def get_problem(slug):
    """ Fetch one problem's description, templates and stats via GraphQL.

    :return: problem dict, or None on error
    """
    assert is_login()
    headers = _make_headers()
    headers['Referer'] = LC_PROBLEM.format(slug=slug)
    body = {'query': '''query getQuestionDetail($titleSlug : String!) {
question(titleSlug: $titleSlug) {
questionId
questionFrontendId
title
content
stats
difficulty
codeDefinition
sampleTestCase
enableRunCode
translatedContent
}
}''',
            'variables': {'titleSlug': slug},
            'operationName': 'getQuestionDetail'}
    log.info('get_problem request: url="%s" headers="%s" body="%s"', LC_GRAPHQL, headers, body)
    res = session.post(LC_GRAPHQL, json=body, headers=headers)
    log.info('get_problem response: status="%s" body="%s"', res.status_code, res.text)
    if res.status_code != 200:
        _echoerr('cannot get the problem: {}'.format(slug))
        return None
    q = res.json()['data']['question']
    # prefer the translated description when the site provides one
    content = q['translatedContent'] or q['content']
    if content is None:
        _echoerr('cannot get the problem: {}'.format(slug))
        return None
    soup = BeautifulSoup(content, features='html.parser')
    problem = {}
    problem['id'] = q['questionId']
    problem['fid'] = q['questionFrontendId']
    problem['title'] = q['title']
    problem['slug'] = slug
    problem['level'] = q['difficulty']
    problem['desc'] = _break_paragraph_lines(soup.get_text())
    problem['templates'] = {}
    # one code template per language ('value' is the language key)
    for t in json.loads(q['codeDefinition']):
        problem['templates'][t['value']] = _break_code_lines(t['defaultCode'])
    problem['testable'] = q['enableRunCode']
    problem['testcase'] = _split(q['sampleTestCase'])
    stats = json.loads(q['stats'])
    problem['total_accepted'] = stats['totalAccepted']
    problem['total_submission'] = stats['totalSubmission']
    problem['ac_rate'] = stats['acRate']
    return problem
def _check_result(submission_id):
    """ Poll the check endpoint until the run/submission finishes.

    Updates the global task_progress while polling (when running inside a
    background task) and normalizes the JSON payload into a result dict.
    :return: result dict, or None on HTTP error
    """
    global task_progress
    if _in_task():
        prog_stage = 'Uploading '
        prog_bar = '.'
        task_progress = prog_stage + prog_bar
    while True:
        headers = _make_headers()
        url = LC_CHECK.format(submission=submission_id)
        log.info('check result request: url="%s" headers="%s"', url, headers)
        res = session.get(url, headers=headers)
        log.info('check result response: status="%s" body="%s"', res.status_code, res.text)
        if res.status_code != 200:
            _echoerr('cannot get the execution result')
            return None
        if _in_task():
            prog_bar += '.'
        r = res.json()
        if r['state'] == 'SUCCESS':
            prog_stage = 'Done      '
            break
        elif r['state'] == 'PENDING':
            prog_stage = 'Pending   '
        elif r['state'] == 'STARTED':
            prog_stage = 'Running   '
        if _in_task():
            task_progress = prog_stage + prog_bar
        time.sleep(1)  # throttle the polling
    result = {
        'answer': r.get('code_answer', []),
        'runtime': r['status_runtime'],
        'state': _status_to_name(r['status_code']),
        'testcase': _split(r.get('input', r.get('last_testcase', ''))),
        'passed': r.get('total_correct') or 0,
        'total': r.get('total_testcases') or 0,
        'error': _split([v for k, v in r.items() if 'error' in k and v])
    }
    # the keys differs between the result of testing the code and submitting it
    # for submission judge_type is 'large', and for testing judge_type does not exist
    if r.get('judge_type') == 'large':
        result['answer'] = _split(r.get('code_output', ''))
        result['expected_answer'] = _split(r.get('expected_output', ''))
        result['stdout'] = _split(r.get('std_output', ''))
        result['runtime_percentile'] = r.get('runtime_percentile', '')
    else:
        # Test states cannot distinguish accepted answers from wrong answers.
        if result['state'] == 'Accepted':
            result['state'] = 'Finished'
        result['stdout'] = _split(r.get('code_output', []))
        result['expected_answer'] = []
        result['runtime_percentile'] = r.get('runtime_percentile', '')
        result['expected_answer'] = r.get('expected_code_answer', [])
    return result
def test_solution(problem_id, title, slug, filetype, code, test_input):
    """ Run *code* against *test_input* on the judge (synchronous).

    :return: normalized result dict, or None on error
    """
    assert is_login()
    code = _remove_description(code)
    headers = _make_headers()
    headers['Referer'] = LC_PROBLEM.format(slug=slug)
    body = {'data_input': test_input,
            'lang': filetype,
            'question_id': str(problem_id),
            'test_mode': False,
            'typed_code': code}
    url = LC_TEST.format(slug=slug)
    log.info('test solution request: url="%s" headers="%s" body="%s"', url, headers, body)
    res = session.post(url, json=body, headers=headers)
    log.info('test solution response: status="%s" body="%s"', res.status_code, res.text)
    if res.status_code != 200:
        if 'too fast' in res.text:
            _echoerr('you are sending the request too fast')
        else:
            _echoerr('cannot test the solution for ' + slug)
        return None
    result = _check_result(res.json()['interpret_id'])
    result['testcase'] = test_input.split('\n')
    result['title'] = title
    return result
def test_solution_async(problem_id, title, slug, filetype, code, test_input):
    """ Queue a test run on the background task thread.

    :return: True when queued, False if another task is already running
    """
    assert is_login()
    global task_input, task_name
    if task_running:
        _echoerr('there is other task running: ' + task_name)
        return False
    code = _remove_description(code)
    task_name = 'test_solution'
    task_input = [problem_id, title, slug, filetype, code, test_input]
    task_trigger.release()  # wake the worker thread
    return True
def submit_solution(slug, filetype, code=None):
    """ Submit *code* (or the current vim buffer) for problem *slug*.

    :return: normalized result dict, or None on error
    """
    assert is_login()
    problem = get_problem(slug)
    if not problem:
        return None
    if code is None:
        # default to the buffer the user is editing
        code = '\n'.join(vim.current.buffer)
    code = _remove_description(code)
    headers = _make_headers()
    headers['Referer'] = LC_PROBLEM.format(slug=slug)
    body = {'data_input': problem['testcase'],
            'lang': filetype,
            'question_id': str(problem['id']),
            'test_mode': False,
            'typed_code': code,
            'judge_type': 'large'}
    url = LC_SUBMIT.format(slug=slug)
    log.info('submit solution request: url="%s" headers="%s" body="%s"', url, headers, body)
    res = session.post(url, json=body, headers=headers)
    log.info('submit solution response: status="%s" body="%s"', res.status_code, res.text)
    if res.status_code != 200:
        if 'too fast' in res.text:
            _echoerr('you are sending the request too fast')
        else:
            _echoerr('cannot submit the solution for ' + slug)
        return None
    result = _check_result(res.json()['submission_id'])
    result['title'] = problem['title']
    return result
def submit_solution_async(slug, filetype, code=None):
    """ Queue a submission on the background task thread.

    :return: True when queued, False if another task is already running
    """
    assert is_login()
    global task_input, task_name
    if task_running:
        _echoerr('there is other task running: ' + task_name)
        return False
    if code is None:
        # capture the buffer now; the worker thread cannot touch vim state
        code = '\n'.join(vim.current.buffer)
    task_name = 'submit_solution'
    task_input = [slug, filetype, code]
    task_trigger.release()  # wake the worker thread
    return True
def get_submissions(slug):
    """ List past submissions for *slug*.

    :return: list of submission summary dicts, or None on error
    """
    assert is_login()
    headers = _make_headers()
    headers['Referer'] = LC_PROBLEM.format(slug=slug)
    url = LC_SUBMISSIONS.format(slug=slug)
    log.info('get submissions request: url="%s" headers="%s"', url, headers)
    res = session.get(url, headers=headers)
    log.info('get submissions response: status="%s" body="%s"', res.status_code, res.text)
    if res.status_code != 200:
        _echoerr('cannot find the submissions of problem: ' + slug)
        return None
    submissions = []
    for r in res.json()['submissions_dump']:
        # the numeric submission id is embedded in the detail URL path
        s = {
            'id': r['url'].split('/')[3],
            'time': r['time'].replace('\xa0', ' '),
            'status': r['status_display'],
            'runtime': r['runtime'],
        }
        submissions.append(s)
    return submissions
def _group1(match, default):
if match:
return match.group(1)
return default
def _unescape(s):
return s.encode().decode('unicode_escape')
def get_submission(sid):
    """ Scrape one submission's detail page into a dict.

    The data lives in an inline Javascript snippet on the page, hence the
    regex extraction. :return: submission dict, or None on error.
    """
    assert is_login()
    headers = _make_headers()
    url = LC_SUBMISSION.format(submission=sid)
    log.info('get submission request: url="%s" headers="%s"', url, headers)
    res = session.get(url, headers=headers)
    log.info('get submission response: status="%s" body="%s"', res.status_code, res.text)
    if res.status_code != 200:
        _echoerr('cannot find the submission: ' + sid)
        return None
    # we need to parse the data from the Javascript snippet
    s = res.text
    submission = {
        'id': sid,
        'state': _status_to_name(int(_group1(re.search(r"status_code: parseInt\('([^']*)'", s),
                                             'not found'))),
        'runtime': _group1(re.search("runtime: '([^']*)'", s), 'not found'),
        'passed': _group1(re.search("total_correct : '([^']*)'", s), 'not found'),
        'total': _group1(re.search("total_testcases : '([^']*)'", s), 'not found'),
        'testcase': _split(_unescape(_group1(re.search("input : '([^']*)'", s), ''))),
        'answer': _split(_unescape(_group1(re.search("code_output : '([^']*)'", s), ''))),
        'expected_answer': _split(_unescape(_group1(re.search("expected_output : '([^']*)'", s),
                                                    ''))),
        'problem_id': _group1(re.search("questionId: '([^']*)'", s), 'not found'),
        'slug': _group1(re.search("editCodeUrl: '([^']*)'", s), '///').split('/')[2],
        'filetype': _group1(re.search("getLangDisplay: '([^']*)'", s), 'not found'),
        'error': [],
        'stdout': [],
    }
    problem = get_problem(submission['slug'])
    submission['title'] = problem['title']
    # the punctuations and newlines in the code are escaped like '\\u0010' ('\\' => real backslash)
    # to unscape the string, we do the trick '\\u0010'.encode().decode('unicode_escape') ==> '\n'
    # submission['code'] = _break_code_lines(_unescape(_group1(
    #     re.search("submissionCode: '([^']*)'", s), '')))
    # NOTE(review): _unescape_with_Chinese is defined elsewhere in this module
    submission['code'] = _unescape_with_Chinese(
        _group1(re.search("submissionCode: '([^']*)'", s), ''))
    dist_str = _unescape(_group1(re.search("runtimeDistributionFormatted: '([^']*)'", s),
                                 '{"distribution":[]}'))
    dist = json.loads(dist_str)['distribution']
    dist.reverse()
    # the second key "runtime" is the runtime in milliseconds
    # we need to search from the position after the first "runtime" key
    prev_runtime = re.search("runtime: '([^']*)'", s)
    if not prev_runtime:
        my_runtime = 0
    else:
        my_runtime = int(_group1(re.search("runtime: '([^']*)'", s[prev_runtime.end():]), 0))
    # accumulate the distribution until our runtime bucket is reached
    accum = 0
    for runtime, frequency in dist:
        accum += frequency
        if my_runtime >= int(runtime):
            break
    submission['runtime_percentile'] = '{:.1f}%'.format(accum)
    return submission
def _process_topic_element(topic):
    """ Convert one topic anchor element (BeautifulSoup tag) to a dict. """
    return {'topic_name': topic.find(class_='text-gray').string.strip(),
            'num_problems': topic.find(class_='badge').string,
            'topic_slug': topic.get('href').split('/')[2]}
def _process_company_element(company):
    """ Convert one company anchor element (BeautifulSoup tag) to a dict. """
    return {'company_name': company.find(class_='text-gray').string.strip(),
            'num_problems': company.find(class_='badge').string,
            'company_slug': company.get('href').split('/')[2]}
def get_topics_and_companies():
    """ Scrape the problemset page for the topic and company tag lists.

    :return: dict with 'topics' and 'companies' lists (both empty on error)
    """
    headers = _make_headers()
    log.info('get_topics_and_companies request: url="%s', LC_PROBLEM_SET_ALL)
    res = session.get(LC_PROBLEM_SET_ALL, headers=headers)
    log.info('get_topics_and_companies response: status="%s" body="%s"', res.status_code,
             res.text)
    if res.status_code != 200:
        _echoerr('cannot get topics')
        # return the same shape as the success path so callers can index
        # 'topics'/'companies' without a type check (was a bare [])
        return {'topics': [], 'companies': []}
    soup = BeautifulSoup(res.text, features='html.parser')
    topic_elements = soup.find_all(class_='sm-topic')
    topics = [_process_topic_element(topic) for topic in topic_elements]
    company_elements = soup.find_all(class_='sm-company')
    companies = [_process_company_element(company) for company in company_elements]
    return {
        'topics': topics,
        'companies': companies
    }
def get_problems_of_topic(topic_slug):
    """ Fetch all problems tagged with *topic_slug* via GraphQL.

    :return: dict with 'topic_name' and a normalized 'problems' list
        (empty list on error)
    """
    request_body = {
        'operationName':'getTopicTag',
        'variables': {'slug': topic_slug},
        'query': '''query getTopicTag($slug: String!) {
topicTag(slug: $slug) {
name
translatedName
questions {
status
questionId
questionFrontendId
title
titleSlug
translatedTitle
stats
difficulty
isPaidOnly
}
frequencies
}
}
'''}
    headers = _make_headers()
    log.info('get_problems_of_topic request: headers="%s" body="%s"', headers,
             request_body)
    res = session.post(LC_GRAPHQL, headers=headers, json=request_body)
    log.info('get_problems_of_topic response: status="%s" body="%s"',
             res.status_code, res.text)
    if res.status_code != 200:
        _echoerr('cannot get problems of the topic')
        return {'topic_name': topic_slug, 'problems': []}
    topic_tag = res.json()['data']['topicTag']
    if not topic_tag:
        # unknown topic slug
        return {'topic_name': topic_slug, 'problems': []}
    if topic_tag['frequencies']:
        id_to_frequency_map = json.loads(topic_tag['frequencies'])
    else:
        id_to_frequency_map = {}

    def process_problem(p):
        # normalize one GraphQL question node into the plugin's problem dict
        stats = json.loads(p['stats'])
        return {
            'state': _state_to_flag(p['status']),
            'id': p['questionId'],
            'fid': p['questionFrontendId'],
            'title': p['title'],
            'slug': p['titleSlug'],
            'paid_only': p['isPaidOnly'],
            # NOTE(review): divides by totalSubmissionRaw -- confirm never 0
            'ac_rate': stats['totalAcceptedRaw'] / stats['totalSubmissionRaw'],
            'level': p['difficulty'],
            'favor': False,
            'frequency': id_to_frequency_map.get(p['questionId'], 0)}

    return {
        'topic_name': topic_tag['name'],
        'problems': [process_problem(p) for p in topic_tag['questions']]}
def get_problems_of_company(company_slug):
    """Fetch all problems tagged with *company_slug* via the GraphQL API.

    :param company_slug: LeetCode company identifier, e.g. ``'google'``
    :returns: dict ``{'company_name': str, 'problems': list}``; on HTTP
              failure or an unknown slug the slug itself is used as the
              name and the problem list is empty
    """
    request_body = {
        'operationName': 'getCompanyTag',
        'variables': {'slug': company_slug},
        'query': '''query getCompanyTag($slug: String!) {
  companyTag(slug: $slug) {
    name
    translatedName
    frequencies
    questions {
      ...questionFields
    }
  }
}
fragment questionFields on QuestionNode {
  status
  questionId
  questionFrontendId
  title
  titleSlug
  translatedTitle
  stats
  difficulty
  isPaidOnly
  frequencyTimePeriod
}
'''}
    headers = _make_headers()
    # company-page Referer is set explicitly; presumably the endpoint
    # requires it for company queries — confirm against the site
    headers['Referer'] = 'https://leetcode.com/company/{}/'.format(company_slug)
    log.info('get_problems_of_company request: headers="%s" body="%s"', headers,
             request_body)
    res = session.post(LC_GRAPHQL, headers=headers, json=request_body)
    log.info('get_problems_of_company response: status="%s" body="%s"',
             res.status_code, res.text)
    if res.status_code != 200:
        _echoerr('cannot get problems of the company')
        return {'company_name': company_slug, 'problems': []}
    company_tag = res.json()['data']['companyTag']
    if not company_tag:
        _echoerr('cannot get problems of the company')
        return {'company_name': company_slug, 'problems': []}
    # 'frequencies' is a JSON-encoded map of question id -> frequency values
    if company_tag['frequencies']:
        id_to_frequency_map = json.loads(company_tag['frequencies'])
    else:
        id_to_frequency_map = {}

    def process_problem(p):
        # Convert one GraphQL question node into the plugin's problem dict.
        stats = json.loads(p['stats'])
        # Fix: guard against ZeroDivisionError — problems can report zero
        # total submissions.
        submissions = stats['totalSubmissionRaw']
        ac_rate = stats['totalAcceptedRaw'] / submissions if submissions else 0
        return {
            'state': _state_to_flag(p['status']),
            'id': p['questionId'],
            'fid': p['questionFrontendId'],
            'title': p['title'],
            'slug': p['titleSlug'],
            'paid_only': p['isPaidOnly'],
            'ac_rate': ac_rate,
            'level': p['difficulty'],
            'favor': False,
            # first four entries are skipped, matching EMPTY_FREQUENCIES'
            # layout — NOTE(review): confirm the meaning of those slots
            'frequencies': id_to_frequency_map.get(p['questionId'],
                                                   EMPTY_FREQUENCIES)[4:]}

    return {
        'company_name': company_tag['name'],
        'problems': [process_problem(p) for p in company_tag['questions']]}
def _thread_main():
    """Background worker loop: run one queued task per trigger, forever.

    Runs on the daemon ``task_thread``.  Another thread stores the task
    in the module-level ``task_name``/``task_input`` and then releases
    ``task_trigger`` (presumably a lock/semaphore — only ``acquire`` is
    visible here); results are handed back through the module-level
    ``task_output``/``task_err``/``task_running``/``task_done`` flags.
    """
    global task_running, task_done, task_output, task_err
    while True:
        # Block until a task has been queued for us.
        task_trigger.acquire()
        # Reset the result slots before starting the new task.
        task_running = True
        task_done = False
        task_output = None
        task_err = ''
        log.info('task thread input: name="%s" input="%s"', task_name, task_input)
        try:
            if task_name == 'test_solution':
                task_output = test_solution(*task_input)
            elif task_name == 'submit_solution':
                task_output = submit_solution(*task_input)
        except BaseException as e:
            # BaseException so even SystemExit/KeyboardInterrupt raised
            # inside a task is reported instead of killing the worker.
            task_err = str(e)
        log.info('task thread output: name="%s" output="%s" error="%s"', task_name, task_output,
                 task_err)
        # Flip the flags last so a poller never sees task_done before the
        # output/error slots are populated.
        task_running = False
        task_done = True
def _in_task():
    """Return True when the caller is running on the background task thread."""
    return task_thread is current_thread()
def _echoerr(s):
    """Report an error: into task_err on the worker thread, else to stdout."""
    global task_err
    if not _in_task():
        print(s)
    else:
        task_err = s
def _unescape_with_Chinese(code):
    """Decode literal ``\\uXXXX`` escape sequences in *code* into the
    characters they represent (e.g. Chinese text) and split into lines."""
    def _decode(match):
        return match.group().encode('utf-8').decode('unicode_escape')
    return re.sub(r'\\u\w{4}', _decode, code).splitlines()
# Start the single background worker that executes queued test/submit
# tasks; daemon=True so it dies together with the host process.
task_thread = Thread(target=_thread_main, daemon=True)
task_thread.start()
|
Principal.py | from iqoptionapi.stable_api import IQ_Option
from PyQt5 import uic, QtWidgets
from PyQt5.QtWidgets import QMessageBox
import threading
import time, configparser, logging, sys
from datetime import datetime, timedelta
from dateutil import tz
import json
# Silence all log records of severity ERROR and below.
# Fix: logging.disable() stores a single global threshold, so the second
# of the original two calls simply overwrote the first — one call with
# the highest level is equivalent and removes the dead statement.
logging.disable(logging.ERROR)
def main():
    """Connect to IQ Option with the form credentials, lock the UI and
    launch the trading loop (IniciaTrade) on a background thread.

    Bound to pushButton; mutates the module-level globals ``email``,
    ``saldo`` and ``saldo_mov``.
    """
    # Persist the e-mail typed into the form back to cnf.ini.
    config = configparser.RawConfigParser()
    config.read('cnf.ini')
    global email
    email = formulario.email.text()
    config.set('GERAL', 'email', str(email))
    cfgfile = open('cnf.ini','w')
    config.write(cfgfile, space_around_delimiters=False)
    cfgfile.close()
    # Connect, retrying once per second until check_connect() succeeds.
    API = IQ_Option(email, formulario.senha.text())
    API.connect()
    while True:
        if API.check_connect() == False:
            formulario.plainTextEdit_2.addItem('Erro ao se conectar!')
            API.connect()
        else:
            formulario.plainTextEdit_2.addItem('Conectado com sucesso!')
            # Freeze every input widget while the robot is running.
            formulario.pushButton.setText('Robo Iniciado com Sucesso!')
            formulario.pushButton.setEnabled(False)
            formulario.comboBox.setEnabled(False)
            formulario.senha.setEnabled(False)
            formulario.email.setEnabled(False)
            formulario.delay.setEnabled(False)
            formulario.payout.setEnabled(False)
            formulario.gales.setEnabled(False)
            formulario.stopgain.setEnabled(False)
            formulario.stoploss.setEnabled(False)
            formulario.primeira.setEnabled(False)
            formulario.segunda.setEnabled(False)
            formulario.terceira.setEnabled(False)
            formulario.pushButton_2.setEnabled(False)
            break
        time.sleep(1)
    API.change_balance(conta)

    def banca():
        # Current account balance rounded to 2 decimals.
        return round(API.get_balance(), 2)

    global saldo, saldo_mov
    saldo = banca()    # balance at session start (baseline for the stops)
    saldo_mov = saldo  # running balance, updated by CalculaStop
    formulario.banca.setText(str(saldo))
    time.sleep(1)

    def Mensagem(msg):
        # Append one line to the log widget.
        formulario.plainTextEdit_2.addItem(msg)

    def Payout(par,timeframe):
        # Poll until a positive digital payout is published, then return it
        # as a fraction.  NOTE(review): never called anywhere in this file.
        API.subscribe_strike_list(par, timeframe)
        while True:
            d = API.get_digital_current_profit(par, timeframe)
            if d > 0:
                break
            time.sleep(1)
        API.unsubscribe_strike_list(par, timeframe)
        return float(d / 100)

    def carregar_sinais():
        # Read the signal list: one 'PAIR;DAY;HH:MM:SS;DIR;TF' entry per line.
        arquivo = open('sinais.txt', encoding='UTF-8')
        lista = arquivo.read()
        arquivo.close  # NOTE(review): missing () — the file is never closed
        lista = lista.split('\n')
        # NOTE(review): deleting while enumerating skips the element right
        # after each removed blank; consecutive blank lines can survive.
        for index,a in enumerate(lista):
            if a == '':
                del lista[index]
        return lista

    def HoraAtual():
        # Wall-clock time in the Sao Paulo timezone, formatted HH:MM:SS.
        hora = datetime.now()
        tm = tz.gettz('America/Sao Paulo')
        hora_atual = hora.astimezone(tm)
        return hora_atual.strftime('%H:%M:%S')

    def CalculaStop(valor_mov):
        # Apply a win (+) or loss (-) to the running balance and raise the
        # stop flag once the configured gain/loss limits are crossed.
        global stop
        global stop_mensagem
        global saldo_mov
        saldo_mov = abs(saldo_mov + valor_mov)
        if (saldo + int(stopgain)) <= saldo_mov:
            stop = True
            stop_mensagem = 'Stop Win: ' + str(round((saldo_mov - saldo), 2))
        elif (saldo - int(stoploss)) >= saldo_mov:
            stop = True
            stop_mensagem = 'Stop Loss: ' + str(round((saldo_mov - saldo), 2))

    def entradas(par,entrada,direcao,config,timeframe):
        # Place one order and translate its outcome into
        # ('win'|'loss'|'equal'|'error', profit).
        # NOTE(review): if the order is placed (status truthy) but
        # check_win_v3 returns a falsy profit (0/None), no return is
        # reached and the caller's tuple unpacking crashes on None.
        status,id = API.buy(int(entrada),par,direcao,int(timeframe))
        if status:
            lucro = API.check_win_v3(id)
            if lucro:
                if lucro > 0:
                    return 'win',round(lucro, 2)
                elif lucro < 0:
                    return 'loss',0
                elif lucro == 0:
                    return 'equal',0
            '''
            resultado,lucro = API.check_win_v3(id)
            if resultado:
                if resultado == 'win':
                    return 'win',round(lucro, 2)
                elif resultado == 'loose':
                    return 'loss',0
                elif resultado == 'equal':
                    return 'equal',0
            '''
        else:
            return 'error',0

    def IniciaTrade():
        # Worker loop: wait for each signal's (delayed) entry time, place
        # the trade, and run the martingale recovery sequence on a loss.
        sinais = carregar_sinais()
        Mensagem('')
        Mensagem('Moedas a serem operadas:')
        for y in sinais:
            Mensagem(y.split(';')[0].upper() + ' / ' + y.split(';')[2])
        for x in sinais:
            timeframe = x.split(';')[4].upper()
            par = x.split(';')[0].upper()
            dia = x.split(';')[1]
            minutos_lista = x.split(';')[2]
            direcao = x.split(';')[3].lower().replace('\n','')
            # Entry moment moved `delay` seconds earlier than the signal time.
            minutos_lista_delay = format(datetime.strptime(minutos_lista, '%H:%M:%S') - timedelta(seconds=int(delay)),"%H:%M:%S")
            dia_atual = format(datetime.now(), '%d')
            # NOTE(review): lexicographic comparison of day-of-month strings.
            if dia_atual > dia:
                Mensagem('Dia informado é menor que o dia atual!')
                break
            if stop:
                Mensagem('')
                Mensagem(stop_mensagem)
                break
            while True:
                minutos = HoraAtual()
                # NOTE(review): the next two guards are the same condition
                # written both ways; either one skips a signal whose time
                # has already passed.
                if minutos_lista_delay < minutos:
                    break
                if minutos > minutos_lista_delay:
                    break
                entrar = True if (minutos_lista_delay == minutos ) else False
                if entrar:
                    if True:
                        Mensagem('')
                        Mensagem('Iniciando Operaçao')
                        Mensagem('Paridade: ' + par + ' / ' + 'Horario: ' + str(minutos_lista))
                        resultado,lucro = entradas(par,primeira_entrada, direcao, config,timeframe)
                        Mensagem('Paridade: ' + par + ' -> ' + resultado + ' / ' + str(lucro))
                        if resultado == 'error':
                            break
                        if resultado == 'win':
                            CalculaStop(lucro)
                            break
                        '''
                        if stop:
                            mensagem_stop = '\n\nStop '+ resultado.upper() + ' batido!'
                            Mensagem(mensagem_stop)
                            sys.exit()
                        '''
                        if resultado == 'loss' and int(gales) > 0:
                            # Martingale: book the loss, then escalate the
                            # stake level after each further loss.
                            CalculaStop(int(primeira_entrada) * -1)
                            valor_entrada = segunda_entrada
                            for i in range(int(gales) if int(gales) > 0 else 1):
                                Mensagem('Entrada Martingale Nivel ' + str(i+1) + ' - ' + HoraAtual())
                                resultado,lucro = entradas(par, valor_entrada, direcao,config,timeframe)
                                Mensagem('Resultado Martingale, Paridade: ' + par + ' -> ' + resultado + ' / ' + str(lucro))
                                if resultado == 'win':
                                    CalculaStop(lucro)
                                    break
                                else:
                                    CalculaStop(int(valor_entrada) * -1)
                                    valor_entrada = terceira_entrada
                            break
                        else:
                            break
                time.sleep(0.1)
        Mensagem('')
        formulario.plainTextEdit_2.addItem('Lista Finalizada!')
        #sys.exit()

    # Run the trading loop off the UI thread so the window stays responsive.
    threading.Thread(target=IniciaTrade).start()
def grava_configuracoes():
    """Read the strategy fields from the form, mirror them into the
    module-level globals used by the trading loop, and persist them
    to cnf.ini.  Bound to pushButton_2."""
    global payout, gales, stopgain, stoploss
    global primeira_entrada, segunda_entrada, terceira_entrada, delay
    config = configparser.RawConfigParser()
    config.read('cnf.ini')
    # pull the current widget values
    payout = formulario.payout.value()
    gales = formulario.gales.value()
    stopgain = formulario.stopgain.text()
    stoploss = formulario.stoploss.text()
    delay = formulario.delay.value()
    primeira_entrada = formulario.primeira.text()
    segunda_entrada = formulario.segunda.text()
    terceira_entrada = formulario.terceira.text()
    # mirror them into the config object
    config.set('ESTRATEGIA', 'payout', str(payout))
    config.set('ESTRATEGIA', 'gales', str(gales))
    config.set('ESTRATEGIA', 'stopgain', str(stopgain))
    config.set('ESTRATEGIA', 'stoploss', str(stoploss))
    config.set('ESTRATEGIA', 'delay', str(delay))
    config.set('ENTRADAS', 'primeira', str(primeira_entrada))
    config.set('ENTRADAS', 'segunda', str(segunda_entrada))
    config.set('ENTRADAS', 'terceira', str(terceira_entrada))
    with open('cnf.ini', 'w') as cfgfile:
        config.write(cfgfile, space_around_delimiters=False)
    QMessageBox.about(formulario, 'Informação', 'Configurações salvas com sucesso!')
# ---- application bootstrap ----------------------------------------------
# Build the Qt application, load the designer form, and pre-fill every
# widget from cnf.ini.  The trading code reads/updates these module-level
# names via `global`.
app = QtWidgets.QApplication(sys.argv)
formulario=uic.loadUi("Principal.ui")
# session state shared with main()/CalculaStop()
saldo = 0
saldo_mov = 0
stop = False
stop_mensagem = ''
# load the saved settings
arquivo = configparser.RawConfigParser()
arquivo.read('cnf.ini')
email = arquivo.get('GERAL', 'email')
formulario.email.setText(email)
conta = arquivo.get('GERAL', 'conta')  # account selector passed to change_balance
formulario.comboBox.setCurrentIndex(0)
payout = arquivo.get('ESTRATEGIA', 'payout')
formulario.payout.setValue(int(payout))
gales = arquivo.get('ESTRATEGIA', 'gales')
formulario.gales.setValue(int(gales))
stopgain = arquivo.get('ESTRATEGIA', 'stopgain')
formulario.stopgain.setText(stopgain)
stoploss = arquivo.get('ESTRATEGIA', 'stoploss')
formulario.stoploss.setText(stoploss)
primeira_entrada = arquivo.get('ENTRADAS', 'primeira')
formulario.primeira.setText(primeira_entrada)
segunda_entrada = arquivo.get('ENTRADAS', 'segunda')
formulario.segunda.setText(segunda_entrada)
terceira_entrada = arquivo.get('ENTRADAS', 'terceira')
formulario.terceira.setText(terceira_entrada)
delay = arquivo.get('ESTRATEGIA', 'delay')
formulario.delay.setValue(int(delay))
# wire the buttons and enter the Qt event loop
formulario.pushButton.clicked.connect(main)
formulario.pushButton_2.clicked.connect(grava_configuracoes)
formulario.show()
app.exec()
|
test_partition_20.py | import threading
import pytest
from base.partition_wrapper import ApiPartitionWrapper
from base.client_base import TestcaseBase
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from common.code_mapping import PartitionErrorMessage
# common prefix for every generated partition name in this module
prefix = "partition_"
class TestPartitionParams(TestcaseBase):
    """ Test case of partition interface in parameters"""

    @pytest.mark.tags(CaseLabel.L0)
    # @pytest.mark.parametrize("partition_name, description",
    #                          [(cf.gen_unique_str(prefix), cf.gen_unique_str("desc_"))])
    def test_partition_default(self):
        """
        target: verify create a partition
        method: 1. create a partition
        expected: 1. create successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        description = cf.gen_unique_str("desc_")
        self.init_partition_wrap(collection_w, partition_name,
                                 description=description,
                                 check_task=CheckTasks.check_partition_property,
                                 check_items={"name": partition_name, "description": description,
                                              "is_empty": True, "num_entities": 0}
                                 )
        # check that the partition has been created
        assert collection_w.has_partition(partition_name)[0]

    @pytest.mark.tags(CaseLabel.L1)
    # @pytest.mark.xfail(reason="issue #5375")
    @pytest.mark.parametrize("partition_name", [""])
    def test_partition_empty_name(self, partition_name):
        """
        target: verify create a partition with empty name
        method: 1. create a partition empty none name
        expected: 1. raise exception
        """
        # create a collection
        collection_w = self.init_collection_wrap()
        # create partition
        self.partition_wrap.init_partition(collection_w.collection, partition_name,
                                           check_task=CheckTasks.err_res,
                                           check_items={ct.err_code: 1, ct.err_msg: "Partition name should not be empty"})

    @pytest.mark.tags(CaseLabel.L1)
    # @pytest.mark.parametrize("partition_name, description", [(cf.gen_unique_str(prefix), "")])
    def test_partition_empty_description(self):
        """
        target: verify create a partition with empty description
        method: 1. create a partition with empty description
        expected: 1. create successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # init partition
        partition_name = cf.gen_unique_str(prefix)
        description = ""
        self.init_partition_wrap(collection_w, partition_name,
                                 description=description,
                                 check_task=CheckTasks.check_partition_property,
                                 check_items={"name": partition_name, "description": description,
                                              "is_empty": True, "num_entities": 0}
                                 )
        # check that the partition has been created
        assert collection_w.has_partition(partition_name)[0]

    @pytest.mark.tags(CaseLabel.L1)
    # @pytest.mark.parametrize("partition_name, description",
    #                          [(cf.gen_str_by_length(255), cf.gen_str_by_length(2048))])
    def test_partition_max_description_length(self):
        """
        target: verify create a partition with 255 length name and 2048 length description
        method: 1. create a partition with 255 length name and 2048 length description
        expected: 1. create successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # init partition
        partition_name = cf.gen_str_by_length(255)
        description = cf.gen_str_by_length(2048)
        self.init_partition_wrap(collection_w, partition_name,
                                 description=description,
                                 check_task=CheckTasks.check_partition_property,
                                 check_items={"name": partition_name, "description": description,
                                              "is_empty": True}
                                 )

    @pytest.mark.tags(CaseLabel.L1)
    # @pytest.mark.parametrize("collection_name, partition_name, description",
    #                          [(cf.gen_unique_str(), cf.gen_unique_str(prefix), cf.gen_unique_str())])
    def test_partition_dup_name(self):
        """
        target: verify create partitions with duplicate name
        method: 1. create partitions with duplicate name
        expected: 1. create successfully
                  2. the same partition returned with diff object id
        """
        # create a collection
        collection_w = self.init_collection_wrap()
        # create two partitions
        partition_name = cf.gen_unique_str(prefix)
        description = cf.gen_unique_str()
        partition_w1 = self.init_partition_wrap(collection_w, partition_name, description)
        partition_w2 = self.init_partition_wrap(collection_w, partition_name, description)
        # public check func to be extracted
        # same server-side partition, but distinct client wrapper objects
        assert id(partition_w1.partition) != id(partition_w2.partition)
        assert partition_w1.name == partition_w2.name
        assert partition_w1.description == partition_w2.description

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("description", ct.get_invalid_strs)
    # @pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
    def test_partition_special_chars_description(self, description):
        """
        target: verify create a partition with special characters in description
        method: 1. create a partition with special characters in description
        expected: 1. create successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        self.init_partition_wrap(collection_w, partition_name,
                                 description=description,
                                 check_task=CheckTasks.check_partition_property,
                                 check_items={"name": partition_name, "description": description,
                                              "is_empty": True, "num_entities": 0}
                                 )
        assert collection_w.has_partition(partition_name)[0]

    @pytest.mark.tags(CaseLabel.L0)
    def test_partition_default_name(self):
        """
        target: verify create a partition with default name
        method: 1. get the _default partition
                2. create a partition with _default name
        expected: 1. the same partition returned
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # check that the default partition exists
        assert collection_w.has_partition(ct.default_partition_name)[0]
        # check that can get the _default partition
        collection, _ = collection_w.partition(ct.default_partition_name)
        # check that init the _default partition object
        partition_w = self.init_partition_wrap(collection_w, ct.default_partition_name)
        assert collection.name == partition_w.name

    @pytest.mark.tags(CaseLabel.L1)
    # @pytest.mark.parametrize("partition_name", [cf.gen_str_by_length(256)])
    def test_partition_maxlength_name(self):
        """
        target: verify create a partition with maxlength(256) name
        method: 1. create a partition with max length names
        expected: 1. raise exception
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_str_by_length(256)
        self.partition_wrap.init_partition(collection_w.collection, partition_name,
                                           check_task=CheckTasks.err_res,
                                           check_items={ct.err_code: 1, 'err_msg': "is illegal"}
                                           )

    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("partition_name", ct.get_invalid_strs)
    def test_partition_invalid_name(self, partition_name):
        """
        target: verify create a partition with invalid name
        method: 1. create a partition with invalid names
        expected: 1. raise exception
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        self.partition_wrap.init_partition(collection_w.collection, partition_name,
                                           check_task=CheckTasks.err_res,
                                           check_items={ct.err_code: 1, 'err_msg': "is illegal"}
                                           )
        # TODO: need an error code issue #5144 and assert independently

    @pytest.mark.tags(CaseLabel.L1)
    # @pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
    def test_partition_none_collection(self):
        """
        target: verify create a partition with none collection
        method: 1. create a partition with none collection
        expected: 1. raise exception
        """
        # create partition with collection is None
        partition_name = cf.gen_unique_str(prefix)
        self.partition_wrap.init_partition(collection=None, name=partition_name,
                                           check_task=CheckTasks.err_res,
                                           check_items={ct.err_code: 1,
                                                        ct.err_msg: "must be pymilvus.Collection"})

    @pytest.mark.tags(CaseLabel.L1)
    # @pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
    def test_partition_drop(self):
        """
        target: verify drop a partition in one collection
        method: 1. create a partition in one collection
                2. drop the partition
        expected: 1. drop successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        partition_w = self.init_partition_wrap(collection_w, partition_name)
        # check that the partition exists
        assert collection_w.has_partition(partition_name)[0]
        # drop partition
        partition_w.drop()
        # check that the partition not exists
        assert not collection_w.has_partition(partition_name)[0]

    @pytest.mark.tags(CaseLabel.L1)
    # @pytest.mark.parametrize("search_vectors", [cf.gen_vectors(1, ct.default_dim)])
    def test_partition_release(self):
        """
        target: verify release partition
        method: 1. create a collection and several partitions
                2. insert data into each partition
                3. flush and load the partitions
                4. release partition1
                5. release partition1 twice
        expected: 1. the released partition is released
                  2. the other partition is not released
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create two partitions
        partition_w1 = self.init_partition_wrap(collection_w)
        partition_w2 = self.init_partition_wrap(collection_w)
        # insert data to two partition
        partition_w1.insert(cf.gen_default_list_data())
        partition_w2.insert(cf.gen_default_list_data())
        # load two partitions
        partition_w1.load()
        partition_w2.load()
        # search two partitions
        search_vectors = cf.gen_vectors(1, ct.default_dim)
        res1, _ = partition_w1.search(data=search_vectors,
                                      anns_field=ct.default_float_vec_field_name,
                                      params={"nprobe": 32}, limit=1)
        res2, _ = partition_w2.search(data=search_vectors,
                                      anns_field=ct.default_float_vec_field_name,
                                      params={"nprobe": 32}, limit=1)
        assert len(res1) == 1 and len(res2) == 1
        # release the first partition
        partition_w1.release()
        # check result: searching the released partition must fail, while
        # the still-loaded partition keeps serving queries
        res1, _ = partition_w1.search(data=search_vectors,
                                      anns_field=ct.default_float_vec_field_name,
                                      params={"nprobe": 32}, limit=1,
                                      check_task=ct.CheckTasks.err_res,
                                      check_items={ct.err_code: 1, ct.err_msg: "partitions have been released"})
        res2, _ = partition_w2.search(data=search_vectors,
                                      anns_field=ct.default_float_vec_field_name,
                                      params={"nprobe": 32}, limit=1)
        assert len(res2) == 1

    @pytest.mark.tags(CaseLabel.L1)
    # @pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
    @pytest.mark.parametrize("data", [cf.gen_default_dataframe_data(10),
                                      cf.gen_default_list_data(10),
                                      cf.gen_default_tuple_data(10)])
    def test_partition_insert(self, data):
        """
        target: verify insert multi entities by dataFrame
        method: 1. create a collection and a partition
                2. partition.insert(data)
                3. insert data again
        expected: 1. insert data successfully
        """
        nums = 10  # must match the row count of the parametrized data above
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        partition_w = self.init_partition_wrap(collection_w, partition_name,
                                               check_task=CheckTasks.check_partition_property,
                                               check_items={"name": partition_name,
                                                            "is_empty": True, "num_entities": 0}
                                               )
        # insert data
        partition_w.insert(data)
        # self._connect().flush([collection_w.name]) # don't need flush for issue #5737
        assert not partition_w.is_empty
        assert partition_w.num_entities == nums
        # insert data
        partition_w.insert(data)
        # self._connect().flush([collection_w.name])
        assert not partition_w.is_empty
        assert partition_w.num_entities == (nums + nums)
class TestPartitionOperations(TestcaseBase):
""" Test case of partition interface in operations """
@pytest.mark.tags(CaseLabel.L1)
# @pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_dropped_collection(self):
"""
target: verify create partition against a dropped collection
method: 1. create collection1
2. drop collection1
3. create partition in collection1
expected: 1. raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# drop collection
collection_w.drop()
# create partition failed
self.partition_wrap.init_partition(collection_w.collection, cf.gen_unique_str(prefix),
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
@pytest.mark.tags(CaseLabel.L2)
def test_partition_same_name_in_diff_collections(self):
"""
target: verify create partitions with same name in diff collections
method: 1. create a partition in collection1
2. create a partition in collection2
expected: 1. create successfully
"""
# create two collections
collection_w1 = self.init_collection_wrap()
collection_w2 = self.init_collection_wrap()
# create 2 partitions in 2 diff collections
partition_name = cf.gen_unique_str(prefix)
self.init_partition_wrap(collection_wrap=collection_w1, name=partition_name)
self.init_partition_wrap(collection_wrap=collection_w2, name=partition_name)
# check result
assert collection_w1.has_partition(partition_name)[0]
assert collection_w2.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_partition_multi_partitions_in_collection(self):
"""
target: verify create multiple partitions in one collection
method: 1. create multiple partitions in one collection
expected: 1. create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
for _ in range(10):
partition_name = cf.gen_unique_str(prefix)
# create partition with different names and check the partition exists
self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.skip(reason="skip for memory issue check")
    def test_partition_maximum_partitions(self):
        """
        target: verify create maximum partitions
        method: 1. create maximum partitions
                2. create one more partition
        expected: 1. raise exception
        """
        # fill the per-collection partition budget from several threads in
        # parallel, then verify that one more creation is rejected
        threads_num = 8
        threads = []

        def create_partition(collection, threads_n):
            # each worker creates its share of ct.max_partition_num
            for _ in range(ct.max_partition_num // threads_n):
                name = cf.gen_unique_str(prefix)
                par_wrap = ApiPartitionWrapper()
                par_wrap.init_partition(collection, name, check_task=CheckTasks.check_nothing)

        collection_w = self.init_collection_wrap()
        for _ in range(threads_num):
            t = threading.Thread(target=create_partition, args=(collection_w.collection, threads_num))
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
        # the budget is exhausted: the next creation must fail
        p_name = cf.gen_unique_str()
        self.partition_wrap.init_partition(
            collection_w.collection, p_name,
            check_task=CheckTasks.err_res,
            check_items={ct.err_code: 1,
                         ct.err_msg: "maximum partition's number should be limit to 4096"})
@pytest.mark.tags(CaseLabel.L0)
def test_partition_drop_default_partition(self):
"""
target: verify drop the _default partition
method: 1. drop the _default partition
expected: 1. raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# get the default partition
default_partition, _ = collection_w.partition(ct.default_partition_name)
partition_w = self.init_partition_wrap(collection_w, ct.default_partition_name)
assert default_partition.name == partition_w.name
# verify that drop partition with error
partition_w.drop(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "default partition cannot be deleted"})
@pytest.mark.tags(CaseLabel.L1)
# @pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_drop_partition_twice(self):
"""
target: verify drop the same partition twice
method: 1.create a partition with default schema
2. drop the partition
3. drop the same partition again
expected: raise exception when 2nd time
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
collection_w.has_partition(partition_name)
# drop partition
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
# verify that drop the partition again with exception
partition_w.drop(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: PartitionErrorMessage.PartitionNotExist})
@pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_create_and_drop_multi_times(self):
"""
target: verify create and drop for times
method: 1.create a partition with default schema
2. drop the partition
3. loop #1 and #2 for times
expected: create and drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# range for 5 times
partition_name = cf.gen_unique_str(prefix)
for i in range(5):
# create partition and check that the partition exists
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# drop partition and check that the partition not exists
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.parametrize("flush", [True, False])
# @pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_drop_non_empty_partition(self):
"""
target: verify drop a partition which has data inserted
method: 1.create a partition with default schema
2. insert some data
3. flush / not flush
3. drop the partition
expected: drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# insert data to partition
partition_w.insert(cf.gen_default_dataframe_data())
# # flush remove flush for issue #5837
# if flush:
# self._connect().flush([collection_w.name])
# drop partition
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L2)
# @pytest.mark.parametrize("flush", [True, False])
@pytest.mark.parametrize("data", [cf.gen_default_list_data(nb=3000)])
@pytest.mark.parametrize("index_param", cf.gen_simple_index())
def test_partition_drop_indexed_partition(self, data, index_param):
"""
target: verify drop an indexed partition
method: 1.create a partition
2. insert same data
3. create an index
4. flush or not flush (remove flush step for issue # 5837)
5. drop the partition
expected: drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# insert data to partition
ins_res, _ = partition_w.insert(data)
assert len(ins_res.primary_keys) == len(data[0])
# create index of collection
collection_w.create_index(ct.default_float_vec_field_name, index_param)
# # flush
# if flush:
# self._connect().flush([collection_w.name])
# drop partition
partition_w.drop()
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L1)
def test_partition_release_empty_partition(self):
"""
target: verify release an empty partition
method: 1.create a partition
2. release the partition
expected: release successfully
"""
# create partition
partition_w = self.init_partition_wrap()
assert partition_w.is_empty
# release partition
partition_w.release()
# TODO: assert no more memory consumed
@pytest.mark.tags(CaseLabel.L1)
def test_partition_release_dropped_partition(self):
"""
target: verify release an dropped partition
method: 1.create a partition
2. drop the partition
2. release the partition
expected: raise exception
"""
# create partition
partition_w = self.init_partition_wrap()
# drop partition
partition_w.drop()
# release the dropped partition and check err response
partition_w.release(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: PartitionErrorMessage.PartitionNotExist})
@pytest.mark.tags(CaseLabel.L1)
# @pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_release_dropped_collection(self):
"""
target: verify release an dropped collection
method: 1.create a collection and partition
2. drop the collection
2. release the partition
expected: raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
assert collection_w.has_partition(partition_name)[0]
# drop collection
collection_w.drop()
# release the partition and check err response
partition_w.release(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
    @pytest.mark.tags(CaseLabel.L1)
    # @pytest.mark.parametrize("partition_name, search_vectors",
    #                          [(cf.gen_unique_str(prefix), cf.gen_vectors(1, ct.default_dim))])
    def test_partition_release_after_collection_released(self):
        """
        target: verify release a partition after the collection released
        method: 1.create a collection and partition
                2. insert some data
                2. release the collection
                2. release the partition
        expected: partition released successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        partition_w = self.init_partition_wrap(collection_w, partition_name)
        assert collection_w.has_partition(partition_name)[0]
        # insert data to partition
        data = cf.gen_default_list_data()
        partition_w.insert(data)
        assert partition_w.num_entities == len(data[0])
        assert collection_w.num_entities == len(data[0])
        # load partition
        partition_w.load()
        # search of partition succeeds while loaded
        search_vectors = cf.gen_vectors(1, ct.default_dim)
        res_1, _ = partition_w.search(data=search_vectors,
                                      anns_field=ct.default_float_vec_field_name,
                                      params={"nprobe": 32}, limit=1)
        assert len(res_1) == 1
        # release collection
        collection_w.release()
        # search of partition now fails because nothing is loaded
        # NOTE(review): err_code 0 differs from the err_code 1 used by the
        # other error checks in this module — confirm it is intentional
        res_2, _ = partition_w.search(data=search_vectors,
                                      anns_field=ct.default_float_vec_field_name,
                                      params={"nprobe": 32}, limit=1,
                                      check_task=ct.CheckTasks.err_res,
                                      check_items={ct.err_code: 0,
                                                   ct.err_msg: "not loaded into memory"})
        # release partition
        partition_w.release()
@pytest.mark.tags(CaseLabel.L1)
# @pytest.mark.parametrize("partition_name, data", [(ct.default_partition_name, cf.gen_default_dataframe_data())])
def test_partition_insert_default_partition(self):
    """
    target: verify insert data into _default partition
    method: 1.create a collection
            2. insert some data into _default partition
    expected: insert successfully
    """
    # a freshly created collection always owns the _default partition
    collection_w = self.init_collection_wrap()
    default_name = ct.default_partition_name
    assert collection_w.has_partition(default_name)[0]
    # wrap the pre-existing _default partition
    partition_w = self.init_partition_wrap(collection_w, default_name)
    # insert a dataframe and verify the row count through num_entities
    df = cf.gen_default_dataframe_data()
    partition_w.insert(df)
    # self._connect().flush([collection_w.name])
    assert partition_w.num_entities == len(df)
@pytest.mark.tags(CaseLabel.L1)
def test_partition_insert_dropped_partition(self):
    """
    target: verify insert data into dropped partition
    method: 1.create a collection
            2. insert some data into dropped partition
    expected: raise exception
    """
    # create a partition and immediately drop it
    partition_w = self.init_partition_wrap()
    partition_w.drop()
    # inserting into the dropped partition must report an error
    df = cf.gen_default_dataframe_data()
    partition_w.insert(df,
                       check_task=CheckTasks.err_res,
                       check_items={ct.err_code: 1, ct.err_msg: "Partition not exist"})
    # TODO: update the assert error
@pytest.mark.tags(CaseLabel.L1)
# @pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)])
def test_partition_insert_dropped_collection(self):
    """
    target: verify insert data into dropped collection
    method: 1.create a collection
            2. insert some data into dropped collection
    expected: raise exception
    """
    # set up a collection with one extra partition
    collection_w = self.init_collection_wrap()
    partition_name = cf.gen_unique_str(prefix)
    partition_w = self.init_partition_wrap(collection_w, partition_name)
    assert collection_w.has_partition(partition_name)[0]
    # drop the whole collection out from under the partition
    collection_w.drop()
    # inserting through the stale partition wrapper must report an error
    partition_w.insert(cf.gen_default_dataframe_data(),
                       check_task=CheckTasks.err_res,
                       check_items={ct.err_code: 1, ct.err_msg: "None Type"})
@pytest.mark.tags(CaseLabel.L2)
def test_partition_insert_maximum_size_data(self):
    """
    target: verify insert maximum size data(256M?) a time
    method: 1.create a partition
            2. insert maximum size data
    expected: insert successfully
    """
    # collection plus an auto-named partition
    collection_w = self.init_collection_wrap()
    partition_w = self.init_partition_wrap(collection_w)
    # TODO: clarify the max size of data
    nb = 100000
    # a large insert may be slow, so allow a generous timeout
    result, _ = partition_w.insert(cf.gen_default_dataframe_data(nb), timeout=40)
    assert len(result.primary_keys) == nb
    # self._connect().flush([collection_w.name])
    assert partition_w.num_entities == nb
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("dim", [ct.default_dim - 1, ct.default_dim + 1])
def test_partition_insert_mismatched_dimensions(self, dim):
    """
    target: verify inserting entities whose vector dim mismatches the schema fails
    method: 1.create a collection with default dim
            2. insert data whose vector dim is one off the schema dim
    expected: raise exception
    """
    # create partition
    partition_w = self.init_partition_wrap()
    # one dimension above/below the collection's schema dim
    data = cf.gen_default_list_data(nb=10, dim=dim)
    # insert data to partition; the server must reject the dim mismatch
    partition_w.insert(data, check_task=CheckTasks.err_res,
                       check_items={ct.err_code: 1, ct.err_msg: "but entities field dim"})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("sync", [True, False])
def test_partition_insert_sync(self, sync):
    """
    target: verify insert sync
    method: 1.create a partition
            2. insert data in sync
    expected: insert successfully
    """
    # TODO: not implemented yet — placeholder so the parametrized cases
    # appear in collection
    pass
|
utils.py | # -*- coding: utf-8 -*-
from __future__ import print_function
__true_print = print # noqa
import os
import sys
import pdb
import time
import json
import datetime
import argparse
import threading
import logging
import string
import random
# import multiprocessing
# import numpy
# import psutil
import h5py
# import docker
from yaml import full_load, dump
import tableprint as tp
from pprint import pprint
logger = logging.getLogger("milvus_benchmark.utils")
MULTI_DB_SLAVE_PATH = "/opt/milvus/data2;/opt/milvus/data3"
REGISTRY_URL = "registry.zilliz.com/milvus/engine"
def get_unique_name():
    """Return a random, lowercase name of the form 'benchmark-test-<8 chars>'."""
    alphabet = string.ascii_letters + string.digits
    suffix = "".join(random.choice(alphabet) for _ in range(8))
    return ("benchmark-test-" + suffix).lower()
def get_current_time():
    """Return the local time formatted as 'YYYY-mm-dd HH:MM:SS'."""
    now = time.localtime()
    return time.strftime('%Y-%m-%d %H:%M:%S', now)
def print_table(headers, columns, data):
    """Render a table via tableprint: each body row is [column_label, *row_data]."""
    rows = []
    for idx, label in enumerate(columns):
        rows.append([label] + list(data[idx]))
    tp.table(rows, headers)
def get_dataset(hdf5_file_path):
    """Open the HDF5 file at *hdf5_file_path* for reading and return it.

    Raises Exception when the path does not exist.
    """
    if not os.path.exists(hdf5_file_path):
        raise Exception("%s not existed" % hdf5_file_path)
    # FIX: open read-only explicitly — relying on h5py's implicit default
    # mode is deprecated and could open the benchmark data writable
    dataset = h5py.File(hdf5_file_path, "r")
    return dataset
def modify_config(k, v, type=None, file_path="conf/server_config.yaml", db_slave=None):
    """Patch a single key of a milvus server_config.yaml in place.

    *k* is matched by substring against the known config keys; *v* is
    coerced to int where the schema expects a number.  ``type`` is kept
    for backward compatibility but unused.  A truthy *db_slave* also
    enables the multi-path db_slave_path.
    Raises Exception when the file is missing or fails to parse.
    """
    if not os.path.isfile(file_path):
        raise Exception('File: %s not found' % file_path)
    # FIX: the with-statement already closes the handle; the original's
    # explicit f.close() calls inside the with blocks were redundant
    with open(file_path) as f:
        config_dict = full_load(f)
    if config_dict:
        if k.find("use_blas_threshold") != -1:
            config_dict['engine_config']['use_blas_threshold'] = int(v)
        elif k.find("use_gpu_threshold") != -1:
            config_dict['engine_config']['gpu_search_threshold'] = int(v)
        elif k.find("cpu_cache_capacity") != -1:
            config_dict['cache_config']['cpu_cache_capacity'] = int(v)
        elif k.find("enable_gpu") != -1:
            config_dict['gpu_resource_config']['enable'] = v
        elif k.find("gpu_cache_capacity") != -1:
            config_dict['gpu_resource_config']['cache_capacity'] = int(v)
        elif k.find("index_build_device") != -1:
            config_dict['gpu_resource_config']['build_index_resources'] = v
        elif k.find("search_resources") != -1:
            config_dict['resource_config']['resources'] = v
        if db_slave:
            config_dict['db_config']['db_slave_path'] = MULTI_DB_SLAVE_PATH
        with open(file_path, 'w') as f:
            dump(config_dict, f, default_flow_style=False)
    else:
        raise Exception('Load file:%s error' % file_path)
# update server_config.yaml
def update_server_config(file_path, server_config):
    """Apply *server_config* key/value overrides to a server_config.yaml.

    Keys are matched by substring; numeric settings are coerced to int.
    Raises Exception when the file is missing.
    """
    if not os.path.isfile(file_path):
        raise Exception('File: %s not found' % file_path)
    # FIX: removed the redundant f.close() calls inside the with blocks —
    # the context manager already closes the file
    with open(file_path) as f:
        values_dict = full_load(f)
    for k, v in server_config.items():
        if k.find("primary_path") != -1:
            values_dict["db_config"]["primary_path"] = v
        elif k.find("use_blas_threshold") != -1:
            values_dict['engine_config']['use_blas_threshold'] = int(v)
        elif k.find("gpu_search_threshold") != -1:
            values_dict['engine_config']['gpu_search_threshold'] = int(v)
        elif k.find("cpu_cache_capacity") != -1:
            values_dict['cache_config']['cpu_cache_capacity'] = int(v)
        elif k.find("cache_insert_data") != -1:
            values_dict['cache_config']['cache_insert_data'] = v
        elif k.find("enable") != -1:
            values_dict['gpu_resource_config']['enable'] = v
        elif k.find("gpu_cache_capacity") != -1:
            values_dict['gpu_resource_config']['cache_capacity'] = int(v)
        elif k.find("build_index_resources") != -1:
            values_dict['gpu_resource_config']['build_index_resources'] = v
        elif k.find("search_resources") != -1:
            values_dict['gpu_resource_config']['search_resources'] = v
    with open(file_path, 'w') as f:
        dump(values_dict, f, default_flow_style=False)
# update values.yaml
def update_values(file_path, hostname, server_config):
    """Rewrite a milvus helm values.yaml for a benchmark run.

    Backs the file up (or restores an existing backup), applies the
    *server_config* overrides, pins the deployment to *hostname* via
    nodeSelector, and sizes resources from the node's allocatable CPUs.
    NOTE(review): assumes the values.yaml already contains 'wal', 'gpu',
    'metrics' and 'mysql' sections — missing keys raise KeyError; confirm
    against the chart in use.
    """
    from kubernetes import client, config
    client.rest.logger.setLevel(logging.WARNING)
    if not os.path.isfile(file_path):
        raise Exception('File: %s not found' % file_path)
    # bak values.yaml: restore from an existing backup, else create one,
    # so repeated runs always start from the pristine file
    file_name = os.path.basename(file_path)
    bak_file_name = file_name+".bak"
    file_parent_path = os.path.dirname(file_path)
    bak_file_path = file_parent_path+'/'+bak_file_name
    if os.path.exists(bak_file_path):
        os.system("cp %s %s" % (bak_file_path, file_path))
    else:
        os.system("cp %s %s" % (file_path, bak_file_path))
    with open(file_path) as f:
        values_dict = full_load(f)
        f.close()
    # keys are matched by substring; numeric values coerced to int
    for k, v in server_config.items():
        if k.find("primary_path") != -1:
            values_dict["primaryPath"] = v
            values_dict['wal']['path'] = v+"/wal"
        elif k.find("use_blas_threshold") != -1:
            values_dict['useBLASThreshold'] = int(v)
        elif k.find("gpu_search_threshold") != -1:
            values_dict['gpuSearchThreshold'] = int(v)
        elif k.find("cpu_cache_capacity") != -1:
            values_dict['cpuCacheCapacity'] = int(v)
        elif k.find("cache_insert_data") != -1:
            values_dict['cacheInsertData'] = v
        elif k.find("insert_buffer_size") != -1:
            values_dict['insertBufferSize'] = v
        elif k.find("gpu_resource_config.enable") != -1:
            values_dict['gpu']['enabled'] = v
        elif k.find("gpu_resource_config.cache_capacity") != -1:
            values_dict['gpu']['cacheCapacity'] = int(v)
        elif k.find("build_index_resources") != -1:
            values_dict['gpu']['buildIndexResources'] = v
        elif k.find("search_resources") != -1:
            values_dict['gpu']['searchResources'] = v
        # wal
        elif k.find("auto_flush_interval") != -1:
            values_dict['autoFlushInterval'] = v
        elif k.find("wal_enable") != -1:
            values_dict['wal']['enabled'] = v
    # if values_dict['nodeSelector']:
    #     logger.warning("nodeSelector has been set: %s" % str(values_dict['engine']['nodeSelector']))
    #     return
    values_dict["wal"]["ignoreErrorLog"] = True
    # enable monitor
    values_dict["metrics"]["enabled"] = True
    values_dict["metrics"]["address"] = "192.168.1.237"
    values_dict["metrics"]["port"] = 9091
    # update values.yaml with the given host
    values_dict['nodeSelector'] = {'kubernetes.io/hostname': hostname}
    # Using sqlite
    values_dict["mysql"]["enabled"] = False
    config.load_kube_config()
    v1 = client.CoreV1Api()
    # node = v1.read_node(hostname)
    cpus = v1.read_node(hostname).status.allocatable.get("cpu")
    # DEBUG
    # set limit/request cpus in resources: limit = allocatable, request = one
    # fewer core to leave headroom for the node itself
    values_dict['resources'] = {
        "limits": {
            "cpu": str(int(cpus))+".0"
        },
        "requests": {
            "cpu": str(int(cpus)-1)+".0"
        }
    }
    # allow scheduling onto the dedicated performance worker nodes
    values_dict['tolerations'] = [{
        "key": "worker",
        "operator": "Equal",
        "value": "performance",
        "effect": "NoSchedule"
    }]
    # add extra volumes: CIFS share mounted at /test for benchmark datasets
    values_dict['extraVolumes'] = [{
        'name': 'test',
        'flexVolume': {
            'driver': "fstab/cifs",
            'fsType': "cifs",
            'secretRef': {
                'name': "cifs-test-secret"
            },
            'options': {
                'networkPath': "//192.168.1.126/test",
                'mountOptions': "vers=1.0"
            }
        }
    }]
    values_dict['extraVolumeMounts'] = [{
        'name': 'test',
        'mountPath': '/test'
    }]
    logger.debug(values_dict)
    with open(file_path, 'w') as f:
        dump(values_dict, f, default_flow_style=False)
        f.close()
# deploy server
def helm_install_server(helm_path, image_tag, image_type, name, namespace):
    """Install a milvus helm release and return (pod_name, pod_ip).

    Returns None when `helm install` fails, and (None, None) when no pod
    whose name contains *name* is found in *namespace* afterwards.
    NOTE(review): image_type is currently unused — confirm whether the
    chart should receive it.
    """
    from kubernetes import client, config
    client.rest.logger.setLevel(logging.WARNING)
    timeout = 300
    install_cmd = "helm install --wait --timeout %ds \
        --set image.repository=%s \
        --set image.tag=%s \
        --set image.pullPolicy=Always \
        --set service.type=ClusterIP \
        -f ci/filebeat/values.yaml \
        --namespace %s \
        %s ." % (timeout, REGISTRY_URL, image_tag, namespace, name)
    logger.debug(install_cmd)
    if os.system("cd %s && %s" % (helm_path, install_cmd)):
        logger.error("Helm install failed")
        return None
    time.sleep(5)
    config.load_kube_config()
    v1 = client.CoreV1Api()
    host = "%s.%s.svc.cluster.local" % (name, namespace)
    logger.debug(host)
    pod_name = None
    # BUG FIX: initialize pod_ip — the original initialized an unused
    # `pod_id` instead, leaving pod_ip unbound (NameError) when no pod
    # matched the release name
    pod_ip = None
    pods = v1.list_namespaced_pod(namespace)
    for i in pods.items:
        if i.metadata.name.find(name) != -1:
            pod_name = i.metadata.name
            pod_ip = i.status.pod_ip
    logger.debug(pod_name)
    logger.debug(pod_ip)
    return pod_name, pod_ip
# delete server
def helm_del_server(name, namespace):
    """Uninstall the helm release *name*; return True on success.

    NOTE(review): the namespace argument is ignored — the command is
    hard-wired to '-n milvus'; confirm whether it should use *namespace*.
    """
    # del_cmd = "helm uninstall -n milvus benchmark-test-gzelwvgk"
    # os.system(del_cmd)
    del_cmd = "helm uninstall -n milvus %s" % name
    logger.debug(del_cmd)
    if os.system(del_cmd) != 0:
        logger.error("Helm delete name:%s failed" % name)
        return False
    return True
# def pull_image(image):
# registry = image.split(":")[0]
# image_tag = image.split(":")[1]
# client = docker.APIClient(base_url='unix://var/run/docker.sock')
# logger.info("Start pulling image: %s" % image)
# return client.pull(registry, image_tag)
# def run_server(image, mem_limit=None, timeout=30, test_type="local", volume_name=None, db_slave=None):
# import colors
# client = docker.from_env()
# # if mem_limit is None:
# # mem_limit = psutil.virtual_memory().available
# # logger.info('Memory limit:', mem_limit)
# # cpu_limit = "0-%d" % (multiprocessing.cpu_count() - 1)
# # logger.info('Running on CPUs:', cpu_limit)
# for dir_item in ['logs', 'db']:
# try:
# os.mkdir(os.path.abspath(dir_item))
# except Exception as e:
# pass
# if test_type == "local":
# volumes = {
# os.path.abspath('conf'):
# {'bind': '/opt/milvus/conf', 'mode': 'ro'},
# os.path.abspath('logs'):
# {'bind': '/opt/milvus/logs', 'mode': 'rw'},
# os.path.abspath('db'):
# {'bind': '/opt/milvus/db', 'mode': 'rw'},
# }
# elif test_type == "remote":
# if volume_name is None:
# raise Exception("No volume name")
# remote_log_dir = volume_name+'/logs'
# remote_db_dir = volume_name+'/db'
# for dir_item in [remote_log_dir, remote_db_dir]:
# if not os.path.isdir(dir_item):
# os.makedirs(dir_item, exist_ok=True)
# volumes = {
# os.path.abspath('conf'):
# {'bind': '/opt/milvus/conf', 'mode': 'ro'},
# remote_log_dir:
# {'bind': '/opt/milvus/logs', 'mode': 'rw'},
# remote_db_dir:
# {'bind': '/opt/milvus/db', 'mode': 'rw'}
# }
# # add volumes
# if db_slave and isinstance(db_slave, int):
# for i in range(2, db_slave+1):
# remote_db_dir = volume_name+'/data'+str(i)
# if not os.path.isdir(remote_db_dir):
# os.makedirs(remote_db_dir, exist_ok=True)
# volumes[remote_db_dir] = {'bind': '/opt/milvus/data'+str(i), 'mode': 'rw'}
# container = client.containers.run(
# image,
# volumes=volumes,
# runtime="nvidia",
# ports={'19530/tcp': 19530, '8080/tcp': 8080},
# # environment=["OMP_NUM_THREADS=48"],
# # cpuset_cpus=cpu_limit,
# # mem_limit=mem_limit,
# # environment=[""],
# detach=True)
# def stream_logs():
# for line in container.logs(stream=True):
# logger.info(colors.color(line.decode().rstrip(), fg='blue'))
# if sys.version_info >= (3, 0):
# t = threading.Thread(target=stream_logs, daemon=True)
# else:
# t = threading.Thread(target=stream_logs)
# t.daemon = True
# t.start()
# logger.info('Container: %s started' % container)
# return container
# # exit_code = container.wait(timeout=timeout)
# # # Exit if exit code
# # if exit_code == 0:
# # return container
# # elif exit_code is not None:
# # print(colors.color(container.logs().decode(), fg='red'))
# # raise Exception('Child process raised exception %s' % str(exit_code))
# def restart_server(container):
# client = docker.APIClient(base_url='unix://var/run/docker.sock')
# client.restart(container.name)
# logger.info('Container: %s restarted' % container.name)
# return container
# def remove_container(container):
# container.remove(force=True)
# logger.info('Container: %s removed' % container)
# def remove_all_containers(image):
# client = docker.from_env()
# try:
# for container in client.containers.list():
# if image in container.image.tags:
# container.stop(timeout=30)
# container.remove(force=True)
# except Exception as e:
# logger.error("Containers removed failed")
# def container_exists(image):
# '''
# Check if container existed with the given image name
# @params: image name
# @return: container if exists
# '''
# res = False
# client = docker.from_env()
# for container in client.containers.list():
# if image in container.image.tags:
# # True
# res = container
# return res
if __name__ == '__main__':
    # print(pull_image('branch-0.3.1-debug'))
    # BUG FIX: the previous body called stop_server(), which is not defined
    # anywhere in this module (the docker helpers above are commented out),
    # so running the module as a script always raised NameError.
    pass
|
threads.py | import sys
import mpi4py
mpi4py.profile('mpe', logfile='threads')
from mpi4py import MPI
from array import array
try:
import threading
except ImportError:
sys.stderr.write("threading module not available\n")
sys.exit(0)
# 1000*1000 ints (~4 MB) per direction — presumably sized to force the MPI
# rendezvous (blocking) protocol that the two-thread setup exercises; confirm
send_msg = array('i', [7]*1000); send_msg *= 1000
recv_msg = array('i', [0]*1000); recv_msg *= 1000
def self_send(comm, rank):
    # blocking send of the module-level send_msg buffer to our own rank;
    # pairs with self_recv running on another thread
    comm.Send([send_msg, MPI.INT], dest=rank, tag=0)
def self_recv(comm, rank):
    # blocking receive into the module-level recv_msg buffer from our own
    # rank; pairs with self_send running on another thread
    comm.Recv([recv_msg, MPI.INT], source=rank, tag=0)
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# send to self and receive from self on two separate threads — a single
# thread would block forever on the large message since both Send and Recv
# are blocking calls
send_thread = threading.Thread(target=self_send, args=(comm, rank))
recv_thread = threading.Thread(target=self_recv, args=(comm, rank))
send_thread.start()
recv_thread.start()
recv_thread.join()
send_thread.join()
|
control.py | import atexit
import errno
import io
import logging
import mmap
import os.path
import pickle
import queue
import shutil
import subprocess
import tempfile
import threading
import types
import uuid
from collections import abc
from shlex import quote
import __main__
from speedling import conf
# temporary solution with threading,
# most of the locaed opt can be considered atomic in python so likely we can work with less lock
# if lock holder thread does not gets cpu cycle the lock hold can be long
# TODO: switch event based i/o
# TODO: switch to C
# NOTE: uin32 might be sufficient
LOG = logging.getLogger(__name__)
def _call(cmd, single=False):
if not single:
# allowing bashism
p = subprocess.Popen(cmd, shell=True, close_fds=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
executable='/bin/bash')
else:
p = subprocess.Popen(cmd, shell=False, close_fds=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
r = p.returncode
return (r, stdout.decode('utf-8'), stderr.decode('utf-8'))
def _run(cmd, single=False):
    """Execute *cmd* for its side effects only; raise on non-zero exit.

    Thin wrapper over _call(); the caller is not interested in the output.
    """
    r, stdout, stderr = _call(cmd, single)
    if (r != 0):
        LOG.info(stdout)
        # BUG FIX: the second log line previously re-logged stdout; log
        # stderr so the actual error text is visible
        LOG.error(stderr)
        raise Exception(str((cmd, r, stdout, stderr)))  # TODO: new ex
    LOG.debug(str((cmd, stdout, stderr)))
# Size (bytes) and path of the payload tarball shipped to each remote host;
# both stay None until init_transfer() populates them.
payload_size = None
payload_path = None
def tar(targetfile, directories):
    """Create a gzipped tarball of *directories* at *targetfile*.

    Each directory is archived relative to its parent (``tar -C parent
    leaf``) so the archive unpacks to the bare directory names; compiled
    python artifacts are excluded.
    """
    parts = []
    for directory in directories:
        absolute = os.path.abspath(directory)
        pieces = absolute.split(os.path.sep)
        parent = os.path.sep.join(pieces[0:-1])
        leaf = pieces[-1]
        parts.append('-C ' + quote(parent) + ' ' + quote(leaf))
    tar_cmd = "tar -czf {payload} --exclude='*.pyc' --exclude='*.pyo' {dirs}".format(dirs=' '.join(parts),
                                                                                    payload=targetfile)
    _run(tar_cmd)
def init_transfer():
    """Build the payload tarball (this package + __main__'s package + extras)
    and publish its path/size via the module globals.

    The temp directory holding the tarball is removed at interpreter exit.
    """
    global payload_size, payload_path
    location = __file__.split(os.path.sep)
    sl_dir = os.sep.join((location[0:-1]))  # speedling/control.py
    main_loc = __main__.__file__.split(os.path.sep)
    main_dir = os.sep.join(main_loc[0:-1])
    # sentinel: marks "transfer initialized but tarball not yet built"
    payload_size = -1
    temp_dir = tempfile.mkdtemp()
    # TODO: delete on exit
    payload_path = temp_dir + '/payload.tar.gz'
    # extra modules requested on the command line are shipped as well
    extra = set(conf.get_args().extra_module)
    dirs = {main_dir, sl_dir}.union(extra)
    tar(payload_path, dirs)
    payload_size = os.path.getsize(payload_path)
    atexit.register(shutil.rmtree, temp_dir)
# task_id -> {'response_dicts', 'mutex', 'finalize', 'to_process'};
# every access is guarded by ssh_messages_mutex
ssh_messages = {}
ssh_messages_mutex = threading.Lock()
# sentinel the remote worker prints on stdout/stderr once its control
# channel is ready; everything before it is treated as handshake noise
magic_string = b'systemcontrol\n'
magic_string_lenth = len(magic_string)  # NOTE: 'lenth' typo kept — name is used throughout
ZERO_SIZE = int(0).to_bytes(8, byteorder='little')  # stream terminator frame
def established_log(ssh_ctx):
    """Log the handshake output captured before the magic string arrived."""
    # strip the trailing magic string from both captured streams
    stderr_part = ssh_ctx['stderr_text'].decode('utf-8')[:-magic_string_lenth]
    stdout_part = ssh_ctx['stdout_text'].decode('utf-8')[:-magic_string_lenth]
    LOG.info("{host}:System Control. stderr: {stderr} stdout: {stdout}".format(
        host=ssh_ctx['host'],
        stderr=stderr_part,
        stdout=stdout_part))
def early_terminate(ssh_ctx):
    """Mark the connection terminated; warn when nobody requested it."""
    if ssh_ctx['terminate']:
        # termination was requested — nothing to report
        return
    LOG.warning('Connection to host {host} terminated without a request'
                'stderr: {stderr} stdout: {stdout}'.format
                (host=ssh_ctx['host'],
                 stderr=ssh_ctx['stderr_text'].decode('utf-8')[:-magic_string_lenth],
                 stdout=ssh_ctx['stdout_text'].decode('utf-8')[:-magic_string_lenth]))
    ssh_ctx['terminate'] = True
def input_handler(ssh_ctx):
    """Reader thread for the remote worker's stdout.

    First consumes handshake noise until the magic string arrives, then
    loops over length-prefixed pickle frames (uint64 little-endian size +
    payload) and routes each response into ssh_messages by task_id,
    releasing the task's 'finalize' lock once all hosts have answered.
    """
    pipe = ssh_ctx['popen'].stdout
    host = ssh_ctx['host']
    ssh_ctx['stdout_text'] = pipe.read(magic_string_lenth)
    # TODO: handle as log the before message, error if magic word does not arrive within timelimit
    while ssh_ctx['stdout_text'][-magic_string_lenth:] != magic_string:
        n = pipe.read(1)
        if not n:
            early_terminate(ssh_ctx)
            return
        ssh_ctx['stdout_text'] += n
    if not ssh_ctx['established']:  # not thread safe, log may miss
        established_log(ssh_ctx)
        ssh_ctx['established'] = True
    # size uint64
    # payload_pickle
    # repeate..
    while True:
        to_nr = b''
        size = None
        msg = b''
        # assuming the read_size is respected, not looping now..
        to_nr = pipe.read(8)
        if len(to_nr) == 8:
            size = int.from_bytes(to_nr, byteorder='little')
        else:
            # short read on the size prefix means the channel is gone
            if not ssh_ctx['terminate']:
                LOG.warning('Connection to host {host} terminated without a request'.format(host=host))
            ssh_ctx['terminate'] = True
            return
        msg = pipe.read(size)
        if not msg:
            ssh_ctx['terminate'] = True
            LOG.error('Unexpected ssh connection termination to {host}'.format(host=host))
            return
        # NOTE(review): pickle.loads on data from the remote worker — only
        # safe because the worker is our own code started over ssh
        real_msg = pickle.loads(msg)
        task_id = real_msg['task_id']
        ssh_messages_mutex.acquire()
        ctx = ssh_messages[task_id]
        ssh_messages_mutex.release()
        # per-task mutex protects the response dict and the counter
        ctx['mutex'].acquire()
        ctx['response_dicts'][host] = real_msg
        ctx['to_process'] -= 1
        if ctx['to_process'] == 0:
            # last response: unblock wait_for_all_response()
            ctx['finalize'].release()
        ctx['mutex'].release()
def sender(ssh_ctx):
    """Writer thread for the remote worker's stdin.

    First ships the payload tarball (size line + raw bytes), then forever
    forwards queued messages; a message with a 'stream' is chunked as
    [uint64 size + chunk]* terminated by a ZERO_SIZE frame.
    """
    try:
        pipe = ssh_ctx['popen'].stdin
        queue = ssh_ctx['queue']
        with open(payload_path, mode='rb') as file:
            fileContent = file.read()
        # the remote `read a` consumes this size line before dd copies the tarball
        pipe.write(str(payload_size).encode('utf-8') + b'\n')
        pipe.write(fileContent)
        while True:
            # blocks until send_msgs*() enqueues work for this host
            ans = queue.get(block=True)
            pipe.write(ans['head'])
            if 'stream' in ans:  # in case of stream, the head needs to notifiy the recevier
                stream = ans['stream']
                while True:
                    buf = stream.read(65536)
                    le = len(buf)
                    if not le:
                        break
                    size = int(le).to_bytes(8, byteorder='little')
                    buf = size + buf
                    pipe.write(buf)  # [int64 + chunk]+ ZERO_SIZE
                    pipe.flush()
                # zero-length frame tells the receiver the stream is done
                pipe.write(ZERO_SIZE)
            pipe.flush()
    except IOError as e:
        ssh_ctx['terminate'] = True
        # EPIPE just means the remote side went away; anything else is unexpected
        if e.errno != errno.EPIPE:
            LOG.exception('Unexpected I/O Error')
            raise e
    except BaseException:
        ssh_ctx['terminate'] = True
        LOG.exception('Strange exception in the ssh sender')
        raise
def logger_pipe(ssh_ctx):
    """Reader thread for the remote worker's stderr.

    Same framing as input_handler (handshake until magic string, then
    length-prefixed pickle frames), but each frame is a LogRecord kwargs
    dict that is replayed into the local logging system with the host
    name prepended to the message.
    """
    pipe = ssh_ctx['popen'].stderr
    host = ssh_ctx['host']
    ssh_ctx['stderr_text'] = pipe.read(magic_string_lenth)
    # TODO: error if magic word does not arrive within timelimit
    while ssh_ctx['stderr_text'][-magic_string_lenth:] != magic_string:
        n = pipe.read(1)
        if not n:
            early_terminate(ssh_ctx)
            return
        ssh_ctx['stderr_text'] += n
    if not ssh_ctx['established']:  # not thread safe, log maybe incomple
        established_log(ssh_ctx)
        ssh_ctx['established'] = True
    # size uint64
    # payload_pickle
    # repeate..
    while True:
        to_nr = b''
        size = None
        msg = b''
        # assuming the read_size is respected, not looping now..
        to_nr = pipe.read(8)
        if len(to_nr) == 8:
            size = int.from_bytes(to_nr, byteorder='little')
        else:
            # short read on the size prefix means the channel is gone
            if not ssh_ctx['terminate']:
                LOG.warning('Connection to host {host} terminated without a request'.format(host=host))
            ssh_ctx['terminate'] = True
            return
        msg = pipe.read(size)
        if not msg:
            ssh_ctx['terminate'] = True
            LOG.error('Unexpected ssh connection termination to {host}'.format(host=host))
            return
        # NOTE(review): pickle.loads on remote data — acceptable only
        # because the remote worker is our own shipped code
        real_msg = pickle.loads(msg)
        suffix = real_msg.get('msg', '')
        real_msg['msg'] = ' '.join(('host:', ssh_ctx['host'], str(suffix)))
        LOG.handle(logging.LogRecord(**real_msg))
# host -> ssh_ctx dict ('queue', 'popen', state flags); filled by init_connection()
ssh_hosts = {}
def init_connection(host, host_address=None, user=None, ssh_args=None):
    """Open the ssh control channel to *host* and start its I/O threads.

    The remote command reads the payload size, unpacks the tarball into a
    temp dir and re-executes this program's __main__ as a worker under
    sudo; stdin/stdout/stderr become the control and log channels.
    """
    global ssh_hosts
    if not host_address:
        host_address = host
    if user:
        user_part = user + '@'
    else:
        user_part = ''
    if not ssh_args:
        ssh_args = ['-o', 'UserKnownHostsFile=/dev/null', '-o', 'StrictHostKeyChecking=no', '-o', 'ConnectionAttempts=32']
    # lazily build the payload tarball on the first connection
    if not payload_size:
        init_transfer()
    assert(payload_size)
    # Warning UserKnownHostsFile=/dev/null is not secure..
    # todo: dafult to aes (aes-128), consider compression
    main_loc = __main__.__file__.split(os.path.sep)
    main_py = os.sep.join(main_loc[-2:])
    args = ['ssh', user_part + host_address, ] + ssh_args + [
        """read a; workdir=`mktemp -d`; cd "$workdir"; dd iflag=fullblock bs="$a" count=1 2>/dev/null |
  tar xz; sudo bash -c 'exec 4>&0 ; exec 5>&1 ; exec 6>&2; PYTHONPATH=. exec python3 {main} -r -I "{host}" </dev/null &>"$workdir"/worker.out'""".format(host=host, main=main_py)]
    # will it be zombiee without wait or communicate call ?
    p = subprocess.Popen(args, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    outgoing_queue = queue.Queue()
    ssh_ctx = {'queue': outgoing_queue, 'popen': p, 'host': host,
               'terminate': False, 'established': False,
               'stderr_text': b'', 'stdout_text': b''}
    # three daemon threads per host: response reader, message sender, log relay
    th = threading.Thread(target=input_handler, kwargs={'ssh_ctx': ssh_ctx})
    th.daemon = True
    th.start()
    th = threading.Thread(target=sender, kwargs={'ssh_ctx': ssh_ctx})
    th.daemon = True
    th.start()
    th = threading.Thread(target=logger_pipe, kwargs={'ssh_ctx': ssh_ctx})
    th.daemon = True
    th.start()
    ssh_hosts[host] = ssh_ctx
def terminate_connection(host):
    """Request shutdown of the ssh control channel for *host* and forget it."""
    ctx = ssh_hosts[host]
    # flag first so the reader/sender threads treat the EOF as expected
    ctx['terminate'] = True
    ctx['popen'].terminate()
    ssh_hosts.pop(host)
class MMFileReader(object):
    """Read-only, file-like view over a shared mmap.

    Several readers may share one mapping owned by *parent*
    (a StreamFactoryMMapFile); close() notifies the parent so the mapping
    can be released once all readers are done.
    """

    def __init__(self, parent, mapping, limit):
        self.mapping = mapping
        # NOTE(review): +1 kept from the original; slicing clamps at the
        # mapping end, so it never over-reads — confirm it is intentional
        self.limit = limit + 1
        self.pos = 0
        self.parent = parent
        self.closed = False

    def read(self, size=None):
        """Return up to *size* bytes; everything remaining when size is None."""
        # BUG FIX: identity check — the original falsy test made read(0)
        # return the whole remaining buffer instead of b''
        if size is None:
            size = self.limit
        r = self.mapping[self.pos: min(self.pos + size, self.limit)]
        self.pos += size  # pos can be higer than limit
        return r

    def close(self):
        """Tell the owning factory this reader is finished (idempotent)."""
        if not self.closed:
            self.parent.reader_done()
            self.closed = True


# NOTE: let's not depend on gc cleanup internals for close
class StreamFactoryMMapFile(object):
    """Hand out zero-copy readers over one read-only mmap of *source*.

    The mapping is closed once finish_distribute() has been called AND
    every handed-out reader has been closed.
    """

    def __init__(self, source):
        origin = open(source, 'rb')
        mm = mmap.mmap(origin.fileno(), 0, prot=mmap.PROT_READ)
        self.mm = mm
        self.limit = os.path.getsize(source)
        self.nr_child = 0
        self.finished = False
        self.child_lock = threading.Lock()
        origin.close()  # mmap expected to stay open

    def _has_child(self):
        """Return the current number of live readers."""
        with self.child_lock:
            return self.nr_child

    def _dec_child(self):
        """Decrement the reader count and return the remaining count.

        BUG FIX: the original returned None, so reader_done() treated the
        result as falsy and could close the mapping while other readers
        were still active.
        """
        with self.child_lock:
            assert self.nr_child > 0
            self.nr_child -= 1
            return self.nr_child

    def _inc_child(self):
        with self.child_lock:
            self.nr_child += 1

    def get_stream_for(self, host):
        """Return a new reader over the shared mapping for *host*."""
        assert not self.finished
        self._inc_child()
        # BUG FIX: pass self as the reader's parent — the original omitted
        # it, so every call raised TypeError (MMFileReader takes 3 args)
        return MMFileReader(self, self.mm, self.limit)

    def _close(self):
        self.mm.close()

    def finish_distribute(self):  # no more get_stream_for will be called
        self.finished = True
        if not self._has_child():
            self._close()

    def reader_done(self):
        """Called by a reader's close(); release the mapping when possible."""
        c = self._dec_child()
        if not c and self.finished:
            self._close()
# NOTE: let's not depend on gc cleanup internals for close
class StreamFactoryBytes(object):
    """Stream factory serving the same in-memory bytes to every host."""

    def __init__(self, data):
        self.data = data

    def get_stream_for(self, host):
        # Does it duplicates the data in memory ?
        # BytesIO wraps the existing bytes object; each host gets an
        # independent read position
        return io.BytesIO(self.data)
def send_msgs_stream(hosts, msg_type, task_id, stream_factory, **kwargs):
    """Send one framed pickle message plus a per-host stream to *hosts*.

    Registers *task_id* in ssh_messages so wait_for_all_response() can
    collect one response per host.
    """
    global ssh_messages
    m_dict = {'msg_type': msg_type, 'task_id': task_id}
    m_dict.update(kwargs)
    msg = pickle.dumps(m_dict)
    size = len(msg)
    real_msg = size.to_bytes(8, byteorder='little') + msg
    targets = len(hosts)
    response_dicts = {x: {} for x in hosts}
    finalize = threading.Lock()
    if targets:
        finalize.acquire()
    # BUG FIX: register the task BEFORE queueing any message — a fast
    # response could otherwise reach input_handler before task_id existed
    # in ssh_messages and raise KeyError there
    ssh_messages_mutex.acquire()
    ssh_messages[task_id] = {'response_dicts': response_dicts, 'mutex': threading.Lock(), 'finalize': finalize, 'to_process': targets}
    ssh_messages_mutex.release()
    for host in hosts:
        stream = stream_factory.get_stream_for(host)
        ssh_hosts[host]['queue'].put({'head': real_msg, 'stream': stream})
def send_msgs(hosts, msg_type, task_id, **kwargs):
    """Send the same framed pickle message to every host in *hosts*.

    Registers *task_id* in ssh_messages so wait_for_all_response() can
    collect one response per host.
    """
    global ssh_messages
    m_dict = {'msg_type': msg_type, 'task_id': task_id}
    m_dict.update(kwargs)
    msg = pickle.dumps(m_dict)
    size = len(msg)
    real_msg = size.to_bytes(8, byteorder='little') + msg
    targets = len(hosts)
    response_dicts = {x: {} for x in hosts}
    finalize = threading.Lock()
    if targets:
        finalize.acquire()
    # BUG FIX: register the task BEFORE queueing any message — a fast
    # response could otherwise reach input_handler before task_id existed
    # in ssh_messages and raise KeyError there
    ssh_messages_mutex.acquire()
    ssh_messages[task_id] = {'response_dicts': response_dicts, 'mutex': threading.Lock(), 'finalize': finalize, 'to_process': targets}
    ssh_messages_mutex.release()
    for host in hosts:
        ssh_hosts[host]['queue'].put({'head': real_msg})
# sends different message to each host with the same task__id
def send_msgs_diff(hosts_msg, msg_type, task_id):
    """Send a per-host message (hosts_msg maps host -> payload dict).

    Registers *task_id* in ssh_messages so wait_for_all_response() can
    collect one response per host.
    """
    global ssh_messages
    targets = len(hosts_msg)
    response_dicts = {x: {} for x in hosts_msg.keys()}
    finalize = threading.Lock()
    if targets:
        finalize.acquire()
    # BUG FIX: register the task BEFORE queueing any message — a fast
    # response could otherwise reach input_handler before task_id existed
    # in ssh_messages and raise KeyError there
    ssh_messages_mutex.acquire()
    ssh_messages[task_id] = {'response_dicts': response_dicts, 'mutex': threading.Lock(), 'finalize': finalize, 'to_process': targets}
    ssh_messages_mutex.release()
    for host, msg_d in hosts_msg.items():
        m_dict = {'msg_type': msg_type, 'task_id': task_id}
        m_dict.update(msg_d)
        msg = pickle.dumps(m_dict)
        size = len(msg)
        real_msg = size.to_bytes(8, byteorder='little') + msg
        ssh_hosts[host]['queue'].put({'head': real_msg})
# TODO: handle (unexpectedly) terminated
# TODO: add timout
def wait_for_all_response(task_id):
    """Block until every targeted host answered *task_id*; return the responses."""
    with ssh_messages_mutex:
        task_ctx = ssh_messages[task_id]
    # released by input_handler once the last host has responded
    task_ctx['finalize'].acquire()
    with ssh_messages_mutex:
        del ssh_messages[task_id]
    return task_ctx['response_dicts']
def func_to_str(func):
    """Return the dotted import path for *func*; strings pass through."""
    if not isinstance(func, abc.Callable):
        # assume it is already a string
        return func
    # NOTE: it will work only if the import used without any special thing
    module = func.__module__
    if module == '__main__':
        return func.__name__
    if isinstance(func, types.MethodType):
        # bound method: include the owning class in the path
        return '.'.join((module, func.__self__.__class__.__name__, func.__name__))
    return '.'.join((module, func.__name__))
def call_function(hosts, function, c_args=tuple(), c_kwargs=None):
    """Invoke *function* with c_args/c_kwargs on every host; return the task id.

    FIX: c_kwargs now defaults to None instead of a shared mutable {}
    (mutable-default-argument pitfall); passing {} explicitly still works.
    """
    c_kwargs = {} if c_kwargs is None else c_kwargs
    function_name = func_to_str(function)
    task_id = str(uuid.uuid4())  # todo: consider sequence
    send_msgs(hosts, msg_type='func', function_name=function_name,
              c_args=c_args, c_kwargs=c_kwargs,
              task_id=task_id)
    return task_id
def call_function_diff(host_calls, function, patch_first=None):
    """Invoke *function* on each host with per-host args/kwargs.

    host_calls maps host -> {'args': tuple, 'kwargs': dict}; when
    patch_first is given it is prepended to every host's args.
    """
    function_name = func_to_str(function)
    task_id = str(uuid.uuid4())  # todo: consider sequence
    host_msg = {}
    pf = (patch_first, )
    for host, params in host_calls.items():
        # BUG FIX: compare against None — a falsy-but-valid patch value
        # (0, '', False) was previously silently dropped
        if patch_first is not None:
            c_args = pf + params.get('args', tuple())
        else:
            c_args = params.get('args', tuple())
        c_kwargs = params.get('kwargs', dict())
        host_msg[host] = {'function_name': function_name,
                          'c_args': c_args,
                          'c_kwargs': c_kwargs}
    send_msgs_diff(host_msg, msg_type='func', task_id=task_id)
    return task_id
def call_function_stream(hosts, stream_factory, function,
                         c_args=tuple(), c_kwargs=None):
    """Invoke *function* on every host, shipping a stream alongside the call.

    FIX: c_kwargs now defaults to None instead of a shared mutable {}
    (mutable-default-argument pitfall); passing {} explicitly still works.
    """
    c_kwargs = {} if c_kwargs is None else c_kwargs
    function_name = func_to_str(function)
    task_id = str(uuid.uuid4())  # todo: consider sequence
    send_msgs_stream(hosts, msg_type='input_stream', task_id=task_id,
                     stream_factory=stream_factory, function_name=function_name,
                     c_args=c_args, c_kwargs=c_kwargs)
    return task_id
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO,
                        format='%(thread)d %(created)f %(levelname)s %(name)s %(message)s')
    init_connection('127.0.0.1')
    init_connection('127.0.0.2')
    init_connection('127.0.0.3')
    LOG.info('start')
    for a in range(1000):
        # BUG FIX: call_function requires the `function` argument; the
        # original passed only c_args=('abs', -42) and raised TypeError on
        # the first iteration.  Presumed intent: call abs(-42) remotely.
        task_id = call_function(['127.0.0.1', '127.0.0.2', '127.0.0.3'],
                                'abs', c_args=(-42,))
        wait_for_all_response(task_id)
    LOG.info('finish')
|
dream_socket.py | from socket import AF_INET, SOCK_STREAM, socket, SOL_SOCKET, SO_REUSEADDR
import asyncio
async def echo_server(address):
    """Accept loop of the echo server.

    NOTE(review): aspirational/pseudo code — a plain blocking socket is
    not awaitable (`await sock.accept()` raises TypeError) and `spawn` is
    not defined here (it is the curio API, not asyncio).  It sketches the
    desired shape rather than runnable asyncio code; confirm before use.
    """
    sock = socket(AF_INET, SOCK_STREAM)
    # allow quick restarts on the same port
    sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, True)
    sock.bind(address)
    sock.listen(5)
    while True:
        client, addr = await sock.accept()
        #echo_handler(client, addr)
        #Thread(target=echo_handler, args=(client, addr)).start()
        #Process(target=echo_handler, args=(client, addr)).start()
        await spawn(echo_client(client, addr))
async def echo_client(client, addr):
    """Echo everything received on *client* back to it until EOF.

    NOTE(review): like echo_server above, this is curio-style pseudo code —
    a plain socket supports neither `async with` nor awaitable recv/sendall;
    it would need curio sockets or asyncio streams to actually run.
    """
    print("Connection from", addr)
    async with client:
        while True:
            data = await client.recv(100_000)
            if not data:
                # peer closed the connection
                break
            await client.sendall(data)
    print("Connection closed")
#def run(coro):
# try:
# coro.send(None)
# except StopIteration as e:
# return e.value
#if __name__ == "__main__":
# run(echo_server(('', 25000)))
|
utils.py | from __future__ import print_function
from os.path import dirname, join
from six.moves.http_client import HTTPConnection
from threading import Thread
from six.moves.BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from socket import error
from sys import stderr
from re import search
from collective.solr.local import getLocal, setLocal
from collective.solr import tests
import six
from zope.component.hooks import getSite, setSite
try:
from Zope2.App import zcml
except ImportError:
from Products.Five import zcml
def loadZCMLString(string):
    """Load a ZCML snippet with the active Zope site temporarily unset.

    zcml.load_string() is executed without a local site; the previous
    site is restored even when loading raises.
    """
    # Unset current site for Zope 2.13
    saved = getSite()
    setSite(None)
    try:
        zcml.load_string(string)
    finally:
        setSite(saved)
def getData(filename):
    """Return the raw bytes of *filename* from the test data folder.

    FIX: the original docstring claimed a file object was returned (it is
    the file's bytes), and the file handle was never closed — use a
    context manager so it is released deterministically.
    """
    filename = join(dirname(tests.__file__), "data", filename)
    with open(filename, "rb") as fd:
        return fd.read()
def fakehttp(solrconn, *fakedata):
    """helper function to set up a fake http request on a SolrConnection

    Replaces solrconn.conn with a FakeHTTPConnection that pops one canned
    response from *fakedata* per request; returns a FakeOutput capturing
    everything "sent" so tests can inspect the outgoing XML.
    """

    class FakeOutput(list):
        """helper class to organize output from fake connections"""

        conn = solrconn

        def log(self, item):
            # record one sendall() chunk of the current request
            self.current.append(item)

        def get(self, skip=0):
            # pop and join the oldest request's chunks (optionally skipping)
            self[:] = self[skip:]
            return b"".join(self.pop(0)).replace(b"\r", b"")

        def new(self):
            # start a buffer for the next request
            self.current = []
            self.append(self.current)

        def __len__(self):
            self.conn.flush()  # send out all pending xml
            return super(FakeOutput, self).__len__()

        def __str__(self):
            self.conn.flush()  # send out all pending xml
            if self:
                return "".join([chunk.decode("utf-8") for chunk in self[0]]).replace(
                    "\r", ""
                )
            else:
                return ""

    output = FakeOutput()

    class FakeSocket(six.BytesIO):
        """helper class to fake socket communication"""

        def sendall(self, str):
            # divert outgoing data into the shared FakeOutput buffer
            output.log(str)

        # makefile signature differs between py2 and py3 httplib/http.client
        if six.PY2:

            def makefile(self, mode, name):
                return self

        else:

            def makefile(self, mode):
                return self

        def read(self, amt=None):
            if self.closed:
                return b""
            return six.BytesIO.read(self, amt)

        def readline(self, length=None):
            if self.closed:
                return b""
            return six.BytesIO.readline(self, length)

    class FakeHTTPConnection(HTTPConnection):
        """helper class to fake a http connection object from httplib.py"""

        def __init__(self, host, *fakedata):
            HTTPConnection.__init__(self, host)
            self.fakedata = list(fakedata)

        def putrequest(self, *args, **kw):
            self.url = args[1]
            response = self.fakedata.pop(0)  # get first response
            self.sock = FakeSocket(response)  # and set up a fake socket
            output.new()  # as well as an output buffer
            HTTPConnection.putrequest(self, *args, **kw)

        def setTimeout(self, timeout):
            pass

    solrconn.conn = FakeHTTPConnection(solrconn.conn.host, *fakedata)
    return output
def fakemore(solrconn, *fakedata):
    """Queue additional canned http responses on an already-faked connection."""
    connection = solrconn.conn
    # Only connections patched by `fakehttp` carry a `fakedata` attribute.
    assert hasattr(connection, "fakedata")  # `isinstance()` doesn't work?
    for response in fakedata:
        connection.fakedata.append(response)
def fakeServer(actions, port=55555):
    """helper to set up and activate a fake http server used for testing
    purposes; <actions> must be a list of handler functions, which will
    receive the base handler as their only argument and are used to
    process the incoming requests in turn; returns a thread that should
    be 'joined' when done"""

    class Handler(BaseHTTPRequestHandler):
        # Each incoming request consumes the next action from the shared
        # list; the server thread below stops once the list is empty.
        def do_POST(self):
            action = actions.pop(0)  # get next action
            action(self)  # and process it...

        def do_GET(self):
            action = actions.pop(0)  # get next action
            action(self)  # and process it...

        def log_request(*args, **kw):
            # Silence the default per-request stderr logging.
            pass

    def runner():
        # `server` is resolved at call time, so it is safe to reference it
        # here even though it is assigned below this definition.
        while actions:
            server.handle_request()

    server = HTTPServer(("", port), Handler)
    thread = Thread(target=runner)
    thread.start()
    return thread
def pingSolr():
    """Test if the solr server is available.

    Returns the cached availability flag when present; otherwise probes
    the local solr admin ping endpoint, caches the result via setLocal,
    and prints a banner to stderr when solr is unreachable.

    Fix: the HTTPConnection was never closed, leaking a socket on every
    uncached call; it is now closed in a ``finally`` block.
    """
    status = getLocal("solrStatus")
    if status is not None:
        return status
    conn = HTTPConnection("localhost", 8983)
    try:
        conn.request("GET", "/solr/plone/admin/ping")
        response = conn.getresponse()
        status = response.status == 200
        msg = "INFO: solr return status '%s'" % response.status
    except error as e:
        status = False
        msg = 'WARNING: solr tests could not be run: "%s".' % e
    finally:
        conn.close()  # release the socket whether or not the ping succeeded
    if not status:
        print(file=stderr)
        print("*" * len(msg), file=stderr)
        print(msg, file=stderr)
        print("*" * len(msg), file=stderr)
        print(file=stderr)
    setLocal("solrStatus", status)
    return status
def numFound(result):
    """Return the numFound="N" count from a solr XML response as an int.

    Accepts bytes or text; returns None when the attribute is absent.
    (``bytes`` is equivalent to ``six.binary_type`` on both Python 2,
    where bytes is str, and Python 3.)
    """
    text = result.decode("utf-8") if isinstance(result, bytes) else result
    found = search(r'numFound="(\d+)"', text)
    return int(found.group(1)) if found is not None else found
|
deletionwatcher.py | import json
import requests
import time
import websocket
from bs4 import BeautifulSoup
from threading import Thread
from metasmoke import Metasmoke
from globalvars import GlobalVars
from datahandling import is_false_positive, is_ignored_post, get_post_site_id_link
class DeletionWatcher:
    """Watches Stack Exchange websockets for deletion of reported posts."""

    @classmethod
    def update_site_id_list(cls):
        """Refresh GlobalVars.site_id_dict from the SE site-switcher page."""
        # Explicit parser avoids bs4's "no parser specified" warning and
        # keeps behavior consistent regardless of which parsers are installed.
        soup = BeautifulSoup(
            requests.get("http://meta.stackexchange.com/topbar/site-switcher/site-list").text,
            "html.parser")
        site_id_dict = {}
        for site in soup.findAll("a", attrs={"data-id": True}):
            site_name = site["href"][2:]
            site_id = site["data-id"]
            site_id_dict[site_name] = site_id
        GlobalVars.site_id_dict = site_id_dict

    @classmethod
    def check_websocket_for_deletion(cls, post_site_id, post_url, timeout):
        """Block up to `timeout` seconds waiting for the post to be deleted.

        post_site_id is a (post_id, site_name, post_type) tuple.
        Returns True if a matching post-deleted event arrived, False on
        timeout, or None when the question/site id cannot be resolved.
        """
        time_to_check = time.time() + timeout
        post_id = post_site_id[0]
        post_type = post_site_id[2]
        if post_type == "answer":
            # Fix: check for None *before* str() — the original compared
            # str(None) (the string "None") against None, so the guard
            # could never fire and "None" leaked into the subscription key.
            question_link = get_post_site_id_link(post_site_id)
            if question_link is None:
                return
            question_id = str(question_link)
        else:
            question_id = post_id
        post_site = post_site_id[1]
        if post_site not in GlobalVars.site_id_dict:
            return
        site_id = GlobalVars.site_id_dict[post_site]
        ws = websocket.create_connection("ws://qa.sockets.stackexchange.com/")
        try:
            ws.send(site_id + "-question-" + question_id)
            while time.time() < time_to_check:
                ws.settimeout(time_to_check - time.time())
                try:
                    a = ws.recv()
                except websocket.WebSocketTimeoutException:
                    t_metasmoke = Thread(target=Metasmoke.send_deletion_stats_for_post,
                                         args=(post_url, False))
                    t_metasmoke.start()
                    return False
                if a is not None and a != "":
                    try:
                        d = json.loads(json.loads(a)["data"])
                    except Exception:
                        # Malformed or non-JSON frame; keep listening.
                        continue
                    if d["a"] == "post-deleted" and str(d["qId"]) == question_id \
                            and ((post_type == "answer" and "aId" in d and str(d["aId"]) == post_id) or post_type == "question"):
                        t_metasmoke = Thread(target=Metasmoke.send_deletion_stats_for_post,
                                             args=(post_url, True))
                        t_metasmoke.start()
                        return True
        finally:
            # Fix: the websocket connection was never closed, leaking a
            # socket on every call (these calls last up to 20 minutes).
            ws.close()
        t_metasmoke = Thread(target=Metasmoke.send_deletion_stats_for_post,
                             args=(post_url, False))
        t_metasmoke.start()
        return False

    @classmethod
    def check_if_report_was_deleted(cls, post_site_id, post_url, message):
        """Wait up to 20 minutes; delete the chat `message` if the post went away."""
        was_report_deleted = cls.check_websocket_for_deletion(post_site_id, post_url, 1200)
        if was_report_deleted:
            try:
                message.delete()
            except Exception:
                # Best-effort: the chat message may already be gone or
                # undeletable; that must not crash the watcher thread.
                pass

    @classmethod
    def post_message_if_not_deleted(cls, post_site_id, post_url, message_text, room):
        """Post `message_text` to `room` unless the post was deleted, marked
        false-positive, or ignored in the meantime."""
        was_report_deleted = cls.check_websocket_for_deletion(post_site_id, post_url, 300)
        if not was_report_deleted and not is_false_positive(post_site_id[0:2]) and not is_ignored_post(post_site_id[0:2]):
            room.send_message(message_text)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.